/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;
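/*
 * Cache policy selections, ordered from weakest to strongest; the
 * ordering matters below when the policy is clamped for older CPUs.
 */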
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};
#ifdef CONFIG_CPU_CP15
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#else	/* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warning("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);

#endif	/* ifdef CONFIG_CPU_CP15 / else */

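/*
 * Baseline protection bits shared by the device memory types below.
 */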
#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

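/*
 * Boot-time allocator: grab zeroed memory from memblock before the
 * page allocator is available.
 */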
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

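/*
 * Ensure the PMD entry points at a PTE table, allocating one on first
 * use, and return the PTE entry corresponding to 'addr'.
 */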
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

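/*
 * Fill the PTEs covering [addr, end) with consecutive pages starting
 * at 'pfn', using the protection bits of the given memory type.
 */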
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}

void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	vm_area_add_early(vm);
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
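/*
 * Reserve the fixed 2MB virtual window for the PCI I/O space, unless a
 * static mapping already occupies PCI_IO_VIRT_BASE.
 */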
static void __init pci_reserve_io(void)
{
	struct vm_struct *vm;
	unsigned long addr;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		addr = (unsigned long)vm->addr;
		addr &= ~(SZ_2M - 1);
		if (addr == PCI_IO_VIRT_BASE)
			return;
	}
	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LL
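/*
 * Map a single MT_DEVICE page for the low-level debug UART at the
 * physical/virtual address pair reported by debug_ll_addr().
 */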
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	create_mapping(&map);
}
#endif

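/*
 * Bottom of the vmalloc area: by default the vmalloc area occupies the
 * top 240MB of the kernel address space, with the VMALLOC_OFFSET guard
 * hole below it.
 */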
static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t arm_lowmem_limit __initdata = 0;

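/*
 * Trim, split or discard memory banks so that lowmem never collides
 * with the vmalloc area, and derive arm_lowmem_limit and high_memory.
 */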
void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

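/*
 * Tear down the boot-time mappings we no longer need: everything below
 * the kernel image, and everything between the end of the first lowmem
 * block and the vmalloc area.
 */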
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

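/*
 * Create the MT_MEMORY mappings for all lowmem memblock regions.
 */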
static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}
Russell Kingd111e8f2006-09-27 15:27:33 +01001266}