/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/mmu_writeable.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

#define RX_AREA_START		_text
#define RX_AREA_END		__start_rodata

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

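/*
 * Example command-line usage for the early parameters above (illustrative
 * only; the accepted policy names are exactly the cache_policies[] entries):
 *
 *   cachepolicy=writethrough
 *   nocache	(deprecated alias for cachepolicy=buffered)
 *   nowb	(deprecated alias for cachepolicy=uncached)
 */
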
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_R] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RX] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd)
{
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
	return pmd_page_vaddr(*pmd);
}

static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
{
	__pmd_populate(pmd, __pa(pte), prot);
	BUG_ON(pmd_bad(*pmd));
}

#ifdef CONFIG_HIGHMEM
static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
	unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_pte_alloc(pmd);
		early_pte_install(pmd, pte, prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
#endif

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *start_pte = early_pte_alloc(pmd);
	pte_t *pte = start_pte + pte_index(addr);

	/* If replacing a section mapping, the whole section must be replaced */
	BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));

	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	early_pte_install(pmd, start_pte, type->prot_l1);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type,
				      bool force_pages)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type,
	bool force_pages)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type, force_pages);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md, bool force_pages)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type, force_pages);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md, false);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
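
/*
 * Illustrative example: booting with "vmalloc=256M" makes early_vmalloc()
 * set vmalloc_min to (VMALLOC_END - 256MB); requests outside the range
 * [SZ_16M, VMALLOC_END - (PAGE_OFFSET + SZ_32M)] are clamped as above.
 */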

static phys_addr_t lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
	find_membank0_hole();
#endif

#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
	if (movable_reserved_size && __pa(vmalloc_min) > movable_reserved_start)
		vmalloc_min = __va(movable_reserved_start);
#endif
	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
					"ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
				"(!CONFIG_HIGHMEM).\n",
				(unsigned long long)bank->start,
				(unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
				"(vmalloc region overlap).\n",
				(unsigned long long)bank->start,
				(unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
				"to -%.8llx (vmalloc region overlap).\n",
				(unsigned long long)bank->start,
				(unsigned long long)bank->start + bank->size - 1,
				(unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(lowmem_limit - 1) + 1;
	memblock_set_current_limit(lowmem_limit);
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map, false);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map, false);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map, false);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map, false);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map, false);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

#ifdef CONFIG_STRICT_MEMORY_RWX
static struct {
	pmd_t *pmd_to_flush;
	pmd_t *pmd;
	unsigned long addr;
	pmd_t saved_pmd;
	bool made_writeable;
} mem_unprotect;

static DEFINE_SPINLOCK(mem_text_writeable_lock);

void mem_text_writeable_spinlock(unsigned long *flags)
{
	spin_lock_irqsave(&mem_text_writeable_lock, *flags);
}

void mem_text_writeable_spinunlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
}

/*
 * mem_text_address_writeable() and mem_text_address_restore()
 * should be called as a pair. They are used to make the
 * specified address in the kernel text section temporarily writeable
 * when it has been marked read-only by STRICT_MEMORY_RWX.
 * Used by kprobes and other debugging tools to set breakpoints etc.
 * mem_text_address_writeable() is invoked before writing.
 * After the write, mem_text_address_restore() must be called
 * to restore the original state.
 * This is only effective when used on the kernel text section
 * marked as MEMORY_RX by map_lowmem()
 *
 * They must each be called with mem_text_writeable_lock locked
 * by the caller, with no unlocking between the calls.
 * The caller should release mem_text_writeable_lock immediately
 * after the call to mem_text_address_restore().
 * Only the write and associated cache operations should be performed
 * between the calls.
 */

/* this function must be called with mem_text_writeable_lock held */
void mem_text_address_writeable(unsigned long addr)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);

	mem_unprotect.made_writeable = 0;

	if ((addr < (unsigned long)RX_AREA_START) ||
	    (addr >= (unsigned long)RX_AREA_END))
		return;

	mem_unprotect.pmd = pmd_offset(pud, addr);
	mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
	mem_unprotect.addr = addr & PAGE_MASK;

	if (addr & SECTION_SIZE)
		mem_unprotect.pmd++;

	mem_unprotect.saved_pmd = *mem_unprotect.pmd;
	if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
		return;

	*mem_unprotect.pmd &= ~PMD_SECT_APX;

	flush_pmd_entry(mem_unprotect.pmd_to_flush);
	flush_tlb_kernel_page(mem_unprotect.addr);
	mem_unprotect.made_writeable = 1;
}

/* this function must be called with mem_text_writeable_lock held */
void mem_text_address_restore(void)
{
	if (mem_unprotect.made_writeable) {
		*mem_unprotect.pmd = mem_unprotect.saved_pmd;
		flush_pmd_entry(mem_unprotect.pmd_to_flush);
		flush_tlb_kernel_page(mem_unprotect.addr);
	}
}
#endif

void mem_text_write_kernel_word(unsigned long *addr, unsigned long word)
{
	unsigned long flags;

	mem_text_writeable_spinlock(&flags);
	mem_text_address_writeable((unsigned long)addr);
	*addr = word;
	flush_icache_range((unsigned long)addr,
		((unsigned long)addr + sizeof(long)));
	mem_text_address_restore();
	mem_text_writeable_spinunlock(&flags);
}
EXPORT_SYMBOL(mem_text_write_kernel_word);

extern char __init_data[];

static void __init map_lowmem(void)
{
	struct memblock_region *reg;
	phys_addr_t start;
	phys_addr_t end;
	struct map_desc map;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
#ifdef CONFIG_STRICT_MEMORY_RWX
		if (start <= __pa(_text) && __pa(_text) < end) {
			map.length = SECTION_SIZE;
			map.type = MT_MEMORY;

			create_mapping(&map, false);

			map.pfn = __phys_to_pfn(start + SECTION_SIZE);
			map.virtual = __phys_to_virt(start + SECTION_SIZE);
			map.length = (unsigned long)RX_AREA_END - map.virtual;
			map.type = MT_MEMORY_RX;

			create_mapping(&map, false);

			map.pfn = __phys_to_pfn(__pa(__start_rodata));
			map.virtual = (unsigned long)__start_rodata;
			map.length = __init_begin - __start_rodata;
			map.type = MT_MEMORY_R;

			create_mapping(&map, false);

			map.pfn = __phys_to_pfn(__pa(__init_begin));
			map.virtual = (unsigned long)__init_begin;
			map.length = __init_data - __init_begin;
			map.type = MT_MEMORY;

			create_mapping(&map, false);

			map.pfn = __phys_to_pfn(__pa(__init_data));
			map.virtual = (unsigned long)__init_data;
			map.length = __phys_to_virt(end) - (unsigned int)__init_data;
			map.type = MT_MEMORY_RW;
		} else {
			map.length = end - start;
			map.type = MT_MEMORY_RW;
		}
#else
		map.length = end - start;
		map.type = MT_MEMORY;
#endif

		create_mapping(&map, false);
	}

#ifdef CONFIG_DEBUG_RODATA
	start = __pa(_stext) & PMD_MASK;
	end = ALIGN(__pa(__end_rodata), PMD_SIZE);

	map.pfn = __phys_to_pfn(start);
	map.virtual = __phys_to_virt(start);
	map.length = end - start;
	map.type = MT_MEMORY;

	create_mapping(&map, true);
#endif
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}