/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void _stext, _etext, __data_start, _end;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
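
/*
 * One of the policies above is selected at boot time via the
 * "cachepolicy=" kernel parameter handled below; for example, booting
 * with "cachepolicy=writethrough" picks the CPOLICY_WRITETHROUGH entry.
 * (Illustrative usage of the existing parameter, not a new interface.)
 */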

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be enabled while the cache is off.)
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif
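
/*
 * Illustrative use of adjust_cr() (the caller shown is an assumption,
 * not taken from this file): to clear a CP15 control register bit at
 * run time while keeping the cached cr_alignment/cr_no_alignment
 * values in sync, one might call
 *
 *	adjust_cr(CR_C, 0);
 *
 * which clears the data cache enable bit on UP systems.
 */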

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_pte_ext	= PTE_EXT_TEX(2),
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_IXP2000] = {	  /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
				  PMD_SECT_TEX(1),
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
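
/*
 * Sketch of typical use (the caller is an assumption, not defined in
 * this file): mapping code such as the ioremap implementation can look
 * up the protections for a memory type before building entries, e.g.
 *
 *	const struct mem_type *type = get_mem_type(MT_DEVICE);
 *	if (type)
 *		pte = pfn_pte(pfn, __pgprot(type->prot_pte));
 *
 * A NULL return means the type index was out of range.
 */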

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	/*
	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
	 */
	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
		mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
	} else {
		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (it was
	 * the cache "update-able on write" bit on the ARM610).
	 * However, Xscale cores require this bit to be cleared.
	 */
	if (cpu_is_xscale()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent()) {
		if (cpu_is_xsc3()) {
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
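		/*
		 * Note on the sizing below: in the standard ARM Linux
		 * page table layout, each page allocated here holds the
		 * hardware PTE tables together with the shadow "Linux"
		 * PTE tables carrying the extra software bits, hence
		 * 2 * PTRS_PER_PTE entries rather than one table's worth.
		 */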
		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
			    type->prot_pte_ext);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; PTEs aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
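
/*
 * Sketch of a map_desc that would take the 36-bit path (all values are
 * hypothetical): a pfn at or above 0x100000 encodes a physical address
 * beyond 4GB, the region must be 16MB (supersection) aligned, and the
 * memory type must be one assigned domain 0:
 *
 *	struct map_desc high_ram = {
 *		.virtual = 0xd0000000,		(assumed free, aligned VA)
 *		.pfn     = 0x100000,		(physical 0x100000000)
 *		.length  = SZ_16M,
 *		.type    = MT_MEMORY,
 *	};
 *	create_mapping(&high_ram);
 */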

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
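
/*
 * Typical (illustrative) use from a machine's ->map_io callback; the
 * table name and addresses below are assumptions, not taken from any
 * real platform:
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf0000000,
 *			.pfn     = __phys_to_pfn(0x80000000),
 *			.length  = SZ_1M,
 *			.type    = MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init foo_map_io(void)
 *	{
 *		iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *	}
 */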

static int __init check_membank_valid(struct membank *mb)
{
	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (mb->size == 0)
		return 0;

	/*
	 * Check whether this memory region would entirely overlap
	 * the vmalloc area.
	 */
	if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
		printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
		       "(vmalloc region overlap).\n",
		       mb->start, mb->start + mb->size - 1);
		return 0;
	}

	/*
	 * Check whether this memory region would partially overlap
	 * the vmalloc area.
	 */
	if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
	    phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
		unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);

		printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
		       "to -%.8lx (vmalloc region overlap).\n",
		       mb->start, mb->start + mb->size - 1,
		       mb->start + newsize - 1);
		mb->size = newsize;
	}

	return 1;
}

static void __init sanity_check_meminfo(struct meminfo *mi)
{
	int i;
	int j;

	for (i = 0, j = 0; i < mi->nr_banks; i++) {
		if (check_membank_valid(&mi->bank[i]))
			mi->bank[j++] = mi->bank[i];
	}
	mi->nr_banks = j;
}
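
/*
 * Worked example with hypothetical numbers: if VMALLOC_MIN corresponds
 * to physical address 0x28000000 under the linear mapping, a
 * boot-supplied bank covering 0x20000000-0x2fffffff is truncated above
 * to 0x20000000-0x27ffffff, while a bank starting at or beyond
 * 0x28000000 is dropped entirely.
 */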

static inline void prepare_page_table(struct meminfo *mi)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
			     BOOTMEM_DEFAULT);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
			     BOOTMEM_DEFAULT);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

	/* H1940 and RX3715 need to reserve this for suspend */

	if (machine_is_h1940() || machine_is_rx3715()) {
		reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
				     BOOTMEM_DEFAULT);
		reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
				     BOOTMEM_DEFAULT);
	}

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
				     BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULE_START;
	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	sanity_check_meminfo(mi);
	prepare_page_table(mi);
	bootmem_init(mi);
	devicemaps_init(mdesc);

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * Allocate the zero page.  Note that we count on this going OK.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

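		/*
		 * Note: in the standard ARM two-level layout each pgd
		 * entry spans 2MB, i.e. two 1MB hardware section entries,
		 * so both halves are written below (PGDIR_SHIFT - 1 is
		 * the 1MB offset of the second section).
		 */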
		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}