/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

pmd_t *top_pmd;

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};

/*
 * These are useful for identifying cache coherency problems
 * by allowing the cache, or the cache and the write buffer,
 * to be turned off.  (Note: the write buffer should not be
 * enabled while the cache is off.)
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

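/*
 * These parameters are parsed from the kernel command line, e.g.
 * "cachepolicy=writethrough" or "ecc=on"; "nocache" and "nowb" are
 * deprecated aliases handled by the wrappers above.
 */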
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

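/*
 * On ARM's two-level page tables the pmd level is folded into the
 * pgd, so pmd_offset() simply recasts the pgd entry.  These helpers
 * fetch the pmd covering a virtual address in the given (or the
 * kernel's) page directory.
 */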
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * The level 1 (pgd) table must be 16K, so we need an order-2
 * (four page) allocation.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);

		spin_unlock(&mm->page_table_lock);
	}

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS with protection PROT
 * (which includes the domain).  This operates in half-pgdir (1MB)
 * increments: each pgd_t covers 2MB, but a hardware section
 * descriptor covers only 1MB, so bit 20 of the virtual address
 * selects which of the two entries to write.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

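	/*
	 * A supersection maps 16MB.  The architecture requires the
	 * descriptor to be replicated in 16 consecutive level 1 slots,
	 * so write the same SUPERSECTION_MASK'd base into all 16
	 * half-pgdir entries.
	 */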
	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys & SUPERSECTION_MASK,
				   prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
		phys += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS with protection PROT
 * (the domain travels in PROT_L1).  Note that due to the way we
 * map the PTEs, we must allocate two PTE_SIZE'd blocks - one for
 * the Linux pte table, and one for the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		unsigned long pmdval;
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		pmdval = __pa(ptep) | prot_l1;
		pmdp[0] = __pmd(pmdval);
		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
		flush_pmd_entry(pmdp);
	}
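	/*
	 * pmdp[0] and pmdp[1] each point at a 256-entry (1K) hardware
	 * coarse page table; the Linux table, which carries the extra
	 * state bits (young, dirty), lives in the same allocation and
	 * is what pte_offset_kernel() resolves to.
	 */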
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
	pmd_clear(pmd_off_k(virt));
}

struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};
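
/*
 * prot_pte gives the Linux PTE bits used for page mappings, prot_l1
 * the level 1 descriptor bits for an entry pointing at a page table,
 * prot_sect the level 1 section descriptor bits, and domain the ARM
 * domain the mapping is placed in.
 */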

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			     L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
			     PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			     L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			     L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			     L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
			     PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
			     PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (cpu_arch <= CPU_ARCH_ARMv5) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
	}

	cp = &cache_policies[cachepolicy];

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
	       ecc_mask ? "en" : "dis", cp->policy);
}

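/*
 * The CPU takes exceptions at 0xffff0000 when the V bit is set in the
 * CP15 control register, and at virtual address 0 otherwise.
 */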
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	long off;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08lx at 0x%08lx in user region\n",
		       md->physical, md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       md->physical, md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	virt   = md->virtual;
	off    = md->physical - virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       md->physical, md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/* Align to supersection boundary */
		while ((virt & ~SUPERSECTION_MASK || (virt + off) &
			~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
			alloc_init_section(virt, virt + off, prot_sect);

			virt   += (PGDIR_SIZE / 2);
			length -= (PGDIR_SIZE / 2);
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This ensures predictable results when the
 * MMU is turned off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	pmd_t *pmd;
	int i;
	int cpu_arch = cpu_architecture();

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
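		/*
		 * Each pgd entry is written as two identity-mapped
		 * 1MB section descriptors.
		 */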
		pmdval = (i << PGDIR_SHIFT) |
			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			 PMD_TYPE_SECT;
		if (cpu_arch <= CPU_ARCH_ARMv5)
			pmdval |= PMD_BIT4;
		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}

extern void _stext, _etext;

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
	struct map_desc *init_maps, *p, *q;
	unsigned long address = 0;
	int i;

	build_mem_type_table();

	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
	p->physical   = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
	p->virtual    = (unsigned long)&_stext & PMD_MASK;
	p->length     = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
	p->type       = MT_ROM;
	p ++;
#endif

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		p->physical   = mi->bank[i].start;
		p->virtual    = __phys_to_virt(p->physical);
		p->length     = mi->bank[i].size;
		p->type       = MT_MEMORY;
		p ++;
	}

#ifdef FLUSH_BASE
	p->physical   = FLUSH_BASE_PHYS;
	p->virtual    = FLUSH_BASE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_CACHECLEAN;
	p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
	p->physical   = FLUSH_BASE_PHYS + PGDIR_SIZE;
	p->virtual    = FLUSH_BASE_MINICACHE;
	p->length     = PGDIR_SIZE;
	p->type       = MT_MINICLEAN;
	p ++;
#endif

	/*
	 * Go through the initial mappings, but clear out any
	 * pgdir entries that are not in the description.
	 */
	q = init_maps;
	do {
		if (address < q->virtual || q == p) {
			clear_mapping(address);
			address += PGDIR_SIZE;
		} else {
			create_mapping(q);

			address = q->virtual + q->length;
			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

			q ++;
		}
	} while (address != 0);

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	init_maps->physical   = virt_to_phys(init_maps);
	init_maps->virtual    = 0xffff0000;
	init_maps->length     = PAGE_SIZE;
	init_maps->type       = MT_HIGH_VECTORS;
	create_mapping(init_maps);

	if (!vectors_high()) {
		init_maps->virtual = 0;
		init_maps->type = MT_LOW_VECTORS;
		create_mapping(init_maps);
	}

	flush_cache_all();
	local_flush_tlb_all();

	top_pmd = pmd_off_k(0xffff0000);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
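
/*
 * For illustration only: a machine's ->map_io callback would normally
 * build a static table and hand it to iotable_init().  The names and
 * addresses below are made up; real values come from the platform's
 * memory map.
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual  = 0xf0000000,
 *			.physical = 0x40000000,
 *			.length   = SZ_1M,
 *			.type     = MT_DEVICE
 *		}
 *	};
 *
 *	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 */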