/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/sort.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
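/*
 * Update both cached copies of the CP15 control register
 * (cr_alignment and cr_no_alignment) and write the new value to the
 * hardware, with interrupts disabled.  The alignment bit is
 * deliberately excluded from the mask; it is handled by "noalign".
 */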
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
#ifdef CONFIG_SMP
	cachepolicy = CPOLICY_WRITEALLOC;
#endif

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
	/*
	 * Only use write-through for non-SMP systems
	 */
	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3())
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

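/*
 * Populate the L2 (pte) entries covering [addr, end) with successive
 * pages starting at 'pfn', allocating a fresh pte table from bootmem
 * if this pmd entry is still empty.
 */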
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

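/*
 * Map a region whose physical address lies above 4GB (pfn >= 0x100000)
 * using 16MB supersections.  Only ARMv6+ and XSC3 CPUs support this,
 * and only for domain 0.
 */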
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static unsigned long __initdata vmalloc_reserve = SZ_128M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static int __init early_vmalloc(char *arg)
{
	vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}
	return 0;
}
early_param("vmalloc", early_vmalloc);

#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)

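/*
 * Walk the memory banks, truncating or splitting any bank that overlaps
 * the vmalloc area.  With CONFIG_HIGHMEM the portion above the lowmem
 * limit is kept as a highmem bank (and dropped again if highmem cannot
 * be supported); without it, that portion is simply discarded.
 */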
static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) > VMALLOC_MIN ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (__va(bank->start) < VMALLOC_MIN &&
		    bank->size > VMALLOC_MIN - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= VMALLOC_MIN - __va(bank->start);
				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = VMALLOC_MIN - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= VMALLOC_MIN ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
#ifdef CONFIG_SMP
		} else if (tlb_ops_need_broadcast()) {
			/*
			 * kmap_high needs to occasionally flush TLB entries,
			 * however, if the TLB entries need to be broadcast
			 * we may deadlock:
			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
			 *  flush_tlb_kernel_range->smp_call_function_many
			 *   (must not be called with irqs off)
			 */
			reason = "without hardware TLB ops broadcasting";
#endif
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
			BOOTMEM_DEFAULT);
#else
	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
			BOOTMEM_DEFAULT);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

	/* H1940, RX3715 and RX1950 need to reserve this for suspend */

	if (machine_is_h1940() || machine_is_rx3715()
		|| machine_is_rx1950()) {
		reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
				BOOTMEM_DEFAULT);
		reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
				BOOTMEM_DEFAULT);
	}

	if (machine_is_palmld() || machine_is_palmtx()) {
		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
				BOOTMEM_EXCLUSIVE);
		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
				BOOTMEM_EXCLUSIVE);
	}

	if (machine_is_treo680() || machine_is_centro()) {
		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
				BOOTMEM_EXCLUSIVE);
		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
				BOOTMEM_EXCLUSIVE);
	}

	if (machine_is_palmt5())
		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
				BOOTMEM_EXCLUSIVE);

	/*
	 * U300 - This platform family can share physical memory
	 * between two ARM cpus, one running Linux and the other
	 * running another OS.
	 */
	if (machine_is_u300()) {
#ifdef CONFIG_MACH_U300_SINGLE_RAM
#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
		res_size = 0x00100000;
#endif
#endif
	}

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
				BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

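/*
 * Pre-allocate the pte table covering the fixed kmap (PKMAP) area so
 * that highmem pages can be temporarily mapped later.
 */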
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
	BUG_ON(!pmd_none(*pmd) || !pte);
	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
	pkmap_page_table = pte + PTRS_PER_PTE;
#endif
}

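/* Create an MT_MEMORY mapping covering a single lowmem bank. */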
static inline void map_memory_bank(struct membank *bank)
{
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
}

static void __init map_lowmem(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* Map all the lowmem memory banks. */
	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}
}

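/* sort() helper: order memory banks by ascending start pfn. */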
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	bootmem_init();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * allocate the zero page.  Note that this always succeeds and
	 * returns a zeroed result.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need access to user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}