/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

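/*
 * By default, reserve 128 MB (128 << 20 bytes) of the kernel's virtual
 * address space for vmalloc/ioremap.  Boot code elsewhere in the tree
 * may override this (e.g. via the vmalloc= command-line option).
 */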
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	pud = pud_offset(pgd, 0);
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#else
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		if (page_table != pte_offset_kernel(pmd, 0))
			BUG();

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, allocating them wherever they are
 * missing in the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around without
 * rechecking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd))
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd))
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

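/*
 * Note: this check is deliberately coarse -- it treats everything from
 * PAGE_OFFSET up to __init_end as kernel text, so the whole kernel
 * image (init sections included, until they are freed) stays mapped
 * executable.
 */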
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx, pte_ofs;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
			unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;

			/* Map with big pages if possible, otherwise create normal page tables. */
			if (cpu_has_pse) {
				/* address2 is the last byte covered by this big page */
				unsigned long address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(address) || is_kernel_text(address2))
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				else
					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
				pfn += PTRS_PER_PTE;
			} else {
				pte = one_page_table_init(pmd);

				/* advance address with pfn so each pte gets the right exec decision */
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
					if (is_kernel_text(address))
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
					else
						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
				}
			}
		}
	}
}

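/*
 * Pentium Pro erratum (see ppro_with_ram_bug()): pfns 0x70000-0x7003f
 * correspond to physical addresses 0x70000000-0x7003ffff, a range that
 * affected steppings reportedly cannot use safely as RAM, so these
 * pages are kept reserved.
 */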
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}

extern int is_available_memory(efi_memory_desc_t *);

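/*
 * Decide whether a pfn is usable RAM by consulting the firmware memory
 * map: the EFI memory map when booted via EFI, the e820 map otherwise.
 */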
int page_is_ram(unsigned long pagenr)
{
	int i;
	unsigned long addr, end;

	if (efi_enabled) {
		efi_memory_desc_t *md;
		void *p;

		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
			md = p;
			if (!is_available_memory(md))
				continue;
			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

			if ((pagenr >= addr) && (pagenr < end))
				return 1;
		}
		return 0;
	}

	for (i = 0; i < e820.nr_map; i++) {

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640K->1MB area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

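/* Walk pgd -> pud -> pmd to the kernel pte mapping a fixmap address. */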
#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __devinit free_new_highpage(struct page *page)
{
	set_page_count(page, 1);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}

static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;
	return 0;
}

/*
 * The NUMA case is not currently handled: assume a single node, and
 * that all memory added dynamically and onlined here is in HIGHMEM.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;
	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

static void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}

#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * Swap suspend & friends need this for resume because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
void __init noexec_setup(const char *str)
{
	if (!strncmp(str, "on", 2) && cpu_has_nx) {
		__supported_pte_mask |= _PAGE_NX;
		disable_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		disable_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

int nx_enabled = 0;
#ifdef CONFIG_X86_PAE

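/*
 * NX support is advertised in CPUID leaf 0x80000001, EDX bit 20, and is
 * switched on through the EFER MSR (the EFER_NX bit).  With PAE enabled,
 * bit 63 of a pte (_PAGE_NX) then marks the page no-execute.
 */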
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	/* need cpuid level 0x80000001 for the NX bit */
	if (cpu_has_pae && (cpuid_eax(0x80000000) >= 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386s
 * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	num_physpages = highend_pfn;
#else
	num_physpages = max_low_pfn;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	extern int ppro_with_ram_bug(void);
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	int bad_ppro;

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
#endif

	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
			PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

#ifdef CONFIG_HIGHMEM
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

/*
 * This is for the non-NUMA, single-node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;	/* ZONE_HIGHMEM */
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif

kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;

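/*
 * PTRS_PER_PMD > 1 only in the PAE configuration, where pmd pages are
 * real allocations and get their own slab cache; without PAE the pmd
 * level is folded into the pgd, so only the pgd cache is needed.
 */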
void __init pgtable_cache_init(void)
{
	if (PTRS_PER_PMD > 1) {
		pmd_cache = kmem_cache_create("pmd",
					PTRS_PER_PMD*sizeof(pmd_t),
					PTRS_PER_PMD*sizeof(pmd_t),
					0,
					pmd_ctor,
					NULL);
		if (!pmd_cache)
			panic("pgtable_cache_init(): cannot create pmd cache");
	}
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				pgd_ctor,
				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): cannot create pgd cache");
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
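/*
 * How the test works: flag starts out as 1, a byte is written back to
 * the read-only FIX_WP_TEST mapping, and "xorl %2,%2" would then clear
 * flag.  If the CPU honours WP in supervisor mode the write faults and
 * the exception-table fixup jumps straight to the "2:" label, skipping
 * the xorl, so flag stays 1.  A returned 0 means the write silently
 * succeeded and WP is broken.
 */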
static int noinline do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

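/*
 * Freed init pages are filled with 0xcc (the x86 int3 opcode) before
 * being released, so a stray jump into stale init code traps
 * immediately instead of executing garbage.
 */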
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		memset((void *)addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif