/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

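/*
 * Size of the virtual address range reserved for vmalloc/ioremap at
 * the top of lowmem; 128MB by default.  (Presumably resizable via the
 * "vmalloc=" early boot option handled in the i386 setup code.)
 */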
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry.  This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                if (pmd_table != pmd_offset(pud, 0))
                        BUG();
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.  (vaddr is advanced in
 * PMD_SIZE steps and compared against 'end' with '!=', so callers
 * pass PMD-aligned boundaries.)
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(address) || is_kernel_text(address2))
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));

                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                for (pte_ofs = 0;
                                     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                                     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        if (is_kernel_text(address))
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
                                        else
                                                set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
                                }
                        }
                }
        }
}

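/*
 * Work around a Pentium Pro RAM erratum (see ppro_with_ram_bug()):
 * the 64 page frames from pfn 0x70000 up - physical 0x70000000, the
 * first 256kB above 1792MB - are not safe to use on affected
 * steppings, so add_one_highpage_init() keeps them reserved.
 */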
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}

int page_is_ram(unsigned long pagenr)
{
        int i;
        unsigned long addr, end;

        if (efi_enabled) {
                efi_memory_desc_t *md;
                void *p;

                for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                        md = p;
                        if (!is_available_memory(md))
                                continue;
                        addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                        end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;

                        if ((pagenr >= addr) && (pagenr < end))
                                return 1;
                }
                return 0;
        }

        for (i = 0; i < e820.nr_map; i++) {
                if (e820.map[i].type != E820_RAM)       /* not usable memory */
                        continue;
                /*
                 *      !!!FIXME!!! Some BIOSen report areas as RAM that
                 *      are not. Notably the 640->1Mb area. We need a sanity
                 *      check here.
                 */
                addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

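/*
 * Look up the kernel pte mapping a fixmap virtual address by walking
 * pgd -> pud -> pmd -> pte; the folded levels on i386 collapse at
 * compile time, so this is cheaper than it looks.
 */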
#define kmap_get_fixmap_pte(vaddr)					\
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assumes a single node, and that all memory added dynamically
 * and onlined here is HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}

#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
                add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */

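/*
 * These are variables rather than constants so that pagetable_init()
 * can OR in _PAGE_GLOBAL at boot when the CPU supports global pages.
 */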
unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        int i;

        /*
         * Init entries of the first-level page table to the
         * zero page, if they haven't already been set up.
         *
         * In a normal native boot, we'll be running on a
         * pagetable rooted in swapper_pg_dir, but not in PAE
         * mode, so this will end up clobbering the mappings
         * for the lower 24Mbytes of the address space,
         * without affecting the kernel address space.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
                set_pgd(&base[i],
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

        /* Make sure kernel address space is empty so that a pagetable
           will be allocated for it. */
        memset(&base[USER_PTRS_PER_PGD], 0,
               KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
        paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE).  The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        unsigned long vaddr, end;
        pgd_t *pgd_base = swapper_pg_dir;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

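/* Non-zero once NX has been enabled via EFER in set_nx() below. */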
int nx_enabled = 0;

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else if (!strcmp(str, "off")) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        } else
                return -EINVAL;

        return 0;
}
early_param("noexec", noexec_setup);

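/*
 * Probe and enable NX: CPUID leaf 0x80000001, EDX bit 20 advertises
 * the feature, which is switched on through the EFER MSR's NX-enable
 * bit.
 */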
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}

/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
        pte_t *pte;
        int ret = 1;

        if (!nx_enabled)
                goto out;

        pte = lookup_address(vaddr);
        BUG_ON(!pte);

        if (!pte_exec_kernel(*pte))
                ret = 0;

        if (enable)
                pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
        else
                pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
        pte_update_defer(&init_mm, vaddr, pte);
        __flush_tlb_all();
out:
        return ret;
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                        PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

#if 1 /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
               FIXADDR_START, FIXADDR_TOP,
               (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
               (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

               VMALLOC_START, VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               (unsigned long)__va(0), (unsigned long)high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               (unsigned long)&__init_begin, (unsigned long)&__init_end,
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

               (unsigned long)&_etext, (unsigned long)&_edata,
               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

               (unsigned long)&_text, (unsigned long)&_etext,
               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END                     > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                   > VMALLOC_END);
        BUG_ON((unsigned long)high_memory      > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
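/*
 * On i386, hot-added memory is assumed to end up in HIGHMEM (see the
 * note above online_page()), so the new range is registered with the
 * node's ZONE_HIGHMEM.
 */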
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif

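/*
 * pmd_cache is only used when PTRS_PER_PMD > 1, i.e. in PAE mode;
 * with a folded pmd there are no separate pmd pages to allocate.
 */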
struct kmem_cache *pmd_cache;

void __init pgtable_cache_init(void)
{
        size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);

        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              SLAB_PANIC,
                                              pmd_ctor);
                if (!SHARED_KERNEL_PMD) {
                        /* If we're in PAE mode and have a non-shared
                           kernel pmd, then the pgd size must be a
                           page size.  This is because the pgd_list
                           links through the page structure, so there
                           can only be one pgd per page for this to
                           work. */
                        pgd_size = PAGE_SIZE;
                }
        }
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "	movb %0,%1	\n"	/* read from the read-only test page */
                "1:	movb %1,%0	\n"	/* write it back - faults iff WP is honoured */
                "	xorl %2,%2	\n"	/* only reached if the write succeeded: clear flag */
                "2:			\n"
                ".section __ex_table,\"a\"\n"
                "	.align 4	\n"
                "	.long 1b,2b	\n"	/* on a fault at 1:, resume at 2:, leaving flag == 1 */
                ".previous		\n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() <= 1)
#endif
        {
                change_page_attr(virt_to_page(start),
                                 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
                printk("Write protecting the kernel text: %luk\n", size >> 10);
        }
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        change_page_attr(virt_to_page(start),
                         size >> PAGE_SHIFT, PAGE_KERNEL_RO);
        printk("Write protecting the kernel read-only data: %luk\n",
               size >> 10);

        /*
         * change_page_attr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif