/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

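/*
 * Per-CPU quicklists caching recently freed page-table pages, so that
 * pgd/pmd/pte allocations can usually avoid the page allocator.
 * check_pgt_cache() below trims a list that has grown past
 * max_pgt_pages().
 */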
DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
DEFINE_PER_CPU(long, __pgtable_quicklist_size);

extern void ia64_tlb_init (void);

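/*
 * End of the DMA zone, as a kernel virtual address: ia64 keeps
 * DMA-limited allocations within the first 4GB of physical memory.
 */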
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

#define MIN_PGT_PAGES			25UL
#define MAX_PGT_FREES_PER_PASS		16L
#define PGT_FRACTION_OF_NODE_MEM	16

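/*
 * Upper bound on the size of this node's page-table quicklist: a fixed
 * fraction (1/PGT_FRACTION_OF_NODE_MEM) of the node's free pages, but
 * never less than MIN_PGT_PAGES.
 */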
static inline long
max_pgt_pages(void)
{
	u64 node_free_pages, max_pgt_pages;

#ifndef	CONFIG_NUMA
	node_free_pages = nr_free_pages();
#else
	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
#endif
	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
	return max_pgt_pages;
}

static inline long
min_pages_to_free(void)
{
	long pages_to_free;

	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
	return pages_to_free;
}

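/*
 * Trim this CPU's quicklist back down to max_pgt_pages(), freeing at most
 * MAX_PGT_FREES_PER_PASS pages per pass.  Preemption is briefly re-enabled
 * between passes so a long trimming run does not add scheduling latency.
 */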
void
check_pgt_cache(void)
{
	long pages_to_free;

	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
		return;

	preempt_disable();
	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
		while (pages_to_free--) {
			free_page((unsigned long)pgtable_quicklist_alloc());
		}
		preempt_enable();
		preempt_disable();
	}
	preempt_enable();
}

void
lazy_mmu_prot_update (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

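/*
 * Compute the bottom of the register backing store: STACK_TOP minus the
 * stack's hard rlimit, truncated to a 16-byte multiple and capped at
 * MAX_USER_STACK_SIZE.  The RBS grows upward from this address.
 */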
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
struct page *
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

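/*
 * Map the gate section at GATE_ADDR and apply the run-time patches it
 * needs (ia64_patch_gate()); called once from mem_init().
 */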
static void
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

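	/*
	 * A translation register (TR) pins a mapping in the TLB.  PSR.ic
	 * must be off while one is inserted, hence the
	 * ia64_clear_ic()/ia64_set_psr() bracket around ia64_itr() below.
	 */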
	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);
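	/*
	 * Worked example: with 16KB pages (PAGE_SHIFT = 14) and
	 * impl_va_bits = 51, vmlpt_bits = 51 - 14 + 3 = 40, so the VMLPT
	 * occupies the top 2^40 bytes of each region.
	 */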

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP

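/*
 * Back the portion of the virtual mem_map that covers [start, end) with
 * real memory: walk (and, where missing, build) the kernel page table for
 * that range, taking page-table pages and the struct page pages themselves
 * from the bootmem allocator on the owning node.
 */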
int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}

void
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

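/*
 * With a virtual mem_map, a pfn is valid only if its struct page entry is
 * actually backed by memory.  Probe the first and last byte of the entry
 * with __get_user(), which returns -EFAULT instead of faulting when the
 * backing page is missing; the middle clause skips the second probe when
 * both bytes lie on the same page.
 */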
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */

static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

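	/*
	 * The ia64 page-table code assumes that each level of the tree
	 * occupies exactly one page.
	 */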
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}