/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

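/*
 * Highest virtual address usable for legacy 32-bit DMA: the start of
 * the identity-mapped kernel region plus 4 GB, i.e. the classical
 * ISA/32-bit DMA boundary expressed as a kernel virtual address.
 */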
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
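/*
 * Note: PG_arch_1 serves as a per-page "i-cache is clean" flag.  It is
 * set here (and in dma_mark_clean() below) once the page is known to be
 * coherent, and generic code clears it again when the page's contents
 * change, so the next executable mapping triggers a fresh flush.
 */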

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
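/*
 * Note: only pages lying entirely inside [addr, addr+size) are marked;
 * a partially written page may still hold stale instruction bytes
 * elsewhere, so it must keep taking the flush in
 * __ia64_sync_icache_dcache().
 */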

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
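/*
 * Illustration: "& -16" clears the low four bits, so e.g. a stack limit
 * of 0x80001f is treated as 0x800010, keeping the size 16-byte aligned
 * as the ia64 ABI expects; PAGE_ALIGN() then rounds the resulting RBS
 * bottom up to a page boundary.
 */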

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}
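/*
 * Note: allocation failures and already-present PTEs bail out silently
 * and the page is returned either way; the sole caller, setup_gate()
 * below, does not check for failure.
 */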

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
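	/*
	 * Worked example (illustrative): with 16KB pages, PAGE_SHIFT = 14
	 * and 8-byte PTEs give pte_bits = 3, so each of the three
	 * page-table levels resolves 14 - 3 = 11 bits and
	 * mapped_space_bits = 3*11 + 14 = 47 bits per region.
	 */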
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);
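	/*
	 * For illustration: with impl_va_bits = 51 and 16KB pages,
	 * vmlpt_bits = 51 - 14 + 3 = 40, so the VMLPT occupies the top
	 * 2^40 bytes of the region: pta = 2^61 - 2^40.
	 */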

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}
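/*
 * The return value is in the same units as the "i" argument: a pfn
 * offset relative to the node's first pfn.  Callers presumably use it
 * to skip over holes in vmem_map without probing the unmapped
 * struct page entries one at a time.
 */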

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}
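/*
 * Sketch of the expected invocation (as an efi_memmap_walk callback,
 * matching the other u64/u64/void* callbacks in this file):
 *
 *	efi_memmap_walk(create_mem_map_page_table, NULL);
 *
 * i.e. called once per EFI memory descriptor to populate the page
 * tables backing the virtual mem_map before the map is initialized.
 */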

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

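/*
 * Probe whether the vmem_map entry for this pfn is backed by a real
 * page: __get_user() returns -EFAULT instead of oopsing when the
 * struct page itself lies in an unmapped hole, and the second probe
 * handles entries that straddle a page boundary.
 */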
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
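/*
 * Note the static last_end: state is carried across callbacks within a
 * single (ordered) efi_memmap_walk(), so this is only safe to run once,
 * which is all the boot-time caller needs.
 */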

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);
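/*
 * Usage (illustrative): booting with "nolwsys" on the kernel command
 * line makes mem_init() below route every fsyscall slot to the
 * heavy-weight handler.
 */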

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);

	return ret;
}
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32). This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);