/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

/* max amount of RAM to use */
unsigned long __max_memory;

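/*
 * Release the pages holding the kernel's __init text and data once boot
 * has finished with them: poison each page, clear its reserved flag,
 * reset its reference count and hand it back to the page allocator.
 */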
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

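/*
 * Return the pages that held the initial ramdisk image to the page
 * allocator once the initrd is no longer needed.
 */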
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

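/*
 * Describe the machine's memory to /proc/kcore: register one entry for
 * each LMB memory region plus one covering the vmalloc area, so the
 * whole of kernel-visible memory can be examined through /proc/kcore.
 */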
#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __FUNCTION__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif

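/*
 * Slab constructor for the page table caches: each newly allocated
 * object is cleared so that an empty page table starts out with all
 * entries invalid.
 */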
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
 * can't put it into the tables above, because HPAGE_SHIFT is not a
 * compile time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

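/*
 * Create the slab caches from which PGD/PUD/PMD page table pages are
 * allocated.  The caches are created with SLAB_PANIC, so failure to
 * create any of them halts the boot.
 */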
void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			"for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

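/*
 * Back the requested range of the vmemmap with real memory: for each
 * linear-mapping-sized chunk that is not already populated, allocate a
 * block on the requested node and bolt a kernel read/write mapping for
 * it into the hash table.
 */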
int __meminit vmemmap_populate(struct page *start_page,
					unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					__pa(p), mode_rw, mmu_linear_psize,
					mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif