/* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $
 *
 *  linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002, 2004  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>

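/*
 * mmu_gathers is the per-CPU state used by the generic TLB gather code;
 * swapper_pg_dir is the kernel's reference page table, cleared and loaded
 * into MMU.TTB by paging_init() below.
 */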
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;

#ifdef CONFIG_MMU
/* It'd be good if these lines were in the standard header file. */
#define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(NODE_DATA(0)->bdata->node_low_pfn)
#endif

void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

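/*
 * Walk mem_map and print a summary of memory usage: the number of pages
 * of RAM, and how many of them are reserved, shared, or in the swap cache.
 */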
void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

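/*
 * Walk the kernel page tables for 'addr', install a PTE mapping it to the
 * physical address 'phys' with protection 'prot', and flush the stale TLB
 * entry for the current ASID. Used by __set_fixmap() below.
 */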
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	__flush_tlb_page(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch. We don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way of
 * the memory-mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (i.e., we may want this for
 * the vsyscall page.)
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	/*
	 * Set up some defaults for the zone sizes. These should be safe
	 * regardless of discontiguous memory or MMU settings.
	 */
	zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
	zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
#endif

#ifdef CONFIG_MMU
	/*
	 * If we have an MMU and want to be using it, we need to adjust
	 * the zone sizes accordingly, in addition to turning it on.
	 */
	{
		unsigned long max_dma, low, start_pfn;

		/* We don't need to map the kernel through the TLB, as
		 * it is permanently mapped using P1. So clear the
		 * entire pgd. */
		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

		/* Turn on the MMU */
		enable_mmu();

		/* Fix up the zone sizes */
		start_pfn = START_PFN;
		max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
		low = MAX_LOW_PFN;

		if (low < max_dma) {
			zones_size[ZONE_DMA] = low - start_pfn;
			zones_size[ZONE_NORMAL] = 0;
		} else {
			zones_size[ZONE_DMA] = max_dma - start_pfn;
			zones_size[ZONE_NORMAL] = low - max_dma;
		}
	}

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
	/*
	 * If we don't have CONFIG_MMU set and the processor in question
	 * still has an MMU, care needs to be taken to make sure it doesn't
	 * stay on. Since the boot loader could have potentially already
	 * turned it on, and we clearly don't want it, we simply turn it off.
	 *
	 * We don't need to do anything special for the zone sizes, since the
	 * default values that were already configured up above should be
	 * satisfactory.
	 */
	disable_mmu();
#endif
	NODE_DATA(0)->node_mem_map = NULL;
	free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
}

static struct kcore_list kcore_mem, kcore_vmalloc;

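/*
 * mem_init() hands the bootmem pages over to the page allocator, sets up
 * the initial copy_page()/clear_page() implementations, registers the
 * memory and vmalloc ranges with /proc/kcore, and prints the boot-time
 * memory banner.
 */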
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	extern unsigned long memory_start;

#ifdef CONFIG_MMU
	high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
#else
	extern unsigned long memory_end;

	high_memory = (void *)(memory_end & PAGE_MASK);
#endif

	max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Set up wrappers for copy/clear_page(); these will get overridden
	 * later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(mem_map+tmp))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk reserved, %dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_mapnr << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

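/*
 * Release the pages between __init_begin and __init_end (the __init text
 * and data) back to the page allocator once booting has completed.
 */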
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
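/*
 * Release the pages that held the initial ramdisk image back to the page
 * allocator once its contents are no longer needed.
 */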
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif