/*
 * File:         arch/blackfin/mm/init.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:
 *
 * Modified:
 *               Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <asm/bfin-global.h>
#include <asm/l1layout.h>
#include "blackfin_sram.h"

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

static unsigned long empty_bad_page;

unsigned long empty_zero_page;

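/*
 * Walk mem_map and print a summary of page usage: each page is counted
 * as reserved, swap-cached, free, or shared (references beyond the
 * first user).
 */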
void show_mem(void)
{
	unsigned long i;
	int free = 0, total = 0, reserved = 0, shared = 0;
	int cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (!page_count(mem_map + i))
			free++;
		else
			shared += page_count(mem_map + i) - 1;
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d free pages\n", free);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.  It allocates the bad and zero
 * pages and hands all available memory to the zone allocator.
 */
void __init paging_init(void)
{
	/*
	 * make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world
	 */
	unsigned long end_mem = memory_end & PAGE_MASK;

	pr_debug("start_mem is %#lx   virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set the uaccess address space limit; there are no m68k-style
	 * SFC/DFC registers to program on Blackfin.
	 */
	set_fs(KERNEL_DS);

	pr_debug("free_area_init -> start_mem is %#lx   virtual_end is %#lx\n",
		PAGE_ALIGN(memory_start), end_mem);

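	/*
	 * All of DRAM is handed to ZONE_DMA; ZONE_NORMAL (and ZONE_HIGHMEM,
	 * if configured) stay empty on this port.
	 */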
	{
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		zones_size[ZONE_NORMAL] = 0;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = 0;
#endif
		free_area_init(zones_size);
	}
}

void __init mem_init(void)
{
	unsigned int codek = 0, datak = 0, initk = 0;
	unsigned int reservedpages = 0, freepages = 0;
	unsigned long tmp;
	unsigned long start_mem = memory_start;
	unsigned long end_mem = memory_end;

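	/* high_memory marks the page-aligned end of usable DRAM. */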
	end_mem &= PAGE_MASK;
	high_memory = (void *)end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	max_mapnr = num_physpages = MAP_NR(high_memory);
	printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);

	/* This will put all memory onto the freelists. */
	totalram_pages = free_all_bootmem();

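	/*
	 * Count the pages that are still marked reserved after the
	 * bootmem hand-off.
	 */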
	reservedpages = 0;
	for (tmp = 0; tmp < max_mapnr; tmp++)
		if (PageReserved(pfn_to_page(tmp)))
			reservedpages++;
	freepages = max_mapnr - reservedpages;

	/* don't count the kernel image (_rambase.._ramstart) as reserved */
	reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
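	/*
	 * With the anomaly 05000263 workaround active, the pages between
	 * memory_end and _ramend (minus the uncached DMA region) are not
	 * managed by the allocator, so report them as reserved.
	 */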
	reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
#endif

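	/* Kernel image breakdown in KiB: text, init, and the remainder as data. */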
	codek = (_etext - _stext) >> 10;
	initk = (__init_end - __init_begin) >> 10;
	datak = ((_ramstart - _rambase) >> 10) - codek - initk;

	printk(KERN_INFO
		"Memory available: %luk/%luk RAM, "
		"(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n",
		(unsigned long)freepages << (PAGE_SHIFT - 10), _ramend >> 10,
		initk, codek, datak, DMA_UNCACHED_REGION >> 10,
		(reservedpages << (PAGE_SHIFT - 10)));

	/* Initialize the blackfin L1 Memory. */
	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();

	/*
	 * Allocate this once; never free it.  We assume this gives us a
	 * pointer to the start of L1 scratchpad memory; panic if it
	 * doesn't.
	 */
	tmp = (unsigned long)l1sram_alloc(sizeof(struct l1_scratch_task_info));
	if (tmp != (unsigned long)L1_SCRATCH_TASK_INFO) {
		printk(KERN_EMERG "mem_init(): Did not get the right address from l1sram_alloc: %08lx != %08lx\n",
			tmp, (unsigned long)L1_SCRATCH_TASK_INFO);
		panic("No L1, time to give up\n");
	}
}

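/*
 * Return the whole pages in [begin, end) to the page allocator and
 * credit them to totalram_pages.
 */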
static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	/* step in whole pages so we never free part of a page */
	for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

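/*
 * Give the initrd pages back to the allocator once the initrd is no
 * longer needed; skipped when the MPU is enabled.
 */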
#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
	free_init_pages("initrd memory", start, end);
#endif
}
#endif

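/*
 * Discard the .init sections once booting is done; only applies to RAM
 * kernels without the MPU.
 */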
void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
#endif
}