/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006 Helge Deller (deller@gmx.de)
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
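
/* Per-CPU gather state for the generic TLB shootdown code. */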
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef __LP64__
#define MAX_MEM         (~0UL)
#else /* !__LP64__ */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit __read_mostly = MAX_MEM;
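
/*
 * Scan the command line by hand for a "mem=" argument.  memparse()
 * understands the usual K/M/G suffixes, so e.g. "mem=512M" caps
 * usable RAM at 512 MB.
 */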
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
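
/*
 * Largest hole tolerated between physical memory ranges on contiguous
 * (non-DISCONTIGMEM) kernels: 1 GB (0x40000000 bytes), expressed in
 * page frames.  Ranges further apart than this are thrown away.
 */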
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
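
/*
 * Turn the firmware-reported memory ranges in pmem_ranges[] into a
 * working bootmem allocator: sort the ranges, drop or merge them as
 * the memory model requires, apply any "mem=" limit, register the
 * "System RAM" resources, place the bootmap right after the kernel
 * image, and reserve page zero, the kernel text/data/bss, the
 * bootmap itself and any initrd.
 */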
static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif

	/*
	 * Initialize and free the full memory in each range.
	 * Note that the only writes these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						 bootmap_pfn,
						 start_pfn,
						 (start_pfn + npages));
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT));
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* The IOMMU is always used to reach "high mem" on boxes that can
	 * hold more memory than a PCI device could address by direct
	 * DMA.  ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
			(unsigned long)(_end - _text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start), __pa(initrd_start) + initrd_reserve,
				mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}
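
/*
 * Free the memory occupied by __init code and data back to the page
 * allocator once boot is complete.  On debug kernels the freed text
 * is first overwritten with zeroes, which decode as break
 * instructions on parisc, so a stray jump into freed init code traps
 * immediately.
 */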
void free_initmem(void)
{
	unsigned long addr, init_begin, init_end;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#ifdef CONFIG_DEBUG_KERNEL
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(__init_begin, 0x00,
		(unsigned long)__init_end - (unsigned long)__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	/* align __init_begin and __init_end to page size,
	   ignoring linker script where we might have tried to save RAM */
	init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
	init_end   = PAGE_ALIGN((unsigned long)(__init_end));
	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new LED state on systems shipped with an LED state panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (init_end - init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between the top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas.  That means that any out-of-bounds memory
 * accesses will hopefully be caught.  The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
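
/*
 * SET_MAP_OFFSET() rounds an address up to the next VM_MAP_OFFSET
 * boundary strictly above it, so consecutive areas always end up
 * separated by a hole; VM_MAP_OFFSET must remain a power of two for
 * the mask trick to work.  For example:
 *
 *	SET_MAP_OFFSET(0x10004000) == (void *)0x10008000
 *	SET_MAP_OFFSET(0x10008000) == (void *)0x10010000
 */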

void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

}

unsigned long *empty_zero_page __read_mostly;

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:	 %6ldkB\n",
		nr_swap_pages<<(PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ", zone_to_nid(zl->zones[k]),
						zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}
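
/*
 * Build kernel page table entries mapping the virtual range
 * [start_vaddr, start_vaddr + size) onto the physical range starting
 * at start_paddr, allocating intermediate pmd/pte pages from bootmem
 * as needed.  On 4kB-page kernels, everything from _text up to the
 * start of the data segment is mapped read-only, except the gateway
 * page and the fault vector, which must stay writable so the HPMC
 * checksum can be updated.
 */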
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),
								     PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
						&& address != fv_addr
						&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif
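
/*
 * Top-level MM bring-up: set up bootmem, build the kernel page
 * tables, map the gateway page, start from a known-clean cache/TLB
 * state, and then hand every memory range to free_area_init_node().
 * All memory goes into ZONE_DMA since the IOMMU can reach it anyway.
 */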
void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
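
/*
 * space_id[] is a bitmap with one bit per space id: NR_SPACE_IDS bits
 * packed into longs, e.g. 262144 / (8 * sizeof(long)) = 4096 longs on
 * a 64-bit PA20 kernel.  Recycling kicks in once half of all ids sit
 * on the dirty list (freed, but not yet flushed from the TLB).
 */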
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
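
/*
 * Hand out a free space id.  If the free pool is empty but dirty ids
 * exist, force a full TLB flush, which recycles the dirty ids back
 * into the free pool.  The id is returned already shifted into
 * position for loading into a space register.
 */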
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
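
/*
 * Freeing is deferred: the id is only moved to the dirty bitmap here,
 * and becomes allocatable again after the next full TLB flush, since
 * stale translations tagged with it may still be cached in the TLB.
 */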
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif