/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

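/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to find the
 * PTE mapping a kernel virtual address.  Only used below for the
 * pkmap and fixmap ranges, which are known to be mapped.
 */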
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

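/*
 * Return 1 if the given page frame number lies in real memory.  On
 * 32-bit a comparison against high_memory suffices; on 64-bit the
 * lmb regions must be searched, since memory need not be contiguous.
 */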
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                    (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

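/*
 * Choose the page protections for a user mapping of physical memory
 * (e.g. an mmap() of /dev/mem).  The platform may override this via
 * ppc_md.phys_mem_access_prot; by default anything that is not RAM
 * is mapped guarded and non-cacheable.
 */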
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

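/* Hand a hot-added page over to the page allocator. */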
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_NUMA
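/* Map a hot-added physical address to the NUMA node that owns it. */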
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

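/*
 * Hot-plug a memory section: create the linear mapping for the new
 * range, then hand its pages to the generic code via __add_pages().
 */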
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
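/*
 * Offline the pages in the given range; the arch-specific teardown
 * is still to come (see the comment before the out label).
 */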
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * walk_memory_resource() needs to make sure there are no holes in a
 * given memory range.  On PPC64 the range comes from /sysfs, so it is
 * guaranteed to be valid, non-overlapping and free of holes.  By the
 * time we get here (memory add or remove), /proc/device-tree has been
 * updated and is correct.  The only reason we would need to check
 * against the device tree is if we allowed user-land to specify a
 * memory range through a system call/ioctl etc. instead of doing
 * offline/online through /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        return (*func)(start_pfn, nr_pages, arg);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

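/*
 * Print a summary of memory usage to the console: total, highmem,
 * reserved, shared and swap-cached page counts, gathered per node
 * under the pgdat resize lock.
 */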
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                        lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/* Mark the holes between lmb memory regions as nosave, so that
 * hibernation does not try to save pages that don't exist. */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

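/*
 * Final memory accounting: release the bootmem pages to the page
 * allocator, count what remains reserved, free up any highmem that
 * is real RAM, and print the "Memory: ..." boot banner.
 */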
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               bsssize >> 10,
               initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

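/*
 * Flush the data cache and invalidate the instruction cache for a
 * whole page, mapping it first if the platform requires that.
 */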
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx and 64-bit there is no need to kmap since highmem is
         * not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
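
/*
 * Zero a page destined for user space and mark it i-cache dirty, so
 * that the icache gets flushed before the page is executed from.
 */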
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero-filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

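/*
 * Copy a page on behalf of user space (e.g. for copy-on-write) and,
 * as in clear_user_page() above, mark the destination i-cache dirty.
 */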
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation; however,
         * there are two problems.
         * Firstly, a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly, the first word in the GOT section is blrl, used
         * to establish the GOT address.  Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

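/*
 * Flush the icache for the bytes of a page written on behalf of user
 * space (e.g. when ptrace pokes text), so the new instructions are
 * visible to the CPU.
 */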
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the Linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated Linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as a write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question.  To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for Linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text.  We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}