#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

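/*
 * Dump a summary of memory usage to the kernel log: walk every page
 * of each online node (tickling the NMI watchdog as we go) and count
 * total, highmem, reserved, swap-cached and shared pages, then print
 * the global VM counters.
 */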
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
		global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame.  pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.  Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

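/*
 * fixmaps counts how many fixmap entries have been set up.
 * __FIXADDR_TOP starts at the last page below 4GB and may only be
 * lowered (by reserve_top_address) while fixmaps is still zero.
 */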
static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}

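/*
 * Allocate a zeroed page for a kernel pagetable.  __GFP_REPEAT asks
 * the allocator to retry harder before failing the allocation.
 */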
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

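/*
 * Allocate a pte page for user pagetables.  With CONFIG_HIGHPTE the
 * page may be placed in highmem; pgtable_page_ctor() then prepares
 * the struct page for use as a pagetable (e.g. the split pte lock).
 */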
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

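/*
 * Initialize a freshly allocated pgd: clear the usermode entries, copy
 * the kernel mappings from swapper_pg_dir where the kernel level is
 * shared, and keep the pgd on pgd_list when it is not, so that later
 * kernel mapping updates can be propagated by hand.
 */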
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

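/* Undo pgd_ctor(): take the pgd off pgd_list again. */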
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else	/* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif	/* CONFIG_X86_PAE */

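/*
 * Allocate and initialize a new pgd.  mm->pgd is set before the
 * constructor runs so the allocation hooks can see it; if the PAE pmd
 * prepopulation fails, everything is unwound and NULL is returned.
 */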
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}

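/*
 * Free a pte page via the mmu_gather: undo pgtable_page_ctor(), tell
 * paravirt the page no longer holds pagetable entries, and queue it
 * for freeing once the TLB has been flushed.
 */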
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_PAE

void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#endif

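/*
 * Transitional debugging check: warn once if the old (v1) and new (v2)
 * pmd_bad() implementations ever disagree, and keep returning the v1
 * result.
 */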
int pmd_bad(pmd_t pmd)
{
	WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));

	return pmd_bad_v1(pmd);
}