/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

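/*
 * Dump a summary of memory usage to the kernel log: for every online
 * node, walk its spanned pages under the pgdat resize lock (so memory
 * hotplug cannot change the range underneath us) and count highmem,
 * reserved, swap-cached and shared pages.
 */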
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
                global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
                global_page_state(NR_SLAB_RECLAIMABLE) +
                global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
                global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (pgprot_val(flags))
                /* <pfn,flags> stored as-is, to permit clearing entries */
                set_pte(pte, pfn_pte(pfn, flags));
        else
                pte_clear(&init_mm, vaddr, pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

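/*
 * Install a single fixmap entry.  The fixmaps counter records that at
 * least one entry exists, which lets reserve_top_address() below refuse
 * to move __FIXADDR_TOP once any fixmap has been established.
 */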
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
        fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
        BUG_ON(fixmaps > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
        __VMALLOC_RESERVE += reserve;
}

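/*
 * PTE pages are single zeroed pages.  Kernel page tables must live in
 * lowmem, but with CONFIG_HIGHPTE user page-table pages may come from
 * highmem (note __GFP_HIGHMEM below) and are then mapped only
 * transiently when they need to be touched.
 */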
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        return pte;
}

void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

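/*
 * The pgd list is threaded through struct page itself: page->index
 * holds the pointer to the next pgd page, and page_private() points
 * back at the slot that points to this page, so both insertion and
 * removal are O(1).
 */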
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}

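/*
 * pgd_ctor() and pgd_dtor() are the quicklist callbacks used by
 * pgd_alloc(), pgd_free() and check_pgt_cache() below: the constructor
 * sets up the kernel portion of a fresh pgd and, where required, links
 * it on pgd_list; the destructor unlinks it again.  Pages recycled
 * straight off the quicklist are expected to still be in their
 * constructed state.
 */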
#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
void pgd_ctor(void *pgd)
{
        unsigned long flags;

        /* !PAE, no pagetable sharing */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* must happen under lock */
        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
        paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
void pgd_ctor(void *pgd)
{
        /* PAE, kernel PMD may be shared */

        if (SHARED_KERNEL_PMD) {
                clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
        } else {
                unsigned long flags;

                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
                pgd_list_add(pgd);
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
}
#endif  /* PTRS_PER_PMD */

void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

/* If we allocate a pmd for part of the kernel address space, then
   make sure it's initialized with the appropriate kernel mappings.
   Otherwise use a cached zeroed pmd.  */
static pmd_t *pmd_cache_alloc(int idx)
{
        pmd_t *pmd;

        if (idx >= USER_PTRS_PER_PGD) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

                if (pmd)
                        memcpy(pmd,
                               (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
                               sizeof(pmd_t) * PTRS_PER_PMD);
        } else
                pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

        return pmd;
}

static void pmd_cache_free(pmd_t *pmd, int idx)
{
        if (idx >= USER_PTRS_PER_PGD)
                free_page((unsigned long)pmd);
        else
                kmem_cache_free(pmd_cache, pmd);
}

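/*
 * With PAE (PTRS_PER_PMD > 1), each of the UNSHARED_PTRS_PER_PGD slots
 * gets its own pmd page; the "+ 1" in __pgd(1 + __pa(pmd)) sets the
 * present bit on the new pgd entry.
 */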
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = pmd_cache_alloc(i);

                if (!pmd)
                        goto out_oom;

                paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--) {
                pgd_t pgdent = pgd[i];
                void *pmd = (void *)__va(pgd_val(pgdent)-1);
                paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
                pmd_cache_free(pmd, i);
        }
        quicklist_free(0, pgd_dtor, pgd);
        return NULL;
}

void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                        pgd_t pgdent = pgd[i];
                        void *pmd = (void *)__va(pgd_val(pgdent)-1);
                        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
                        pmd_cache_free(pmd, i);
                }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        quicklist_free(0, pgd_dtor, pgd);
}

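/*
 * Trim the per-cpu quicklist of cached pgd pages; pgd_dtor() is applied
 * to each pgd that is actually handed back to the page allocator.
 */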
void check_pgt_cache(void)
{
        quicklist_trim(0, pgd_dtor, 25, 16);
}