#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>

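/*
 * Page-frame numbers delimiting the buffer used for early page-table
 * allocation: pages in [pgt_buf_start, pgt_buf_end) have been handed
 * out, pages in [pgt_buf_end, pgt_buf_top) are still available.
 */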
unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
                                = 1
#endif
;

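/*
 * Calculate a worst-case estimate of the space needed for the kernel
 * direct-mapping page tables covering [0, end), find a block of that
 * size below max_pfn_mapped via memblock and record it in
 * pgt_buf_start/end/top. The actual memblock reservation happens
 * later, in init_memory_mapping().
 */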
static void __init find_early_table_space(unsigned long end, int use_pse,
                                          int use_gbpages)
{
        unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
        phys_addr_t base;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

        if (use_gbpages) {
                unsigned long extra;

                extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
        } else
                pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
                extra += PMD_SIZE;
#endif
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
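        /*
         * Illustrative numbers (assuming x86_64, 4k pages, 8-byte
         * entries) for end = 1 GiB without PSE or gbpages: puds = 1
         * and pmds = 512, one table page each, plus ptes = 262144,
         * i.e. 2 MiB of pte pages -- about 2 MiB + 8 KiB in total.
         * With use_pse only the non-2M-aligned remainder of the range
         * needs pte pages at all.
         */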

#ifdef CONFIG_X86_32
        /* for fixmap */
        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
#endif
        good_end = max_pfn_mapped << PAGE_SHIFT;

        base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
        if (base == MEMBLOCK_ERROR)
                panic("Cannot find space for the kernel page tables");

        pgt_buf_start = base >> PAGE_SHIFT;
        pgt_buf_end = pgt_buf_start;
        pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
}

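/*
 * Default (native) implementation of x86_init.mapping.pagetable_reserve;
 * Xen overrides this hook because it must also fix up the protections of
 * the unused part of the page-table buffer (see the comment near the end
 * of init_memory_mapping() below).
 */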
void __init native_pagetable_reserve(u64 start, u64 end)
{
        memblock_x86_reserve_range(start, end, "PGTABLE");
}

struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

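/*
 * Maximum number of sub-ranges init_memory_mapping() may split a region
 * into: a 4k head, a 2M body and a 4k tail on 32-bit; 64-bit adds a 1G
 * body and a 2M tail in between.
 */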
#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

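/*
 * Append the pfn range [start_pfn, end_pfn) with the given page size mask
 * to mr[] if it is not empty, and return the updated number of ranges.
 */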
static int __meminit save_mr(struct map_range *mr, int nr_range,
                             unsigned long start_pfn, unsigned long end_pfn,
                             unsigned long page_size_mask)
{
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
                mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
                mr[nr_range].page_size_mask = page_size_mask;
                nr_range++;
        }

        return nr_range;
}

/*
 * Set up the direct mapping of physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and takes pages directly from
 * physical memory; to access them, they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;
        unsigned long ret = 0;
        unsigned long pos;

        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
        int use_pse, use_gbpages;

        printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support
         * splitting large pages into small ones in interrupt context, etc.
         */
        use_pse = use_gbpages = 0;
#else
        use_pse = cpu_has_pse;
        use_gbpages = direct_gbpages;
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        if (use_gbpages)
                page_size_mask |= 1 << PG_LEVEL_1G;
        if (use_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;

        memset(mr, 0, sizeof(mr));
        nr_range = 0;

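        /*
         * Split [start, end) into naturally aligned sub-ranges so that
         * each can be mapped with the largest page size its alignment
         * allows: a 4k head up to the first big-page boundary, a
         * 2M-mapped body, on 64-bit a 1G-mapped body plus a 2M-mapped
         * tail, and a final 4k tail. Contiguous ranges with identical
         * masks are merged further below.
         */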
        /* head if not big page aligned: map with 4k pages */
        start_pfn = start >> PAGE_SHIFT;
        pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        if (pos == 0)
                end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
        else
                end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
        end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
#endif
        if (end_pfn > (end >> PAGE_SHIFT))
                end_pfn = end >> PAGE_SHIFT;
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
                pos = end_pfn << PAGE_SHIFT;
        }

        /* big page (2M) range */
        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
        end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
        if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
                end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
                pos = end_pfn << PAGE_SHIFT;
        }

#ifdef CONFIG_X86_64
        /* big page (1G) range */
        start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
                         << (PUD_SHIFT - PAGE_SHIFT);
        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
                pos = end_pfn << PAGE_SHIFT;
        }

        /* tail not 1G aligned: map with 2M pages */
        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
                pos = end_pfn << PAGE_SHIFT;
        }
#endif

        /* tail not 2M aligned: map with 4k pages */
        start_pfn = pos>>PAGE_SHIFT;
        end_pfn = end>>PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* try to merge adjacent ranges with the same page size */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
                if (mr[i].end != mr[i+1].start ||
                    mr[i].page_size_mask != mr[i+1].page_size_mask)
                        continue;
                /* fold mr[i+1] into mr[i], then re-examine slot i */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
                        (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i--].start = old_start;
                nr_range--;
        }
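        /*
         * For example (illustrative, not from a real boot log): with
         * PSE unavailable every range above was saved with mask 0, so
         * [0, 2M) + [2M, 1G) + [1G, end) collapse into the single
         * range [0, end) mapped with 4k pages.
         */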

        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " %010lx - %010lx page %s\n",
                                mr[i].start, mr[i].end,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
                         (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

        /*
         * Find space for the kernel direct mapping tables.
         *
         * Later we should allocate these tables in the node local to the
         * memory being mapped. Unfortunately this is currently done before
         * the nodes are discovered.
         */
        if (!after_bootmem)
                find_early_table_space(end, use_pse, use_gbpages);

        for (i = 0; i < nr_range; i++)
                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                                                   mr[i].page_size_mask);

#ifdef CONFIG_X86_32
        early_ioremap_page_table_range_init();

        load_cr3(swapper_pg_dir);
#endif

        __flush_tlb_all();

        /*
         * Reserve the kernel pagetable pages we used (pgt_buf_start -
         * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
         * so that they can be reused for other purposes.
         *
         * On native it just means calling memblock_x86_reserve_range; on
         * Xen it also means marking RW the pagetable pages that we
         * allocated before but that haven't been used yet.
         *
         * In fact on Xen we mark RO the whole range pgt_buf_start -
         * pgt_buf_top, because we have to make sure that when
         * init_memory_mapping reaches the pagetable pages area, it maps
         * RO all the pagetable pages, including the ones that are beyond
         * pgt_buf_end at that time.
         */
        if (!after_bootmem && pgt_buf_end > pgt_buf_start)
                x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
                                PFN_PHYS(pgt_buf_end));

        if (!after_bootmem)
                early_memtest(start, end);

        return ret >> PAGE_SHIFT;
}


/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI MMIO resources as well as potential BIOS/ACPI data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
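        /*
         * Order matters here: the low 1MB (pages 0-256, note the
         * inclusive bound) is allowed even though it is RAM, exclusive
         * iomem is then refused, remaining non-RAM (MMIO, BIOS/ACPI)
         * is allowed, and all other RAM is refused.
         */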
        if (pagenr <= 256)
                return 1;
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pagenr))
                return 1;
        return 0;
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;
        unsigned long begin_aligned, end_aligned;

        /* Make sure boundaries are page aligned */
        begin_aligned = PAGE_ALIGN(begin);
        end_aligned   = end & PAGE_MASK;

        if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
                begin = begin_aligned;
                end   = end_aligned;
        }

        if (begin >= end)
                return;

        addr = begin;

        /*
         * If debugging page accesses then do not free this memory but
         * mark it not present - any buggy init-section access will
         * create a kernel page fault:
         */
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
                begin, end);
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
        /*
         * We just marked the kernel text read-only above. Now that we
         * are going to free part of it, we need to make that part
         * writable and non-executable first.
         */
        set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

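        /*
         * Hand each page back to the page allocator: clear PG_reserved,
         * reset the reference count, poison the contents to catch
         * use-after-free of init data, then free it.
         */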
        for (; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
#endif
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        /*
         * end may not be page aligned, and we cannot align it here:
         * the decompressor could be confused by an aligned initrd_end.
         * The partial page at the end has already been reserved in
         *   - i386_start_kernel()
         *   - x86_64_start_kernel()
         *   - relocate_initrd()
         * so it is safe to PAGE_ALIGN() here and free that partial
         * page as well.
         */
        free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif