/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

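/*
 * page_table_shareable() checks whether the pmd page covering @addr in @vma
 * can be shared with @svma, another mapping of the same file: the candidate
 * address in @svma must fall in the same pmd slot, the vm_flags (ignoring
 * VM_LOCKED) must match, and the whole PUD_SIZE range must lie inside @svma.
 * Returns the shareable address within @svma, or 0 if sharing is not possible.
 */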
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_mutex section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
static pte_t *
huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	mutex_unlock(&mapping->i_mmap_mutex);
	return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with vma->vm_mm->page_table_lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

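/*
 * Allocate the page-table entry that will map a huge page at @addr.  For
 * 1GB pages (sz == PUD_SIZE) the pud entry itself is used as the "pte";
 * for 2MB pages a pmd is allocated, first trying to share an existing pmd
 * page with other mappings of the same file via huge_pmd_share().
 */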
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

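/*
 * Look up the page-table entry for a huge mapping at @addr without
 * allocating.  Returns the pud entry for a 1GB mapping, otherwise a pointer
 * to the pmd slot (which may still be empty), or NULL when the pgd or pud
 * level is not populated.
 */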
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_large(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	int length = 1;
	int nr;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[(address >> PAGE_SHIFT) % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

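/*
 * pmd_huge()/pud_huge() report whether an entry maps a huge page, i.e.
 * whether the PSE (page size) bit is set in the entry.
 */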
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

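/*
 * follow_huge_pmd()/follow_huge_pud() return the struct page of the 4KB
 * base page, within the huge page, that contains @address.
 */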
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
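/*
 * Bottom-up search for a free, huge-page-aligned area between
 * TASK_UNMAPPED_BASE and TASK_SIZE, using the generic vm_unmapped_area()
 * helper.
 */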
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

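/*
 * Top-down search below the process's mmap_base for a free,
 * huge-page-aligned area; on failure retry bottom-up (see the comment in
 * the body).
 */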
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

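/*
 * Arch hook for hugetlbfs mmap(): validate the length and any hinted
 * address, then use the bottom-up or top-down allocator to match the
 * process's normal mmap layout.
 */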
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/

#ifdef CONFIG_X86_64
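/*
 * Parse the "hugepagesz=" boot parameter: register an hstate for 2MB pages,
 * or for 1GB pages when the CPU supports gbpages; reject anything else.
 */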
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif