/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

static unsigned long page_table_shareable(struct vm_area_struct *svma,
                                struct vm_area_struct *vma,
                                unsigned long addr, pgoff_t idx)
{
        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
                                svma->vm_start;
        unsigned long sbase = saddr & PUD_MASK;
        unsigned long s_end = sbase + PUD_SIZE;

        /* Allow segments to share if only one is marked locked */
        unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
        unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

        /*
         * Match the virtual addresses, permissions and the alignment of
         * the page table page.
         */
        if (pmd_index(addr) != pmd_index(saddr) ||
            vm_flags != svm_flags ||
            sbase < svma->vm_start || svma->vm_end < s_end)
                return 0;

        return saddr;
}

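/*
 * A pmd page table is shareable only for a VM_MAYSHARE mapping that
 * fully covers the PUD_SIZE-aligned region containing addr.
 */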
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;
        unsigned long end = base + PUD_SIZE;

        /*
         * check on proper vm_flags and page table alignment
         */
        if (vma->vm_flags & VM_MAYSHARE &&
            vma->vm_start <= base && end <= vma->vm_end)
                return 1;
        return 0;
}

/*
 * Search for a shareable pmd page for hugetlb.  In any case it calls
 * pmd_alloc() and returns the corresponding pte.  While this is not
 * necessary for the !shared pmd case because we can allocate the pmd
 * later as well, it makes the code much cleaner.  pmd allocation is
 * essential for the shared case because pud has to be populated inside
 * the same i_mmap_mutex section - otherwise racing tasks could either
 * miss the sharing (see huge_pte_offset) or select a bad pmd for
 * sharing.
 */
static pte_t *
huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
        struct vm_area_struct *vma = find_vma(mm, addr);
        struct address_space *mapping = vma->vm_file->f_mapping;
        pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
        struct prio_tree_iter iter;
        struct vm_area_struct *svma;
        unsigned long saddr;
        pte_t *spte = NULL;
        pte_t *pte;

        if (!vma_shareable(vma, addr))
                return (pte_t *)pmd_alloc(mm, pud, addr);

        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;

                saddr = page_table_shareable(svma, vma, addr, idx);
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
                                get_page(virt_to_page(spte));
                                break;
                        }
                }
        }

        if (!spte)
                goto out;

        spin_lock(&mm->page_table_lock);
        if (pud_none(*pud))
                pud_populate(mm, pud,
                             (pmd_t *)((unsigned long)spte & PAGE_MASK));
        else
                put_page(virt_to_page(spte));
        spin_unlock(&mm->page_table_lock);
out:
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
        mutex_unlock(&mapping->i_mmap_mutex);
        return pte;
}

/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is refcounted at the time of mapping.  If the pte
 * is shared (indicated by page_count > 1), unmapping is achieved by
 * clearing the pud and decrementing the refcount.  If count == 1, the pte
 * page is not shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 successfully unmapped a shared pte page
 *          0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        pgd_t *pgd = pgd_offset(mm, *addr);
        pud_t *pud = pud_offset(pgd, *addr);

        BUG_ON(page_count(virt_to_page(ptep)) == 0);
        if (page_count(virt_to_page(ptep)) == 1)
                return 0;

        pud_clear(pud);
        put_page(virt_to_page(ptep));
        *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
        return 1;
}

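/*
 * Return a "pte" mapping a huge page of size sz at addr, allocating
 * intermediate levels as needed.  For a PUD_SIZE page the pud entry
 * itself serves as the pte; for a PMD_SIZE page we first try to share
 * an existing pmd page table (huge_pmd_share) before allocating one.
 */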
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
                        pte = (pte_t *)pud;
                } else {
                        BUG_ON(sz != PMD_SIZE);
                        if (pud_none(*pud))
                                pte = huge_pmd_share(mm, addr, pud);
                        else
                                pte = (pte_t *)pmd_alloc(mm, pud, addr);
                }
        }
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

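/*
 * Walk the page tables for addr and return the huge "pte", or NULL when
 * an intermediate level is missing.  A large (PSE) pud is itself the
 * pte; otherwise the pmd entry is returned.
 */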
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud)) {
                        if (pud_large(*pud))
                                return (pte_t *)pud;
                        pmd = pmd_offset(pud, addr);
                }
        }
        return (pte_t *)pmd;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn = address / PAGE_SIZE;
        struct vm_area_struct *vma;
        struct page *page;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

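/*
 * x86 finds huge pages through the regular page-table walkers above;
 * returning -EINVAL here makes follow_page() fall back to that path.
 */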
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

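/* A pmd or pud maps a huge page when its _PAGE_PSE (page size) bit is set. */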
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}

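/*
 * Return the subpage backing address: the compound head page plus the
 * offset of address within the PMD- (or PUD-) sized region.
 */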
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
                pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}

#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
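/*
 * Bottom-up search: scan upwards from free_area_cache for a gap big
 * enough for the mapping, keeping candidates aligned to the huge page
 * size.  cached_hole_size records the largest hole skipped below
 * free_area_cache, so smaller requests restart from TASK_UNMAPPED_BASE
 * rather than leaving those holes unused.
 */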
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long base = mm->mmap_base;
        unsigned long addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        unsigned long start_addr;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        start_addr = mm->free_area_cache;

        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma)
                        return addr;

                if (addr + len <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else if (mm->free_area_cache == vma->vm_end) {
                        /* pull free_area_cache down to the first hole */
                        mm->free_area_cache = vma->vm_start;
                        mm->cached_hole_size = largest_hole;
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (start_addr != base) {
                mm->free_area_cache = base;
                largest_hole = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

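/*
 * Common entry point: validate the length, honour MAP_FIXED and any
 * usable aligned hint, then dispatch to the bottom-up or top-down
 * search to match the mm's normal get_unmapped_area policy.
 */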
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_X86_64
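/*
 * Parse the hugepagesz= boot option, e.g. "hugepagesz=2M", or
 * "hugepagesz=1G" on CPUs with gbpages support.
 */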
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);

        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && cpu_has_gbpages) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif