/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

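/*
 * Decide whether another mapping (svma) of the same file can share its
 * pmd page with vma for the pud range covering @addr: the address must
 * land in the same pmd slot, vm_flags must match, and svma must span
 * the entire pud range.  Returns the corresponding address in svma, or
 * 0 if sharing is not possible.
 */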
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/*
	 * Match the virtual addresses, permissions and the alignment of
	 * the page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vma->vm_flags != svma->vm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}

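/*
 * A vma can take part in pmd sharing only if it is a shared file
 * mapping (VM_MAYSHARE) that fully covers the pud-aligned range
 * around @addr.
 */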
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a shareable pmd page for hugetlb.  If another mapping of
 * the same file already has a pmd page covering this pud range at the
 * matching file offset, take a reference on that page and install it
 * instead of letting the caller allocate a fresh one.
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct prio_tree_iter iter;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;

	if (!vma_shareable(vma, addr))
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud))
		pud_populate(mm, pud,
			     (pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(&mm->page_table_lock);
out:
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Unmap a huge page backed by a shared pte page.
 *
 * The hugetlb pte page is refcounted at the time of mapping.  If the
 * pte page is shared, indicated by page_count > 1, the unmap is done by
 * clearing the pud and dropping one reference.  If the count is 1, the
 * pte page is not shared and the caller must tear it down itself.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 - successfully unmapped a shared pte page
 *	    0 - the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
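	/*
	 * The shared pmd page mapped the whole pud range; advance *addr
	 * to the last huge page in that range so the caller's loop,
	 * stepping by HPAGE_SIZE, resumes just past it.
	 */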
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}

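/*
 * Sketch of the caller pattern huge_pmd_unshare() above is written for
 * (illustrative only; the loop body is a placeholder, not code in this
 * file):
 *
 *	for (address = start; address < end; address += HPAGE_SIZE) {
 *		ptep = huge_pte_offset(mm, address);
 *		if (!ptep)
 *			continue;
 *		if (huge_pmd_unshare(mm, &address, ptep))
 *			continue;
 *		... clear and flush the single pte at ptep ...
 *	}
 */

/*
 * Find or allocate the pte for a huge page at @addr.  If the pud slot
 * is empty, first try to reuse a shared pmd page via huge_pmd_share().
 */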
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (pud_none(*pud))
			huge_pmd_share(mm, addr, pud);
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

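/*
 * Look up the pmd-level entry serving as the huge pte for @addr,
 * without allocating; returns NULL if no pgd or pud entry is present.
 */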
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

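/*
 * Huge pages on this architecture live at the pmd level, so lookup by
 * bare address is not supported; the generic follow_page() path is
 * expected to fall through to follow_huge_pmd() instead.
 */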
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

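/*
 * Return the subpage of the huge page mapped by @pmd that backs
 * @address; the pmd entry itself acts as the pte for the whole huge
 * page.
 */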
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
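/*
 * Bottom-up search: scan the vma list upward from free_area_cache (or
 * TASK_UNMAPPED_BASE) for the first huge-page-aligned hole of @len
 * bytes, remembering the largest hole skipped so a later, smaller
 * request can restart from the base and use it.
 */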
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

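/*
 * Top-down search: walk downward from free_area_cache (capped at
 * mmap_base) for a huge-page-aligned hole, tracking the largest hole
 * seen.  If nothing fits, retry once from the base, then fall back to
 * the bottom-up search.
 */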
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

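/*
 * Arch hook for hugetlb mmap placement: validate size and alignment,
 * honour an aligned hint if the range is free, then defer to the
 * bottom-up or top-down search to match the process's mmap layout.
 */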
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */