/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

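/* Bottom-up search for a free, HPAGE_SIZE-aligned range.  Walks the
 * VMA list upward from the cached hint (or TASK_UNMAPPED_BASE),
 * steps over the unmappable hole in the middle of the sparc64
 * virtual address space, and restarts once from the bottom before
 * giving up with -ENOMEM.
 */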
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

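/* Top-down variant, used only for 32-bit tasks: searches downward
 * from mmap_base for an HPAGE_SIZE-aligned hole, keeping
 * free_area_cache and cached_hole_size up to date, and falls back
 * to the bottom-up allocator if nothing fits below the base.
 */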
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* start the search at the cached hint, rounded down to a
	 * huge page boundary */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base-len) & HPAGE_MASK;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start-len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

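/* Main entry point: validate the request, honor MAP_FIXED and any
 * address hint, then dispatch to the bottom-up or top-down search
 * depending on which mmap layout this mm uses.
 */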
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

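/* sparc64 does not use a single "huge" entry at the PMD or PUD
 * level; a huge page is mapped with a run of ordinary PTEs.  So
 * allocating the huge PTE is just a normal walk-and-allocate down
 * to the PTE level.
 */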
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

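/* Read-only counterpart of huge_pte_alloc(): walk the page tables
 * without allocating, returning NULL if any level is empty.
 */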
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

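/* Hugetlb PMD sharing is not implemented on sparc64; returning
 * zero tells the generic unmap path that nothing was unshared.
 */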
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

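/* Install a huge mapping by writing every sub-pte that backs the
 * huge page, bumping the per-mm huge_pte_count when a not-present
 * PTE becomes present.
 */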
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

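/* Inverse of set_huge_pte_at(): clear every sub-pte of the huge
 * page and return the first one, dropping huge_pte_count if the
 * mapping was present.
 */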
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

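/* Since huge pages are backed by ordinary PTEs here, the generic
 * follow_page() path handles them; these hooks just tell it there
 * is nothing special at the address or PMD level.
 */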
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

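/* Cross-call target: reload the secondary context register on any
 * cpu currently running this address space.
 */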
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

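/* Prefault hook: make sure this mm has a huge-page TSB, and
 * configure the context register page-size fields for huge pages
 * where the hardware supports it.
 */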
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}