/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/spu.h>

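/*
 * Serializes updates to the per-mm slice page-size maps; taken by
 * slice_convert() and slice_set_user_psize() to guard against e.g.
 * a concurrent 64K -> 4K demotion of the same context.
 */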
static DEFINE_SPINLOCK(slice_convert_lock);


#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[16 + 3 + 16 + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		*(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

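/*
 * Build the bitmaps describing which slices an address range touches.
 * Bit i of low_slices covers the i-th low slice, bit i of high_slices
 * the i-th high slice (a sketch of the usual geometry: 256MB slices
 * below 4GB, 1TB slices above, per SLICE_LOW_SHIFT/SLICE_HIGH_SHIFT).
 * Subtracting two powers of two yields a run of consecutive 1-bits,
 * e.g. a range spanning low slices 2..4 gives
 * (1u << 5) - (1u << 2) = 0b11100.
 */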
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, SLICE_LOW_TOP);
		unsigned long mstart = min(start, SLICE_LOW_TOP);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(mstart));
	}

	if ((start + len) > SLICE_LOW_TOP)
		ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1u << GET_HIGH_SLICE_INDEX(start));

	return ret;
}

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack: so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			ret.high_slices |= 1u << i;

	return ret;
}

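/*
 * The page size of each slice is stored as a 4-bit MMU page-size index,
 * one nibble per slice packed into the 64-bit low_slices_psize and
 * high_slices_psize context words; hence the (psizes >> (i * 4)) & 0xf
 * extraction below.
 */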
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	struct slice_mask ret = { 0, 0 };
	unsigned long i;
	u64 psizes;

	psizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	psizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((psizes >> (i * 4)) & 0xf) == psize)
			ret.high_slices |= 1u << i;

	return ret;
}

static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		(mask.high_slices & available.high_slices) == mask.high_slices;
}

static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

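/*
 * Change every slice set in @mask to use @psize, then force an SLB
 * flush on all CPUs currently running this mm so that no stale segment
 * entries with the old page size survive.
 */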
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	/* Write the new slice psize bits */
	u64 lpsizes, hpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (mask.high_slices & (1u << i))
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);
	mb();

	/* XXX this is sub-optimal but will do for now */
	on_each_cpu(slice_flush_segments, mm, 1);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}

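/*
 * Bottom-up search: walk upward from TASK_UNMAPPED_BASE (or the mmap
 * cache), aligning each candidate to the page size. When a candidate
 * range falls on slices outside @available, jump straight to the next
 * slice boundary instead of probing every hole inside it.
 */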
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			start_addr = addr = TASK_UNMAPPED_BASE;
			mm->cached_hole_size = 0;
		} else
			start_addr = addr = mm->free_area_cache;
	} else
		start_addr = addr = TASK_UNMAPPED_BASE;

full_search:
	for (;;) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		if ((TASK_SIZE - len) < addr)
			break;
		vma = find_vma(mm, addr);
		BUG_ON(vma && (addr >= vma->vm_end));

		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
			else
				addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			if (use_cache)
				mm->free_area_cache = addr + len;
			return addr;
		}
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}

	/* Make sure we didn't miss any holes */
	if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

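/*
 * Top-down variant: walk downward from mm->mmap_base, jumping down to
 * the previous slice boundary when the candidate range falls outside
 * @available, and fall back to the bottom-up search if nothing fits.
 */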
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize, int use_cache)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	struct slice_mask mask;
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);

	/* check if free_area_cache is useful for us */
	if (use_cache) {
		if (len <= mm->cached_hole_size) {
			mm->cached_hole_size = 0;
			mm->free_area_cache = mm->mmap_base;
		}

		/* either no address requested or can't fit in requested
		 * address hole
		 */
		addr = mm->free_area_cache;

		/* make sure it can fit in the remaining address space */
		if (addr > len) {
			addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
			mask = slice_range_to_mask(addr, len);
			if (slice_check_fit(mask, available) &&
			    slice_area_is_free(mm, addr, len))
				/* remember the address as a hint for
				 * next time
				 */
				return (mm->free_area_cache = addr);
		}
	}

	addr = mm->mmap_base;
	while (addr > len) {
		/* Go down by chunk size */
		addr = _ALIGN_DOWN(addr - len, 1ul << pshift);

		/* Check for hit with different page size */
		mask = slice_range_to_mask(addr, len);
		if (!slice_check_fit(mask, available)) {
			if (addr < SLICE_LOW_TOP)
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
			else if (addr < (1ul << SLICE_HIGH_SHIFT))
				addr = SLICE_LOW_TOP;
			else
				addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
			continue;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || (addr + len) <= vma->vm_start) {
			/* remember the address as a hint for next time */
			if (use_cache)
				mm->free_area_cache = addr;
			return addr;
		}

		/* remember the largest hole we saw so far */
		if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	addr = slice_find_area_bottomup(mm, len, available, psize, 0);

	/*
	 * Restore the topdown base:
	 */
	if (use_cache) {
		mm->free_area_cache = mm->mmap_base;
		mm->cached_hole_size = ~0UL;
	}

	return addr;
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown, int use_cache)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, use_cache);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}

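/*
 * Overall strategy: first try the hint (or the MAP_FIXED address)
 * against the "good" mask of slices already using @psize, then against
 * the "potential" mask (good slices plus VMA-free slices that may be
 * converted). Failing that, search for a fit within the good mask and
 * finally within the potential mask. Any part of the result that comes
 * out of the potential mask is converted to @psize before returning.
 */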
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown, int use_cache)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask = { 0, 0 } /* silence stupid warning */;
	int pmask_set = 0;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
		  addr, len, flags, topdown, use_cache);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -EINVAL;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/* Check the hint if it's valid or if we have MAP_FIXED */
	if ((addr != 0 || fixed) && (mm->task_size - len) >= addr) {

		/* Don't bother with the hint if it overlaps a VMA */
		if (!fixed && !slice_area_is_free(mm, addr, len))
			goto search;

		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}

		/* We don't fit in the good mask, check what other slices are
		 * empty and thus can be converted
		 */
		potential_mask = slice_mask_for_free(mm);
		potential_mask.low_slices |= good_mask.low_slices;
		potential_mask.high_slices |= good_mask.high_slices;
		pmask_set = 1;
		slice_print_mask(" potential", potential_mask);
		if (slice_check_fit(mask, potential_mask)) {
			slice_dbg(" fits potential !\n");
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

 search:
	slice_dbg(" search...\n");

	/* Now let's see if we can find something in the existing slices
	 * for that size
	 */
	addr = slice_find_area(mm, len, good_mask, psize, topdown, use_cache);
	if (addr != -ENOMEM) {
		/* Found within the good mask, we don't have to convert
		 * anything, so we return directly
		 */
		slice_dbg(" found area at 0x%lx\n", addr);
		return addr;
	}

	/* Won't fit, check what can be converted */
	if (!pmask_set) {
		potential_mask = slice_mask_for_free(mm);
		potential_mask.low_slices |= good_mask.low_slices;
		potential_mask.high_slices |= good_mask.high_slices;
		pmask_set = 1;
		slice_print_mask(" potential", potential_mask);
	}

	/* Try again, this time including the slices that can be converted */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown,
			       use_cache);
	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_convert(mm, mask, psize);
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize,
				       0, 1);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize,
				       1, 1);
}

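/*
 * Return the MMU page-size index of the slice containing @addr, read
 * from the 4-bit-per-slice psize maps in the mm context.
 */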
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	u64 psizes;
	int index;

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}

	return (psizes >> (index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	unsigned long flags, lpsizes, hpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
			hpsizes = (hpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	mm->context.low_slices_psize = lpsizes;
	mm->context.high_slices_psize = hpsizes;

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, mm->context.user_psize);

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}