/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/compat.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

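/*
 * Base address for the top-down mmap layout: the stack rlimit,
 * clamped to [MIN_GAP, MAX_GAP] and rounded down to a page boundary,
 * is left free below STACK_TOP.
 */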
static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return STACK_TOP - (gap & PAGE_MASK);
}

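/*
 * Decide between the legacy (bottom-up) and the flexible (top-down)
 * mmap layout for the current process.
 */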
static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
	/*
	 * Force standard allocation for 64 bit programs.
	 */
	if (!is_compat_task())
		return 1;
#endif
	return sysctl_legacy_va_layout ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    rlimit(RLIMIT_STACK) == RLIM_INFINITY;
}

#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

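/*
 * A 64 bit (non compat) mapping request that cannot possibly fit
 * below the current TASK_SIZE triggers an upgrade of the page table
 * to four levels, extending the address space limit to 1UL << 53.
 */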
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

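/*
 * Bottom-up area search.  If it fails with -ENOMEM while the task
 * still runs with a page table covering less than 1UL << 53 bytes,
 * upgrade to four levels and retry once.
 */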
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

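/*
 * Top-down variant of s390_get_unmapped_area(), used for the
 * flexible mmap layout, with the same page table upgrade and retry
 * on -ENOMEM.
 */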
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif