/*
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

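/*
 * A region or segment table (crst) occupies 1 << ALLOC_ORDER pages.
 * Page tables are smaller than a page, so TABLES_PER_PAGE of them are
 * packed into each 4K page; which fragments are in use is tracked in
 * the FRAG_MASK bits of page->flags. When each table needs a companion
 * fragment (a shadow table for noexec, a pgste block for KVM), fragments
 * are handed out in pairs; SECOND_HALVES covers the upper fragment of
 * each pair.
 */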
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

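/* The size of the vmalloc area can be set with the "vmalloc=<size>" parameter. */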
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock(&mm->context.list_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->context.list_lock);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);
	struct page *page = virt_to_page(table);

	spin_lock(&mm->context.list_lock);
	list_del(&page->lru);
	spin_unlock(&mm->context.list_lock);
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
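/*
 * Grow the address space by adding page-table levels on top of the current
 * pgd: a region-third table raises the limit to 1UL << 42, a region-second
 * table to 1UL << 53. The new top-level table is allocated without holding
 * page_table_lock, so the state is rechecked under the lock and the loop
 * repeats until the requested limit is covered.
 */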
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm, mm->context.noexec);
	if (!table)
		return -ENOMEM;
	spin_lock(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

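/*
 * Shrink the address space again by removing page-table levels from the
 * top until asce_limit no longer exceeds the requested limit.
 */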
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 */
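/*
 * page_table_alloc first tries to carve a free fragment out of a partially
 * used page on mm->context.pgtable_list; only when every listed page is
 * full is a fresh page allocated. Fully used pages are moved to the tail
 * of the list so the search stays cheap.
 */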
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->context.list_lock);
	return table;
}

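/*
 * Return a page table fragment: the in-use bits are cleared with an xor;
 * once no FRAG_MASK bit is left, the whole page is given back to the page
 * allocator.
 */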
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

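/*
 * Turn off noexec emulation for an mm: free the shadow region and segment
 * tables, mark the second halves of all page-table pages as free again,
 * and reload the address space parameters via update_mm.
 */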
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock(&mm->context.list_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock(&mm->context.list_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}

/*
 * Switch on pgstes for the userspace process of the current task (for kvm).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again whether something happened meanwhile */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

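/*
 * Report whether a kernel page is currently mapped by probing it with
 * lra (load real address); condition code 0 means the translation
 * succeeded. Needed when hibernation runs with DEBUG_PAGEALLOC, where
 * kernel pages may be unmapped.
 */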
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */