/*
 *  Copyright IBM Corp. 2007,2009
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
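
/*
 * A crst (region or segment) table occupies 1 << ALLOC_ORDER pages:
 * 16K on 64 bit, 8K on 31 bit.  FRAG_MASK describes how a 4K page is
 * carved into page table fragments - two 2K fragments on 64 bit, four
 * 1K fragments on 31 bit.  The fragment bits are tracked in
 * page->_mapcount by page_table_alloc() and page_table_free() below.
 */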

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
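
/*
 * For example, booting with "vmalloc=512M" on the kernel command line
 * reserves 512M below VMALLOC_END and moves VMALLOC_START down
 * accordingly, rounded to a page boundary by the PAGE_MASK above.
 */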

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}
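
/*
 * Illustrative sketch (not part of this file): the mmap path can use
 * crst_table_upgrade() to grow the address space on demand when a
 * requested mapping does not fit below the current asce_limit, roughly:
 *
 *	if (addr + len > mm->context.asce_limit && addr + len <= TASK_SIZE) {
 *		rc = crst_table_upgrade(mm, addr + len);
 *		if (rc)
 *			return (unsigned long) rc;
 *	}
 */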

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

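/*
 * The gmap page table walks below mirror the hardware DAT layout:
 * bits 0-10 of a 64 bit guest address index the region-first table
 * (address >> 53), bits 11-21 the region-second table (>> 42),
 * bits 22-32 the region-third table (>> 31) and bits 33-43 the
 * segment table (>> 20), each index masked with 0x7ff (11 bits,
 * 2048 entries per table).
 */
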
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
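
/*
 * Illustrative sketch (not part of this file): a hypervisor such as KVM
 * could back a guest's first 1G of memory with a range of its own user
 * address space roughly like this:
 *
 *	gmap = gmap_alloc(current->mm);
 *	if (!gmap)
 *		return -ENOMEM;
 *	rc = gmap_map_segment(gmap, userspace_addr, 0, 1UL << 30);
 *	...
 *	gmap_enable(gmap);	 before entering SIE for a vcpu
 *	gmap_disable(gmap);	 after leaving SIE
 *	gmap_free(gmap);	 when the guest goes away
 *
 * "userspace_addr" is a placeholder for the PMD_SIZE aligned start of
 * the backing mapping in the parent address space.
 */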

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	/* Undo the pgtable_page_ctor() done in page_table_alloc_pgste(). */
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
					unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * page table entry allocation/free routines.
 */
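/*
 * On 64 bit a page table is 2K, on 31 bit it is 1K, so a 4K page holds
 * two (64 bit) or four (31 bit) page table fragments.  The allocation
 * state of the fragments is kept as a bit mask in page->_mapcount: a set
 * bit means the fragment is in use, and pages with free fragments stay
 * on mm->context.pgtable_list.  Pgste page tables use the upper half of
 * the page for the pgste array, so such a page is never shared and its
 * _mapcount is preset to 3 (see page_table_alloc_pgste() above).
 */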
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

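/*
 * Table pointers handed to tlb_remove_table() carry a type tag in the
 * low bits of the (page aligned) address: all FRAG_MASK bits set marks a
 * pgste page table, the fragment bit shifted left by four marks a 1K/2K
 * fragment, and a tag of zero stands for a crst table.
 * __tlb_remove_table() decodes the tag once no CPU can be walking the
 * table anymore and frees it accordingly.
 */
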
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * Switch on pgstes for the current userspace process (needed to run
 * KVM/SIE guests).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie. */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done. */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* Copy the mm and let dup_mm create the page tables with pgstes. */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* Ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */