/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

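/*
 * Try to resolve a fault on the given user address via handle_mm_fault().
 * Returns 0 if the fault could be handled and the access may be retried,
 * -EFAULT otherwise (including when called in atomic context, where
 * mmap_sem must not be taken).
 */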
static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
				 int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

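/*
 * Copy "n" bytes between the kernel buffer "kptr" and the user address
 * "uaddr" by walking the page tables under mm->page_table_lock and
 * copying at most one page per iteration. If a page is not present (or
 * not writable for a user write), the lock is dropped, __handle_fault()
 * resolves the fault and the walk is retried. Returns the number of
 * bytes that could not be copied.
 */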
static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
				    size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

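/*
 * Copy data from user space. For KERNEL_DS the "user" pointer is in fact
 * a kernel address, so a plain memcpy() is sufficient. On a partial copy
 * the remaining bytes of the kernel buffer are cleared, matching the
 * usual copy_from_user() semantics.
 */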
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

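/*
 * Copy data to user space; the KERNEL_DS shortcut mirrors the one in
 * copy_from_user_pt(). Returns the number of bytes not copied.
 */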
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
158}