/*
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

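/*
 * follow_table() returns either a pte pointer or, for values below
 * 0x1000, an s390 program-interruption code describing why the walk
 * failed: 0x3a/0x3b for region-translation and 0x10 for segment-
 * translation exceptions. The callers add 0x11 (page-translation)
 * and 0x04 (protection) and hand the code to __handle_fault(), so a
 * failed software walk is treated like a hardware DAT exception.
 */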
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}

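/*
 * Copy n bytes between the user address uaddr and the kernel buffer
 * kptr (direction selected by write_user), walking the page tables
 * by hand under mm->page_table_lock. On a failed walk the lock is
 * dropped, the fault is handled and the copy is retried. Returns the
 * number of bytes NOT copied.
 */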
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		} else if (write_user && !pte_write(*pte)) {
			pte = (pte_t *) 0x04;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn;
	pte_t *pte;
	int rc;

retry:
	pte = follow_table(mm, uaddr);
	if ((unsigned long) pte < 0x1000)
		goto fault;
	if (!pte_present(*pte)) {
		pte = (pte_t *) 0x11;
		goto fault;
	}

	pfn = pte_pfn(*pte);
	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}

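/*
 * Returns the number of bytes that could not be copied. On a partial
 * copy the remaining tail of the kernel buffer is zero-filled.
 */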
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

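/*
 * Clear n bytes of user memory page by page, copying from the shared
 * empty_zero_page. Returns the number of bytes NOT cleared.
 */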
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

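/*
 * Returns the length of the user string including the terminating
 * NUL (at most count + 1), or 0 if an unresolvable fault occurs.
 */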
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, 0))
		return 0;
	goto retry;
}

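/*
 * Returns the length of the copied string excluding the terminating
 * NUL, count if the string is longer than count, or -EFAULT on fault.
 */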
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

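/*
 * Copy n bytes from one user address to another. Both page table
 * walks run under mm->page_table_lock; whichever walk fails sets the
 * address and access type reported to __handle_fault(). Returns the
 * number of bytes NOT copied.
 */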
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		pte_from = follow_table(mm, uaddr_from);
		error_code = (unsigned long) pte_from;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_from)) {
			error_code = 0x11;
			goto fault;
		}

		write_user = 1;
		uaddr = uaddr_to;
		pte_to = follow_table(mm, uaddr_to);
		error_code = (unsigned long) pte_to;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_to)) {
			error_code = 0x11;
			goto fault;
		} else if (!pte_write(*pte_to)) {
			error_code = 0x04;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		pfn_to = pte_pfn(*pte_to);
		offset_from = uaddr_from & (PAGE_SIZE-1);
		offset_to = uaddr_to & (PAGE_SIZE-1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, error_code, write_user))
		return n - done;
	goto retry;
}

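/*
 * Load the old futex value, apply "insn" to compute the new value and
 * retry the compare-and-swap until it succeeds, making the whole
 * read-modify-write atomic. The exception table entries turn a fault
 * in any of the accesses into a -EFAULT return.
 */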
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

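/*
 * Translate the futex address by page table walk and pin the page
 * with get_page(), so the page cannot go away while the atomic
 * operation runs on the kernel mapping after the lock is dropped.
 */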
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

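/*
 * Single compare-and-swap on the futex word. ret is preset to -EFAULT
 * and only cleared once the CS instruction has completed; the value
 * read from user space is reported back through *uval.
 */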
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: la %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_from_user_small = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_to_user_small = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};