/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

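/*
 * Walk the page tables by hand to translate a user address: descend
 * through pgd, pud and pmd and return a pointer to the pte that maps
 * addr, or NULL if any level of the walk is absent or bad.
 */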
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return NULL;

	return pte_offset_map(pmd, addr);
}

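/*
 * Resolve a fault on a user address the way the hardware fault handler
 * would: find the vma, expand the stack if necessary, check the access
 * permissions and call handle_mm_fault().  Returns 0 on success and
 * -EFAULT if the address cannot be made valid.  Must not be called in
 * atomic context, since it sleeps on mmap_sem.
 */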
static int __handle_fault(struct mm_struct *mm, unsigned long address,
			  int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;
	int fault;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	fault = handle_mm_fault(mm, vma, address, write_access);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto out_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

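/*
 * Copy n bytes between kernel and user space by walking the page
 * tables under mm->page_table_lock and memcpy'ing page fragments
 * through the kernel mapping of each frame.  On a missing or (for
 * writes) write-protected pte the lock is dropped, the fault is
 * resolved with __handle_fault() and the copy is retried.  Returns
 * the number of bytes that could not be copied.
 */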
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
			     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 * Note that the lock is dropped and re-acquired while a fault is being
 * resolved, so the returned mapping is only stable while the lock is held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn, ret;
	pte_t *pte;
	int rc;

	ret = 0;
retry:
	pte = follow_table(mm, uaddr);
	if (!pte || !pte_present(*pte))
		goto fault;

	pfn = pte_pfn(*pte);
	if (!pfn_valid(pfn))
		goto out;

	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
	return ret;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(mm, uaddr, 0);
	spin_lock(&mm->page_table_lock);
	if (rc)
		goto out;
	goto retry;
}

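/*
 * The copy_{from,to}_user_pt entry points take a shortcut for
 * KERNEL_DS and use a plain memcpy, otherwise they fall back to the
 * page table walk.  As the copy_from_user semantics require, the
 * uncopied tail of the destination buffer is zeroed on a partial read.
 */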
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

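/*
 * Clear n bytes of user memory by repeatedly copying from
 * empty_zero_page, one page-sized chunk at a time.  Returns the
 * number of bytes that could not be cleared.
 */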
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				      &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

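/*
 * strnlen_user semantics: return the length of the string including
 * the terminating '\0' (count + 1 if no terminator is found within
 * count bytes), or 0 if a fault cannot be resolved.  The scan works
 * page by page, so the string may cross page boundaries.
 */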
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if (!pte || !pte_present(*pte))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn)) {
			done = -1;
			goto out;
		}

		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
out:
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, 0))
		return 0;
	goto retry;
}

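/*
 * strncpy_from_user_pt first determines the string length with
 * strnlen_user_pt() and then copies the string in one go.  Returns
 * the number of bytes copied excluding a terminating '\0', or
 * -EFAULT on an unresolvable fault.
 */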
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

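/*
 * Copy between two user buffers.  Both addresses have to be
 * translated, so each iteration is limited to the part of a page that
 * is mapped contiguously at both the source and the destination
 * offset; on a fault at either side the failing address and access
 * type are handed to __handle_fault() and the loop is retried.
 */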
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte_from = follow_table(mm, uaddr_from);
		if (!pte_from || !pte_present(*pte_from)) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}

		pte_to = follow_table(mm, uaddr_to);
		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		if (!pfn_valid(pfn_from))
			goto out;
		pfn_to = pte_pfn(*pte_to);
		if (!pfn_valid(pfn_to))
			goto out;

		offset_from = uaddr_from & (PAGE_SIZE-1);
		offset_to = uaddr_to & (PAGE_SIZE-1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

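/*
 * The futex primitives emulate an atomic read-modify-write with a
 * load / modify / compare-and-swap (cs) loop: the old value is loaded,
 * the operation is applied to a copy, and cs stores the result only
 * if the word is still unchanged, retrying otherwise.  The EX_TABLE
 * entries route a fault at the load or the cs to the exit label,
 * leaving the initial -EFAULT in the result register.
 */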
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

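/*
 * Dispatch a futex operation to the matching cs loop.  The caller is
 * expected to have made sure that uaddr can be dereferenced directly,
 * either because get_fs() == KERNEL_DS or because the address was
 * translated with __dat_user_addr() and the page is pinned.
 */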
static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		/* FUTEX_OP_ANDN is oldval & ~oparg, so complement oparg */
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

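/*
 * For real user space accesses the futex word is translated with
 * __dat_user_addr() under the page table lock and its page is pinned
 * with get_page(), so that the kernel mapping stays valid while the
 * cs loop runs without the lock held.
 */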
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

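/*
 * Atomically compare the futex word with oldval and, if equal,
 * replace it with newval using a single cs instruction.  The value
 * found at uaddr is returned; a fault is turned into -EFAULT via the
 * EX_TABLE fixups.
 */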
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: lr   %0,%1\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	return ret;
}

int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

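/*
 * Operations vector for user space access by page table walk, used
 * when no hardware support for user copy is available (see the file
 * header comment).
 */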
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};