/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *             Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define segment_eq(a,b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)

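/*
 * Illustrative example (not part of this header): the usual pattern for
 * temporarily widening the address limit so that a kernel buffer can be
 * handed to a routine expecting a __user pointer.  The helper name and
 * variables below are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = helper_expecting_user_ptr((void __user *)kernel_buf, len);
 *	set_fs(old_fs);
 */
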
#if !defined(CONFIG_MMU)
/* NOMMU is always true */
#define __addr_ok(addr)	(1)

static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

/*
 * __access_ok: Check if an address with the given size is OK or not.
 *
 * If we don't have an MMU (or if it's disabled), the only thing we really
 * have to look out for is whether the address lies outside the available
 * RAM.
 *
 * TODO: This check could probably also stand to be restricted somewhat more,
 * though it still does the Right Thing(tm) for the time being.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return ((addr >= memory_start) && ((addr + size) < memory_end));
}
#else /* CONFIG_MMU */
#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * __access_ok: Check if an address with the given size is OK or not.
 *
 * Uhhuh, this needs 33-bit arithmetic.  We have a carry to take care of:
 *
 *	sum := addr + size;  carry? --> flag = true;
 *	if (sum > addr_limit) flag = true;
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long flag, sum;

	__asm__("clrt\n\t"
		"addc	%3, %1\n\t"
		"movt	%0\n\t"
		"cmp/hi	%4, %1\n\t"
		"rotcl	%0"
		:"=&r" (flag), "=r" (sum)
		:"1" (addr), "r" (size),
		 "r" (current_thread_info()->addr_limit.seg)
		:"t");
	return flag == 0;
}
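/*
 * Illustrative C equivalent of the assembly above (a sketch only, not what
 * is actually compiled): "sum < addr" detects the carry out of the 33-bit
 * sum, and "sum > limit" is the cmp/hi test.
 *
 *	unsigned long limit = current_thread_info()->addr_limit.seg;
 *	unsigned long sum = addr + size;
 *
 *	return !((sum < addr) || (sum > limit));
 */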
#endif /* CONFIG_MMU */

static inline int access_ok(int type, const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return __access_ok(addr, size);
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is OK)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)		__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking; they are useful
 * when doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))

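/*
 * Illustrative usage (hypothetical ioctl-style code, not part of this
 * header): get_user()/put_user() check the address themselves, while the
 * double-underscore forms rely on an earlier access_ok() call.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__put_user(val + 1, uptr))
 *		return -EFAULT;
 */
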
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	case 1:						\
		__get_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__get_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__get_user_asm(x, ptr, retval, "l");	\
		break;					\
	default:					\
		__get_user_unknown();			\
		break;					\
	}						\
} while (0)

#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	__typeof__(*(ptr)) *__gu_addr = (ptr);		\
	__get_user_size(__gu_val, (__gu_addr), (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})

#define __get_user_check(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	__typeof__(*(ptr)) *__gu_addr = (ptr);		\
	__chk_user_ptr(__gu_addr);			\
	if (likely(__addr_ok((unsigned long)(__gu_addr)))) {	\
		__get_user_size(__gu_val, (__gu_addr), (size), __gu_err); \
	} else {					\
		__gu_err = -EFAULT;			\
		__gu_val = 0;				\
	}						\
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})

#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err), "=&r" (x) \
	:"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })

extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	case 1:						\
		__put_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__put_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__put_user_asm(x, ptr, retval, "l");	\
		break;					\
	case 8:						\
		__put_user_u64(x, ptr, retval);		\
		break;					\
	default:					\
		__put_user_unknown();			\
	}						\
} while (0)

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)			\
({							\
	long __pu_err;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);	\
							\
	if (likely(__addr_ok((unsigned long)__pu_addr)))	\
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	else						\
		__pu_err = -EFAULT;			\
	__pu_err;					\
})

#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err) \
	:"r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \
	:"memory"); })

#if defined(CONFIG_CPU_LITTLE_ENDIAN)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1,%2\n\t" \
	"mov.l	%S1,%T2\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1,%2\n\t" \
	"mov.l	%R1,%T2\n\t" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n\t" \
	".balign	4\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
	: "memory"); })
#endif

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  Return the number of bytes NOT copied. */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

#define copy_to_user(to,from,n) ({				\
	void *__copy_to = (void *) (to);			\
	__kernel_size_t __copy_size = (__kernel_size_t) (n);	\
	__kernel_size_t __copy_res;				\
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else							\
		__copy_res = __copy_size;			\
	__copy_res; })

#define copy_from_user(to,from,n) ({				\
	void *__copy_to = (void *) (to);			\
	void *__copy_from = (void *) (from);			\
	__kernel_size_t __copy_size = (__kernel_size_t) (n);	\
	__kernel_size_t __copy_res;				\
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else							\
		__copy_res = __copy_size;			\
	__copy_res; })

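/*
 * Illustrative usage (hypothetical code, not part of this header): both
 * routines return the number of bytes that could NOT be copied, so a
 * non-zero result is normally turned into -EFAULT.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */
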
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user((__force void *)to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * Clear the area and return the number of bytes NOT cleared
 * (non-zero only on failure; usually it's 0).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({					\
	void *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);				\
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
	__cl_size; })

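/*
 * Illustrative usage (hypothetical read() path, not part of this header):
 * zero-padding the tail of a user buffer after a short copy.
 *
 *	if (clear_user(buf + copied, count - copied))
 *		return -EFAULT;
 */
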
static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s, _c;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"
		"dt	%4\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%4, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%9, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
		: "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dest: Destination address, in kernel space.  This buffer must be at
 *        least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
#define strncpy_from_user(dest,src,count) ({			\
	unsigned long __sfu_src = (unsigned long) (src);	\
	int __sfu_count = (int) (count);			\
	long __sfu_res = -EFAULT;				\
	if (__access_ok(__sfu_src, __sfu_count))		\
		__sfu_res = __strncpy_from_user((unsigned long) (dest), \
						__sfu_src, __sfu_count); \
	__sfu_res; })

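/*
 * Illustrative usage (hypothetical code, not part of this header):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		fault while copying: -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	no NUL seen within the buffer
 */
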
/*
 * Return the size of a string (including the ending 0 even when we have
 * exceeded the maximum string length).
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"
		"cmp/eq	%4, %0\n\t"
		"bt/s	2f\n\t"
		" add	#1, %0\n\t"
		"tst	%1, %1\n\t"
		"bf	1b\n\t"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	#0, %0\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n)
		: "t");
	return res;
}

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static __inline__ long strnlen_user(const char __user *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str)	strnlen_user(str, ~0UL >> 1)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines above use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

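/*
 * Illustrative entry (conceptual, not a literal listing): for the
 * __get_user_asm() sequence above, the assembler emits something like
 *
 *	.section __ex_table, "a"
 *	.long	1b, 3b
 *	.previous
 *
 * i.e. "if the load at label 1 faults, resume at the fixup at label 3",
 * which sets -EFAULT and jumps back to label 2.  fixup_exception() does
 * the lookup at fault time.
 */
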
extern int fixup_exception(struct pt_regs *regs);

#endif /* __ASM_SH_UACCESS_H */