/* $Id: uaccess.h,v 1.35 2002/02/09 19:49:31 davem Exp $ */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/a.out.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	set_thread_current_ds((val).seg);					\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)
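
/*
 * Illustrative (hypothetical) use of the "segment" override described in
 * the comment above, e.g. when kernel code temporarily wants the user
 * access routines to accept kernel pointers; not part of this header's
 * interface:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call code that does copy_from_user()/copy_to_user() on a
 *	    kernel buffer ...
 *	set_fs(old_fs);
 */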

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};
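
/*
 * Sketch of how an entry is generated: for each access that may fault,
 * the inline assembly below emits
 *
 *	.section __ex_table,"a"
 *	.align	4
 *	.word	1b, 3b
 *	.previous
 *
 * i.e. insn = the faulting load/store at label 1, fixup = the out-of-line
 * recovery code at label 3 which the fault handler branches to.
 */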

extern void __ret_efault(void);
extern void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)
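
/*
 * Typical caller-side usage (hypothetical; "uptr" is just some user
 * pointer).  Both macros evaluate to 0 on success and -EFAULT when the
 * access faults:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)uptr))
 *		return -EFAULT;
 */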

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"				\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"b	2b\n\t"							\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\t"						\
	".previous\n\n\t"						\
	: "=r" (ret) : "r" (x), "r" (__m(addr)),			\
	  "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} data = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} data = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret)					\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"				\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"clr	%1\n\t"							\
	"b	2b\n\t"							\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),			\
	  "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval)				\
if (__builtin_constant_p(retval) && retval == -EFAULT)			\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t"				\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b,__ret_efault\n\n\t"					\
	".previous\n\t"							\
	: "=r" (x) : "r" (__m(addr)));					\
else									\
__asm__ __volatile__(							\
	"/* Get user asm ret, inline. */\n"				\
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t"				\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"ret\n\t"							\
	" restore %%g0, %2, %%o0\n\n\t"					\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern unsigned long __must_check ___copy_from_user(void *to,
						    const void __user *from,
						    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);
	return ret;
}
#define __copy_from_user copy_from_user

extern unsigned long __must_check ___copy_to_user(void __user *to,
						  const void *from,
						  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
					unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user
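
/*
 * For reference (general kernel convention): copy_from_user() and
 * copy_to_user() return the number of bytes that could NOT be copied,
 * so 0 means complete success.  A hypothetical caller ("ubuf" being
 * some user pointer):
 *
 *	char buf[64];
 *
 *	if (copy_from_user(buf, ubuf, sizeof(buf)))
 *		return -EFAULT;
 */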

extern unsigned long __must_check ___copy_in_user(void __user *to,
						  const void __user *from,
						  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
					unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */