#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

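/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the segment limit so a routine that normally takes
 * user pointers can be pointed at kernel memory.  The helper name below is
 * hypothetical; vfs_read() is the usual in-kernel file read entry point.
 */
#if 0
static ssize_t example_kernel_read(struct file *file, void *buf,
				   size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();	/* save the current limit */
	ssize_t ret;

	set_fs(KERNEL_DS);	/* access_ok() now accepts kernel addresses */
	ret = vfs_read(file, (__force char __user *)buf, count, pos);
	set_fs(old_fs);		/* always restore the saved segment */

	return ret;
}
#endif
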
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

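/*
 * Illustrative sketch (not part of this header): validate a user range once
 * with access_ok(), then use the unchecked __get_user() variants (defined
 * below) for the individual accesses.  The struct and function names are
 * hypothetical.
 */
#if 0
struct example_pair {
	int a;
	int b;
};

static int example_read_pair(struct example_pair __user *up, int *a, int *b)
{
	/* one range check covers both fields */
	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
		return -EFAULT;
	if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
		return -EFAULT;
	return 0;
}
#endif
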
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

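/*
 * Illustrative sketch (not part of this header): roughly how the page-fault
 * path consults this table.  The real kernel sorts the table and binary
 * searches it via search_exception_tables(); this hypothetical helper is a
 * simplified linear-search rendition for exposition only.
 */
#if 0
static const struct exception_table_entry *
example_search_extable(const struct exception_table_entry *first,
		       const struct exception_table_entry *last,
		       unsigned long faulting_insn)
{
	const struct exception_table_entry *e;

	for (e = first; e <= last; e++)
		if (e->insn == faulting_insn)
			return e;	/* caller resumes at e->fixup */
	return NULL;			/* genuine fault, no fixup */
}
#endif
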
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data. The "get_user" functions do not
 * properly handle 64-bit data because the value gets down-cast to a long.
 * The "put_user" functions already handle 64-bit data properly but we add
 * "user64" versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

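/*
 * Illustrative sketch (not part of this header): typical use of the checked
 * accessors in a syscall-style function.  get_user()/put_user() return 0 on
 * success and -EFAULT on a faulting access; the function below is
 * hypothetical.
 */
#if 0
static long example_increment_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* checks access_ok() internally */
		return -EFAULT;
	val++;
	if (put_user(val, uptr))
		return -EFAULT;
	return 0;
}
#endif
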
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
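/*
 * Note that the address operands below use the "b" ("base register")
 * constraint, which excludes r0: in PowerPC load/store address
 * calculation, r0 in the base position is read as the constant 0, so
 * letting the compiler allocate r0 for the address would silently break
 * the access.
 */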
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,3b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,4b\n"			\
			PPC_LONG "2b,4b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_sleep();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_sleep();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,4b\n"		\
			PPC_LONG "2b,4b\n"		\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long  __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_sleep();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
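	/*
	 * The range failed access_ok() but may still begin below the top
	 * of the user segment, i.e. it straddles the segment limit.  Copy
	 * only the in-segment part and report the straddling bytes as not
	 * copied, matching the "bytes remaining" return convention.
	 */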
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

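/*
 * Illustrative sketch (not part of this header): the bulk-copy helpers
 * return the number of bytes that could NOT be copied (0 on full success),
 * not an error code.  The struct and function below are hypothetical.
 */
#if 0
struct example_args {
	unsigned long flags;
	unsigned long value;
};

static long example_ioctl_handler(void __user *argp)
{
	struct example_args args;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;		/* partial copy => fail the call */
	args.value += 1;
	if (copy_to_user(argp, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
#endif
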
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

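/*
 * Illustrative sketch (not part of this header): the _inatomic variants
 * omit might_sleep(), so they may be used where sleeping is forbidden,
 * e.g. between pagefault_disable()/pagefault_enable(), where a fault
 * yields a short copy instead of sleeping on the mm semaphore.  The
 * helper name is hypothetical.
 */
#if 0
static unsigned long example_copy_nofault(void *dst,
		const void __user *src, unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return left;	/* bytes NOT copied; 0 means success */
}
#endif
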
static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
		long count)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, src, 1)))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
	unsigned long top = current->thread.fs.seg;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
}

#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
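
/*
 * Illustrative sketch (not part of this header): fetching a NUL-terminated
 * string from user space.  strncpy_from_user() returns the number of bytes
 * copied (up to count, excluding any NUL) or -EFAULT; note it does not
 * terminate the destination when the source is longer than count, so the
 * caller terminates explicitly.  The helper and constant are hypothetical;
 * buf must hold at least EXAMPLE_NAME_MAX + 1 bytes.
 */
#if 0
#define EXAMPLE_NAME_MAX	63

static long example_get_name(char *buf, const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, EXAMPLE_NAME_MAX);

	if (len < 0)
		return len;	/* -EFAULT: source not accessible */
	buf[len] = '\0';	/* terminate even when truncated */
	return len;
}
#endif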

#endif  /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */