blob: 883b216c60b2d51059985c716da365aef5e9bfbf [file] [log] [blame]
/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include <linux/mm.h>
9#include <linux/highmem.h>
10#include <linux/blkdev.h>
11#include <linux/module.h>
Andrew Morton3fcfab12006-10-19 23:28:16 -070012#include <linux/backing-dev.h>
Thomas Gleixnerb6a8b312007-04-02 14:25:31 +020013#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <asm/uaccess.h>
15#include <asm/mmx.h>
H. Peter Anvin9c675122012-04-20 12:19:52 -070016#include <asm/asm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 * Consulted by __movsl_is_ok() below.
 * NOTE(review): only defined here — presumably initialized during CPU
 * setup elsewhere in the arch code; confirm.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif
24
/*
 * Decide whether a plain "rep movsl" bulk copy of @n bytes between
 * addresses @a1 and @a2 is acceptable.  On Intel CPUs the relative
 * alignment of the two buffers matters once the copy is large enough
 * (>= 64 bytes); on everything else movsl is always fine.
 *
 * Returns 1 when movsl may be used, 0 when the unrolled copy should
 * be taken instead.
 */
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	return !(n >= 64 && ((a1 ^ a2) & movsl_mask.mask));
#else
	return 1;
#endif
}
/* Pointer-friendly wrapper: casts both addresses to unsigned long. */
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
/*
 * Zero Userspace
 */

/*
 * Clear @size bytes of user memory at @addr.
 *
 * Clears size/4 longs with "rep stosl", then the trailing size&3 bytes
 * with "rep stosb".  On a fault during the long pass the fixup at 3:
 * rebuilds the residual BYTE count (remaining longs * 4 + trailing
 * bytes) into %0; a fault during the byte pass just falls through with
 * the byte count already in %0.  @size is updated in place to the
 * number of bytes NOT cleared (0 on success).
 * NOTE: @size is evaluated more than once — callers pass plain lvalues.
 */
#define __do_clear_user(addr,size)					\
do {									\
	int __d0;							\
	might_fault();							\
	__asm__ __volatile__(						\
		"0: rep; stosl\n"					\
		" movl %2,%0\n"						\
		"1: rep; stosb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3: lea 0(%2,%0,4),%0\n"				\
		" jmp 2b\n"						\
		".previous\n"						\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,2b)					\
		: "=&c"(size), "=&D" (__d0)				\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
} while (0)
58
59/**
60 * clear_user: - Zero a block of memory in user space.
61 * @to: Destination address, in user space.
62 * @n: Number of bytes to zero.
63 *
64 * Zero a block of memory in user space.
65 *
66 * Returns number of bytes that could not be cleared.
67 * On success, this will be zero.
68 */
69unsigned long
70clear_user(void __user *to, unsigned long n)
71{
Ingo Molnar1d18ef42008-09-11 20:53:21 +020072 might_fault();
Linus Torvalds1da177e2005-04-16 15:20:36 -070073 if (access_ok(VERIFY_WRITE, to, n))
74 __do_clear_user(to, n);
75 return n;
76}
Alexey Dobriyan129f6942005-06-23 00:08:33 -070077EXPORT_SYMBOL(clear_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -070078
/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	/* __do_clear_user() updates n in place to the residual byte count. */
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	/*
	 * mask is ~0UL when the start address passes __addr_ok(), 0
	 * otherwise; ANDed into the result so a bad address yields 0.
	 */
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_fault();

	/*
	 * "repne scasb" scans for the NUL within the clamped count.
	 * setne marks the too-long (no NUL found) case so the result
	 * ends up n+1 (> @n); the fixup at 2: returns 0 on a fault,
	 * and 3: handles n == 0 by returning 1.
	 */
	__asm__ __volatile__(
		" testl %0, %0\n"
		" jz 3f\n"
		" andl %0,%%ecx\n"
		"0: repne; scasb\n"
		" setne %%al\n"
		" subl %%ecx,%0\n"
		" addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2: xorl %%eax,%%eax\n"
		" jmp 1b\n"
		"3: movb $1,%%al\n"
		" jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(0b,2b)
		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
EXPORT_SYMBOL(strnlen_user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Intel-optimized copy of @size bytes from kernel @from to user @to.
 *
 * Main loop moves 64 bytes per iteration with paired movl loads/stores
 * (eax/edx); the reads at 32(%4)/64(%4) before the block touch ahead
 * in the source — NOTE(review): presumably to trigger the fault (and
 * hardware prefetch) early, confirm.  The tail (< 64 bytes) is done
 * with rep movsl + rep movsb.  Any faulting access jumps to 100:
 * (just stop), except a fault inside the rep movsl at 99:, whose
 * fixup at 101: converts the remaining long count back to bytes.
 *
 * Returns the number of bytes NOT copied (0 on success).
 */
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"1: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 3f\n"
		"2: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"3: movl 0(%4), %%eax\n"
		"4: movl 4(%4), %%edx\n"
		"5: movl %%eax, 0(%3)\n"
		"6: movl %%edx, 4(%3)\n"
		"7: movl 8(%4), %%eax\n"
		"8: movl 12(%4),%%edx\n"
		"9: movl %%eax, 8(%3)\n"
		"10: movl %%edx, 12(%3)\n"
		"11: movl 16(%4), %%eax\n"
		"12: movl 20(%4), %%edx\n"
		"13: movl %%eax, 16(%3)\n"
		"14: movl %%edx, 20(%3)\n"
		"15: movl 24(%4), %%eax\n"
		"16: movl 28(%4), %%edx\n"
		"17: movl %%eax, 24(%3)\n"
		"18: movl %%edx, 28(%3)\n"
		"19: movl 32(%4), %%eax\n"
		"20: movl 36(%4), %%edx\n"
		"21: movl %%eax, 32(%3)\n"
		"22: movl %%edx, 36(%3)\n"
		"23: movl 40(%4), %%eax\n"
		"24: movl 44(%4), %%edx\n"
		"25: movl %%eax, 40(%3)\n"
		"26: movl %%edx, 44(%3)\n"
		"27: movl 48(%4), %%eax\n"
		"28: movl 52(%4), %%edx\n"
		"29: movl %%eax, 48(%3)\n"
		"30: movl %%edx, 52(%3)\n"
		"31: movl 56(%4), %%eax\n"
		"32: movl 60(%4), %%edx\n"
		"33: movl %%eax, 56(%3)\n"
		"34: movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 1b\n"
		"35: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"99: rep; movsl\n"
		"36: movl %%eax, %0\n"
		"37: rep; movsb\n"
		"100:\n"
		".section .fixup,\"ax\"\n"
		"101: lea 0(%%eax,%0,4),%0\n"
		" jmp 100b\n"
		".previous\n"
		_ASM_EXTABLE(1b,100b)
		_ASM_EXTABLE(2b,100b)
		_ASM_EXTABLE(3b,100b)
		_ASM_EXTABLE(4b,100b)
		_ASM_EXTABLE(5b,100b)
		_ASM_EXTABLE(6b,100b)
		_ASM_EXTABLE(7b,100b)
		_ASM_EXTABLE(8b,100b)
		_ASM_EXTABLE(9b,100b)
		_ASM_EXTABLE(10b,100b)
		_ASM_EXTABLE(11b,100b)
		_ASM_EXTABLE(12b,100b)
		_ASM_EXTABLE(13b,100b)
		_ASM_EXTABLE(14b,100b)
		_ASM_EXTABLE(15b,100b)
		_ASM_EXTABLE(16b,100b)
		_ASM_EXTABLE(17b,100b)
		_ASM_EXTABLE(18b,100b)
		_ASM_EXTABLE(19b,100b)
		_ASM_EXTABLE(20b,100b)
		_ASM_EXTABLE(21b,100b)
		_ASM_EXTABLE(22b,100b)
		_ASM_EXTABLE(23b,100b)
		_ASM_EXTABLE(24b,100b)
		_ASM_EXTABLE(25b,100b)
		_ASM_EXTABLE(26b,100b)
		_ASM_EXTABLE(27b,100b)
		_ASM_EXTABLE(28b,100b)
		_ASM_EXTABLE(29b,100b)
		_ASM_EXTABLE(30b,100b)
		_ASM_EXTABLE(31b,100b)
		_ASM_EXTABLE(32b,100b)
		_ASM_EXTABLE(33b,100b)
		_ASM_EXTABLE(34b,100b)
		_ASM_EXTABLE(35b,100b)
		_ASM_EXTABLE(36b,100b)
		_ASM_EXTABLE(37b,100b)
		_ASM_EXTABLE(99b,101b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
243
/*
 * Intel-optimized copy of @size bytes from user @from to kernel @to
 * which, per the copy_from_user() contract, ZEROES the uncopied tail
 * of the kernel destination on a fault: the fixup at 16: runs
 * "rep stosb" with al=0 over the residual bytes before returning.
 * Only the user-side loads carry exception-table entries; the kernel
 * stores cannot fault.
 *
 * Returns the number of bytes NOT copied (0 on success).
 */
static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movl %%eax, 0(%3)\n"
		" movl %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movl %%eax, 8(%3)\n"
		" movl %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movl %%eax, 16(%3)\n"
		" movl %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movl %%eax, 24(%3)\n"
		" movl %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movl %%eax, 32(%3)\n"
		" movl %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movl %%eax, 40(%3)\n"
		" movl %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movl %%eax, 48(%3)\n"
		" movl %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movl %%eax, 56(%3)\n"
		" movl %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
Hiro Yoshiokac22ce142006-06-23 02:04:16 -0700335
/*
 * Non Temporal Hint version of __copy_user_zeroing_intel.  It is cache aware.
 * hyoshiok@miraclelinux.com
 */

/*
 * Same structure as __copy_user_zeroing_intel (64-byte unrolled loop,
 * fault => zero the residual destination), but the kernel-side stores
 * use "movnti" non-temporal hints so the copied data bypasses the
 * cache; the "sfence" after the loop orders the weakly-ordered
 * movnti stores before the rep-string tail.
 *
 * Returns the number of bytes NOT copied (0 on success).
 */
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: pushl %0\n"
		" pushl %%eax\n"
		" xorl %%eax,%%eax\n"
		" rep; stosb\n"
		" popl %%eax\n"
		" popl %0\n"
		" jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
434
/*
 * Non-temporal (cache-bypassing, movnti + sfence) copy from user
 * @from to kernel @to WITHOUT zeroing the destination tail on a
 * fault: the fixup at 9:/16: only converts the residual long count
 * to bytes and bails out.  Otherwise identical in structure to
 * __copy_user_zeroing_intel_nocache above.
 *
 * Returns the number of bytes NOT copied (0 on success).
 */
static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
		" .align 2,0x90\n"
		"0: movl 32(%4), %%eax\n"
		" cmpl $67, %0\n"
		" jbe 2f\n"
		"1: movl 64(%4), %%eax\n"
		" .align 2,0x90\n"
		"2: movl 0(%4), %%eax\n"
		"21: movl 4(%4), %%edx\n"
		" movnti %%eax, 0(%3)\n"
		" movnti %%edx, 4(%3)\n"
		"3: movl 8(%4), %%eax\n"
		"31: movl 12(%4),%%edx\n"
		" movnti %%eax, 8(%3)\n"
		" movnti %%edx, 12(%3)\n"
		"4: movl 16(%4), %%eax\n"
		"41: movl 20(%4), %%edx\n"
		" movnti %%eax, 16(%3)\n"
		" movnti %%edx, 20(%3)\n"
		"10: movl 24(%4), %%eax\n"
		"51: movl 28(%4), %%edx\n"
		" movnti %%eax, 24(%3)\n"
		" movnti %%edx, 28(%3)\n"
		"11: movl 32(%4), %%eax\n"
		"61: movl 36(%4), %%edx\n"
		" movnti %%eax, 32(%3)\n"
		" movnti %%edx, 36(%3)\n"
		"12: movl 40(%4), %%eax\n"
		"71: movl 44(%4), %%edx\n"
		" movnti %%eax, 40(%3)\n"
		" movnti %%edx, 44(%3)\n"
		"13: movl 48(%4), %%eax\n"
		"81: movl 52(%4), %%edx\n"
		" movnti %%eax, 48(%3)\n"
		" movnti %%edx, 52(%3)\n"
		"14: movl 56(%4), %%eax\n"
		"91: movl 60(%4), %%edx\n"
		" movnti %%eax, 56(%3)\n"
		" movnti %%edx, 60(%3)\n"
		" addl $-64, %0\n"
		" addl $64, %4\n"
		" addl $64, %3\n"
		" cmpl $63, %0\n"
		" ja 0b\n"
		" sfence \n"
		"5: movl %0, %%eax\n"
		" shrl $2, %0\n"
		" andl $3, %%eax\n"
		" cld\n"
		"6: rep; movsl\n"
		" movl %%eax,%0\n"
		"7: rep; movsb\n"
		"8:\n"
		".section .fixup,\"ax\"\n"
		"9: lea 0(%%eax,%0,4),%0\n"
		"16: jmp 8b\n"
		".previous\n"
		_ASM_EXTABLE(0b,16b)
		_ASM_EXTABLE(1b,16b)
		_ASM_EXTABLE(2b,16b)
		_ASM_EXTABLE(21b,16b)
		_ASM_EXTABLE(3b,16b)
		_ASM_EXTABLE(31b,16b)
		_ASM_EXTABLE(4b,16b)
		_ASM_EXTABLE(41b,16b)
		_ASM_EXTABLE(10b,16b)
		_ASM_EXTABLE(51b,16b)
		_ASM_EXTABLE(11b,16b)
		_ASM_EXTABLE(61b,16b)
		_ASM_EXTABLE(12b,16b)
		_ASM_EXTABLE(71b,16b)
		_ASM_EXTABLE(13b,16b)
		_ASM_EXTABLE(81b,16b)
		_ASM_EXTABLE(14b,16b)
		_ASM_EXTABLE(91b,16b)
		_ASM_EXTABLE(6b,9b)
		_ASM_EXTABLE(7b,16b)
		: "=&c"(size), "=&D" (d0), "=&S" (d1)
		: "1"(to), "2"(from), "0"(size)
		: "eax", "edx", "memory");
	return size;
}
522
#else

/*
 * Leave these declared but undefined.  There should not be any
 * references to them: if there are, the link fails loudly rather
 * than silently falling back to a wrong implementation.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
536
/* Generic arbitrary sized copy. */

/*
 * Copy @size bytes from %2 (@from) to %1 (@to), no zeroing on fault.
 * For copies > 7 bytes the destination is first aligned to 8 bytes
 * with a short "rep movsb", then the bulk goes via "rep movsl" and
 * the tail via "rep movsb".  Fixups: 5: (fault in the alignment
 * movsb) adds back the not-yet-copied bulk; 3: (fault in movsl)
 * converts remaining longs to bytes plus the tail count.  @size is
 * updated in place to the residual byte count (0 on success).
 */
#define __copy_user(to, from, size)					\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		" cmp $7,%0\n"						\
		" jbe 1f\n"						\
		" movl %1,%0\n"						\
		" negl %0\n"						\
		" andl $7,%0\n"						\
		" subl %0,%3\n"						\
		"4: rep; movsb\n"					\
		" movl %3,%0\n"						\
		" shrl $2,%0\n"						\
		" andl $3,%3\n"						\
		" .align 2,0x90\n"					\
		"0: rep; movsl\n"					\
		" movl %3,%0\n"						\
		"1: rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5: addl %3,%0\n"					\
		" jmp 2b\n"						\
		"3: lea 0(%3,%0,4),%0\n"				\
		" jmp 2b\n"						\
		".previous\n"						\
		_ASM_EXTABLE(4b,5b)					\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,2b)					\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)
570
/*
 * Same generic copy as __copy_user, but for the copy_from_user()
 * contract: on a fault the fixup path falls into 6:, which zeroes
 * the residual destination bytes with "rep stosb" (al = 0) before
 * reporting them.  @size is updated in place to the residual byte
 * count (0 on success).
 */
#define __copy_user_zeroing(to, from, size)				\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		" cmp $7,%0\n"						\
		" jbe 1f\n"						\
		" movl %1,%0\n"						\
		" negl %0\n"						\
		" andl $7,%0\n"						\
		" subl %0,%3\n"						\
		"4: rep; movsb\n"					\
		" movl %3,%0\n"						\
		" shrl $2,%0\n"						\
		" andl $3,%3\n"						\
		" .align 2,0x90\n"					\
		"0: rep; movsl\n"					\
		" movl %3,%0\n"						\
		"1: rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5: addl %3,%0\n"					\
		" jmp 6f\n"						\
		"3: lea 0(%3,%0,4),%0\n"				\
		"6: pushl %0\n"						\
		" pushl %%eax\n"					\
		" xorl %%eax,%%eax\n"					\
		" rep; stosb\n"						\
		" popl %%eax\n"						\
		" popl %0\n"						\
		" jmp 2b\n"						\
		".previous\n"						\
		_ASM_EXTABLE(4b,5b)					\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,6b)					\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)
609
/*
 * Low-level copy of @n bytes from kernel @from to user @to.
 * Returns the number of bytes NOT copied (0 on success).
 *
 * The CONFIG_X86_WP_WORKS_OK=n path handles old CPUs (i386) whose WP
 * bit is ignored in supervisor mode, so a read-only user page would
 * be silently written through instead of faulting for COW.
 */
unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <manfred@colorfullife.com>
		 */
		/* Copy page by page, faulting each user page in by hand. */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			/* Pin the destination page (write=1, force=1). */
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			/* init must not die on transient OOM: wait and retry. */
			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;	/* report the rest as uncopied */
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	/* Fast path: plain string copy unless alignment favors the
	 * unrolled Intel variant. */
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677
Hiro Yoshiokac22ce142006-06-23 02:04:16 -0700678unsigned long __copy_from_user_ll(void *to, const void __user *from,
679 unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681 if (movsl_is_ok(to, from, n))
682 __copy_user_zeroing(to, from, n);
683 else
684 n = __copy_user_zeroing_intel(to, from, n);
685 return n;
686}
Alexey Dobriyan129f6942005-06-23 00:08:33 -0700687EXPORT_SYMBOL(__copy_from_user_ll);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688
NeilBrown7c12d812006-06-25 05:48:02 -0700689unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
690 unsigned long n)
691{
NeilBrown7c12d812006-06-25 05:48:02 -0700692 if (movsl_is_ok(to, from, n))
693 __copy_user(to, from, n);
694 else
695 n = __copy_user_intel((void __user *)to,
696 (const void *)from, n);
697 return n;
698}
699EXPORT_SYMBOL(__copy_from_user_ll_nozero);
700
/*
 * Cache-avoiding variant of __copy_from_user_ll: for large copies on
 * SSE2-capable CPUs it uses non-temporal stores so the destination
 * does not displace cached data; still zeroes the uncopied tail on a
 * fault.  Returns the number of bytes NOT copied (0 on success).
 */
unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	/* movnti only pays off past 64 bytes and requires SSE2 */
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
Hiro Yoshiokac22ce142006-06-23 02:04:16 -0700715
/*
 * Cache-avoiding copy from user space WITHOUT zeroing the destination
 * tail on a fault: non-temporal path for large SSE2 copies, generic
 * rep-string copy otherwise.  Returns the number of bytes NOT copied
 * (0 on success).
 */
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	/* movnti only pays off past 64 bytes and requires SSE2 */
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
NeilBrown7c12d812006-06-25 05:48:02 -0700730
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731/**
732 * copy_to_user: - Copy a block of data into user space.
733 * @to: Destination address, in user space.
734 * @from: Source address, in kernel space.
735 * @n: Number of bytes to copy.
736 *
737 * Context: User context only. This function may sleep.
738 *
739 * Copy data from kernel space to user space.
740 *
741 * Returns number of bytes that could not be copied.
742 * On success, this will be zero.
743 */
744unsigned long
745copy_to_user(void __user *to, const void *from, unsigned long n)
746{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 if (access_ok(VERIFY_WRITE, to, n))
748 n = __copy_to_user(to, from, n);
749 return n;
750}
751EXPORT_SYMBOL(copy_to_user);
752
753/**
754 * copy_from_user: - Copy a block of data from user space.
755 * @to: Destination address, in kernel space.
756 * @from: Source address, in user space.
757 * @n: Number of bytes to copy.
758 *
759 * Context: User context only. This function may sleep.
760 *
761 * Copy data from user space to kernel space.
762 *
763 * Returns number of bytes that could not be copied.
764 * On success, this will be zero.
765 *
766 * If some data could not be copied, this function will pad the copied
767 * data to the requested size using zero bytes.
768 */
769unsigned long
Arjan van de Ven9f0cf4a2009-09-26 14:33:01 +0200770_copy_from_user(void *to, const void __user *from, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772 if (access_ok(VERIFY_READ, from, n))
773 n = __copy_from_user(to, from, n);
774 else
775 memset(to, 0, n);
776 return n;
777}
Arjan van de Ven9f0cf4a2009-09-26 14:33:01 +0200778EXPORT_SYMBOL(_copy_from_user);
Arjan van de Ven4a312762009-09-30 13:05:23 +0200779
/*
 * Out-of-line warning helper: emits a one-shot WARN with a stack
 * trace.  NOTE(review): presumably invoked by the compile-time
 * copy_from_user() bounds check when the requested length exceeds
 * the destination buffer — confirm against arch uaccess.h.
 */
void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);