x86: merge getuser.
[safe/jmp/linux-2.6] / include / asm-x86 / uaccess_32.h
1 #ifndef __i386_UACCESS_H
2 #define __i386_UACCESS_H
3
4 /*
5  * User space memory access functions
6  */
7 #include <linux/errno.h>
8 #include <linux/thread_info.h>
9 #include <linux/prefetch.h>
10 #include <linux/string.h>
11 #include <asm/asm.h>
12 #include <asm/page.h>
13
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
/* NOTE(review): presumably tuned at boot by arch code to decide when the
 * movsl-based copy routines need alignment handling -- set outside this
 * header; confirm against the usercopy implementation. */
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

/*
 * Nonzero if @addr lies below the current thread's address limit.
 * Note: checks only the single start address, not a whole range.
 */
#define __addr_ok(addr)                                 \
	((unsigned long __force)(addr) <                \
	 (current_thread_info()->addr_limit.seg))
26
/* Referenced for unsupported sizes; deliberately never defined, so misuse
 * is caught at link time. */
extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, no clobbers.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/*
 * Call the out-of-line __put_user_<size> helper.  The error code comes
 * back in %eax and is stored into __ret_pu, which the expansion site
 * (put_user() below) must have declared.
 * NOTE(review): %ebx is listed as clobbered even though the comment
 * above says "no clobbers" -- verify against the putuser assembly.
 */
#define __put_user_x(size, x, ptr)                              \
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

/*
 * 64-bit variant: the "A" constraint passes the value in %edx:%eax.
 * This function-like macro shadows the extern declaration above; the
 * "call __put_user_8" string is not subject to macro expansion, so the
 * call still reaches the assembly helper.
 */
#define __put_user_8(x, ptr)                                    \
	asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
45
46
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

/* Fast path for CPUs whose WP bit works from kernel mode: dispatch by
 * size to the out-of-line __put_user_<N> helpers, which leave the error
 * code in __ret_pu.  The default case references the undefined symbol
 * __put_user_X, turning an unsupported size into a link-time error. */
#define put_user(x, ptr)                                        \
({                                                              \
	int __ret_pu;                                           \
	__typeof__(*(ptr)) __pu_val;                            \
	__chk_user_ptr(ptr);                                    \
	__pu_val = x;                                           \
	switch (sizeof(*(ptr))) {                               \
	case 1:                                                 \
		__put_user_x(1, __pu_val, ptr);                 \
		break;                                          \
	case 2:                                                 \
		__put_user_x(2, __pu_val, ptr);                 \
		break;                                          \
	case 4:                                                 \
		__put_user_x(4, __pu_val, ptr);                 \
		break;                                          \
	case 8:                                                 \
		__put_user_8(__pu_val, ptr);                    \
		break;                                          \
	default:                                                \
		__put_user_x(X, __pu_val, ptr);                 \
		break;                                          \
	}                                                       \
	__ret_pu;                                               \
})

#else
/* Fallback when kernel-mode writes ignore the WP bit: route everything
 * through __copy_to_user_ll() via a kernel-side temporary. */
#define put_user(x, ptr)                                        \
({                                                              \
	int __ret_pu;                                           \
	__typeof__(*(ptr))__pus_tmp = x;                        \
	__ret_pu = 0;                                           \
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,         \
				       sizeof(*(ptr))) != 0))   \
		__ret_pu = -EFAULT;                             \
	__ret_pu;                                               \
})


#endif
105
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/* Thin wrapper: all the work happens in __get_user_nocheck() below. */
#define __get_user(x, ptr)                              \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
128
129
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
/* The cast forces @x to the pointee type before the size dispatch. */
#define __put_user(x, ptr)                                              \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/* Unchecked store: returns 0 on success or -EFAULT, as set by
 * __put_user_size(). */
#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
	long __pu_err;                                          \
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
	__pu_err;                                               \
})
158
159
/*
 * Store a 64-bit value to user space as two 32-bit moves; the "A"
 * constraint places @x in %edx:%eax.  Either faulting store is fixed up
 * via the exception table to label 4:, which loads -EFAULT into @err and
 * resumes after the sequence.  Note the fault on the second store can
 * occur after the low word was already written.
 */
#define __put_user_u64(x, addr, err)                                    \
	asm volatile("1:	movl %%eax,0(%2)\n"                     \
		     "2:	movl %%edx,4(%2)\n"                     \
		     "3:\n"                                             \
		     ".section .fixup,\"ax\"\n"                         \
		     "4:	movl %3,%0\n"                           \
		     "	jmp 3b\n"                                       \
		     ".previous\n"                                      \
		     _ASM_EXTABLE(1b, 4b)                               \
		     _ASM_EXTABLE(2b, 4b)                               \
		     : "=r" (err)                                       \
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
172
#ifdef CONFIG_X86_WP_WORKS_OK

/*
 * Size-dispatched user-space store.  @retval is set to 0 up front and
 * to @errret by the asm fixup on a fault.  The string triples passed to
 * __put_user_asm are: instruction suffix, register-name suffix, and the
 * gcc constraint for the value operand (e.g. "b", "b", "iq" gives a
 * movb from an immediate or byte-addressable register).  Unsupported
 * sizes reference the never-defined __put_user_bad() => link error.
 */
#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
	retval = 0;                                                     \
	__chk_user_ptr(ptr);                                            \
	switch (size) {                                                 \
	case 1:                                                         \
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
		break;                                                  \
	case 2:                                                         \
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
		break;                                                  \
	case 4:                                                         \
		__put_user_asm(x, ptr, retval, "l", "",  "ir", errret); \
		break;                                                  \
	case 8:                                                         \
		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);     \
		break;                                                  \
	default:                                                        \
		__put_user_bad();                                       \
	}                                                               \
} while (0)

#else

/* Non-WP fallback: copy through a kernel temporary with
 * __copy_to_user_ll(); note @errret is used instead of the byte count
 * on failure, matching the asm variant's semantics. */
#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
	__typeof__(*(ptr))__pus_tmp = x;                                \
	retval = 0;                                                     \
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))    \
		retval = errret;                                        \
} while (0)

#endif
/* Dummy oversized type: casting a user pointer to this and using it as a
 * "m" operand makes gcc treat the access as touching a large memory
 * region, without it needing to know the real size. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
/* Single store with exception-table fixup: on a fault, jump to 3:, load
 * @errret into @err and resume after the store.  @itype/@rtype/@ltype
 * are as described for __put_user_size() above. */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"              \
		     "2:\n"                                             \
		     ".section .fixup,\"ax\"\n"                         \
		     "3:	movl %3,%0\n"                           \
		     "	jmp 2b\n"                                       \
		     ".previous\n"                                      \
		     _ASM_EXTABLE(1b, 3b)                               \
		     : "=r"(err)                                        \
		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
228
/* Unchecked fetch of @size bytes from user space into (x); evaluates to
 * 0 on success or -EFAULT.  The value passes through an unsigned long,
 * and __get_user_size() below has no 8-byte case, so sizes above
 * 4 bytes are not supported here. */
#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
	long __gu_err;                                                  \
	unsigned long __gu_val;                                         \
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
	(x) = (__typeof__(*(ptr)))__gu_val;                             \
	__gu_err;                                                       \
})
237
/*
 * Size-dispatched user-space load; mirror image of __put_user_size().
 * @retval is 0 on success, @errret after a fault (set by the asm fixup,
 * which also zeroes the destination).
 * NOTE(review): __get_user_bad() has no visible declaration in this
 * header -- presumably declared in an included header; verify.
 */
#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
	retval = 0;                                                     \
	__chk_user_ptr(ptr);                                            \
	switch (size) {                                                 \
	case 1:                                                         \
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
		break;                                                  \
	case 2:                                                         \
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
		break;                                                  \
	case 4:                                                         \
		__get_user_asm(x, ptr, retval, "l", "", "=r", errret);  \
		break;                                                  \
	default:                                                        \
		(x) = __get_user_bad();                                 \
	}                                                               \
} while (0)
256
/* Single load with exception-table fixup: on a fault, the fixup at 3:
 * stores @errret into @err and zeroes the destination register (the
 * xor), giving the documented "value is zero on error" behaviour. */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"              \
		     "2:\n"                                             \
		     ".section .fixup,\"ax\"\n"                         \
		     "3:	movl %3,%0\n"                           \
		     "	xor"itype" %"rtype"1,%"rtype"1\n"               \
		     "	jmp 2b\n"                                       \
		     ".previous\n"                                      \
		     _ASM_EXTABLE(1b, 3b)                               \
		     : "=r" (err), ltype (x)                            \
		     : "m" (__m(addr)), "i" (errret), "0" (err))
269
/* Out-of-line bulk copy primitives; each returns the number of bytes
 * that could NOT be copied (0 on full success).  The _nozero variants
 * skip zero-filling the destination tail on a fault; the _nocache
 * variants presumably use non-temporal stores -- confirm against the
 * implementation. */
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
280
281 /**
282  * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
283  * @to:   Destination address, in user space.
284  * @from: Source address, in kernel space.
285  * @n:    Number of bytes to copy.
286  *
287  * Context: User context only.
288  *
289  * Copy data from kernel space to user space.  Caller must check
290  * the specified block with access_ok() before calling this function.
291  * The caller should also make sure he pins the user space address
292  * so that the we don't result in page fault and sleep.
293  *
294  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
295  * we return the initial request size (1, 2 or 4), as copy_*_user should do.
296  * If a store crosses a page boundary and gets a fault, the x86 will not write
297  * anything, so this is accurate.
298  */
299
300 static __always_inline unsigned long __must_check
301 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
302 {
303         if (__builtin_constant_p(n)) {
304                 unsigned long ret;
305
306                 switch (n) {
307                 case 1:
308                         __put_user_size(*(u8 *)from, (u8 __user *)to,
309                                         1, ret, 1);
310                         return ret;
311                 case 2:
312                         __put_user_size(*(u16 *)from, (u16 __user *)to,
313                                         2, ret, 2);
314                         return ret;
315                 case 4:
316                         __put_user_size(*(u32 *)from, (u32 __user *)to,
317                                         4, ret, 4);
318                         return ret;
319                 }
320         }
321         return __copy_to_user_ll(to, from, n);
322 }
323
324 /**
325  * __copy_to_user: - Copy a block of data into user space, with less checking.
326  * @to:   Destination address, in user space.
327  * @from: Source address, in kernel space.
328  * @n:    Number of bytes to copy.
329  *
330  * Context: User context only.  This function may sleep.
331  *
332  * Copy data from kernel space to user space.  Caller must check
333  * the specified block with access_ok() before calling this function.
334  *
335  * Returns number of bytes that could not be copied.
336  * On success, this will be zero.
337  */
338 static __always_inline unsigned long __must_check
339 __copy_to_user(void __user *to, const void *from, unsigned long n)
340 {
341        might_sleep();
342        return __copy_to_user_inatomic(to, from, n);
343 }
344
345 static __always_inline unsigned long
346 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
347 {
348         /* Avoid zeroing the tail if the copy fails..
349          * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
350          * but as the zeroing behaviour is only significant when n is not
351          * constant, that shouldn't be a problem.
352          */
353         if (__builtin_constant_p(n)) {
354                 unsigned long ret;
355
356                 switch (n) {
357                 case 1:
358                         __get_user_size(*(u8 *)to, from, 1, ret, 1);
359                         return ret;
360                 case 2:
361                         __get_user_size(*(u16 *)to, from, 2, ret, 2);
362                         return ret;
363                 case 4:
364                         __get_user_size(*(u32 *)to, from, 4, ret, 4);
365                         return ret;
366                 }
367         }
368         return __copy_from_user_ll_nozero(to, from, n);
369 }
370
371 /**
372  * __copy_from_user: - Copy a block of data from user space, with less checking.
373  * @to:   Destination address, in kernel space.
374  * @from: Source address, in user space.
375  * @n:    Number of bytes to copy.
376  *
377  * Context: User context only.  This function may sleep.
378  *
379  * Copy data from user space to kernel space.  Caller must check
380  * the specified block with access_ok() before calling this function.
381  *
382  * Returns number of bytes that could not be copied.
383  * On success, this will be zero.
384  *
385  * If some data could not be copied, this function will pad the copied
386  * data to the requested size using zero bytes.
387  *
388  * An alternate version - __copy_from_user_inatomic() - may be called from
389  * atomic context and will fail rather than sleep.  In this case the
390  * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
391  * for explanation of why this is needed.
392  */
393 static __always_inline unsigned long
394 __copy_from_user(void *to, const void __user *from, unsigned long n)
395 {
396         might_sleep();
397         if (__builtin_constant_p(n)) {
398                 unsigned long ret;
399
400                 switch (n) {
401                 case 1:
402                         __get_user_size(*(u8 *)to, from, 1, ret, 1);
403                         return ret;
404                 case 2:
405                         __get_user_size(*(u16 *)to, from, 2, ret, 2);
406                         return ret;
407                 case 4:
408                         __get_user_size(*(u32 *)to, from, 4, ret, 4);
409                         return ret;
410                 }
411         }
412         return __copy_from_user_ll(to, from, n);
413 }
414
415 #define ARCH_HAS_NOCACHE_UACCESS
416
417 static __always_inline unsigned long __copy_from_user_nocache(void *to,
418                                 const void __user *from, unsigned long n)
419 {
420         might_sleep();
421         if (__builtin_constant_p(n)) {
422                 unsigned long ret;
423
424                 switch (n) {
425                 case 1:
426                         __get_user_size(*(u8 *)to, from, 1, ret, 1);
427                         return ret;
428                 case 2:
429                         __get_user_size(*(u16 *)to, from, 2, ret, 2);
430                         return ret;
431                 case 4:
432                         __get_user_size(*(u32 *)to, from, 4, ret, 4);
433                         return ret;
434                 }
435         }
436         return __copy_from_user_ll_nocache(to, from, n);
437 }
438
/* Atomic-context _nocache variant: no might_sleep(), and the uncopied
 * tail is not zero-filled (delegates to the _nocache_nozero routine). */
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
445
/* Fully-checked copy routines; return the number of bytes that could
 * not be copied (0 on success).  NOTE(review): presumably these perform
 * the access_ok() range check themselves, unlike the __-prefixed
 * variants -- confirm against the out-of-line implementations. */
unsigned long __must_check copy_to_user(void __user *to,
					const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n);
/* Copy a NUL-terminated string from user space, at most @count bytes;
 * the __ variant is the unchecked form. */
long __must_check strncpy_from_user(char *dst, const char __user *src,
				    long count);
long __must_check __strncpy_from_user(char *dst,
				      const char __user *src, long count);
455
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)

/* Bounded form of strlen_user(): scans at most @n bytes. */
long strnlen_user(const char __user *str, long n);
/* Zero @len bytes of user memory; return the number of bytes NOT
 * cleared.  The __ variant is the unchecked form. */
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
475
476 #endif /* __i386_UACCESS_H */