X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Fasm-i386%2Fuaccess.h;h=e2aa5e0d0cc7fa9c1a4ec84002f98a80bf697f11;hb=5cbc30737398b49f62ae8603129ce43ac7db1a41;hp=371457b1ceb6de2b47e8fcf7d125f8e57488cfb5;hpb=30e931d4092713cecd6b8c2fd70f268efaa6e428;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 371457b..e2aa5e0 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -4,7 +4,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
@@ -55,11 +54,11 @@ extern struct movsl_mask {
  * This needs 33-bit arithmetic. We have a carry...
  */
 #define __range_ok(addr,size) ({ \
-        unsigned long flag,sum; \
+        unsigned long flag,roksum; \
         __chk_user_ptr(addr); \
         asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
-                :"=&r" (flag), "=r" (sum) \
-                :"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+                :"=&r" (flag), "=r" (roksum) \
+                :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
         flag; })
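
The asm in __range_ok() is a 33-bit check built from the carry flag: addl computes addr + size and leaves bit 33 in CF, the first sbbl smears that carry into flag, and cmpl plus the second sbbl subtracts one more when the sum exceeds addr_limit.seg, so flag is zero exactly when the whole range fits below the limit without wrapping. A runnable user-space model of the same test, with 64-bit arithmetic standing in for the carry; the function name, the 0xC0000000 limit and the return sense (1 for valid here, where the kernel macro yields 0) are illustrative assumptions, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Model of __range_ok(): valid iff addr + size, computed in 33 bits,
 * neither carries out of 32 bits nor exceeds the segment limit. */
static int range_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
        uint64_t end = (uint64_t)addr + size;   /* the "33-bit" sum */
        return end <= limit;
}

int main(void)
{
        uint32_t limit = 0xC0000000;    /* typical addr_limit.seg for USER_DS on i386 */

        printf("%d\n", range_ok(0xBFFFFFF0, 0x10, limit));  /* 1: ends exactly at the limit */
        printf("%d\n", range_ok(0xBFFFFFF0, 0x11, limit));  /* 0: last byte crosses into kernel space */
        printf("%d\n", range_ok(0xFFFFFFF0, 0x20, limit));  /* 0: the 32-bit sum would wrap */
        return 0;
}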
 
 /**
@@ -391,28 +390,32 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
                                 const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
                                 const void __user *from, unsigned long n);
-
-/*
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
- */
+unsigned long __must_check __copy_from_user_ll_nozero(void *to,
+                                const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+                                const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
+                                const void __user *from, unsigned long n);
 
 /**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
  * @to:   Destination address, in user space.
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
+ * The caller should also make sure the user space address is pinned,
+ * so that we do not take a page fault and sleep.
  *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
+ * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
+ * we return the initial request size (1, 2 or 4), as copy_*_user should do.
+ * If a store crosses a page boundary and gets a fault, the x86 will not write
+ * anything, so this is accurate.
  */
+
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
@@ -434,6 +437,20 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
         return __copy_to_user_ll(to, from, n);
 }
 
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from kernel space to user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
@@ -441,6 +458,32 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
         return __copy_to_user_inatomic(to, from, n);
 }
 
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+        /* Avoid zeroing the tail if the copy fails.
+         * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+         * but as the zeroing behaviour is only significant when n is not
+         * constant, that shouldn't be a problem.
+         */
+        if (__builtin_constant_p(n)) {
+                unsigned long ret;
+
+                switch (n) {
+                case 1:
+                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                        return ret;
+                case 2:
+                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                        return ret;
+                case 4:
+                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                        return ret;
+                }
+        }
+        return __copy_from_user_ll_nozero(to, from, n);
+}
+
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:   Destination address, in kernel space.
@@ -457,10 +500,16 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  *
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep.  In this case the
+ * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
+ * for explanation of why this is needed.
  */
 static __always_inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+__copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+        might_sleep();
         if (__builtin_constant_p(n)) {
                 unsigned long ret;
 
@@ -479,12 +528,36 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
         return __copy_from_user_ll(to, from, n);
 }
 
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
+                                const void __user *from, unsigned long n)
+{
+        might_sleep();
+        if (__builtin_constant_p(n)) {
+                unsigned long ret;
+
+                switch (n) {
+                case 1:
+                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                        return ret;
+                case 2:
+                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                        return ret;
+                case 4:
+                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                        return ret;
+                }
+        }
+        return __copy_from_user_ll_nocache(to, from, n);
+}
+
 static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
 {
-        might_sleep();
-        return __copy_from_user_inatomic(to, from, n);
+        return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
+
 unsigned long __must_check copy_to_user(void __user *to,
                                 const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
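
A note on the constant-size fast path the patch relies on throughout: when n is a compile-time constant 1, 2 or 4, __builtin_constant_p() lets the switch collapse to a single inlined __get_user_size()/__put_user_size(), so the out-of-line __copy_*_user_ll() call is never emitted. A minimal runnable user-space sketch of that dispatch idea; my_copy() and copy_slow() are hypothetical stand-ins for the inline wrapper and the _ll fallback, not kernel functions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the out-of-line __copy_*_user_ll() fallback. */
static unsigned long copy_slow(void *to, const void *from, unsigned long n)
{
        memcpy(to, from, n);
        return 0;                       /* bytes left uncopied */
}

static inline unsigned long my_copy(void *to, const void *from, unsigned long n)
{
        /* For constant n in {1,2,4} this folds to a single move at -O2;
         * the call below only survives for variable or odd sizes. */
        if (__builtin_constant_p(n)) {
                switch (n) {
                case 1: *(uint8_t *)to  = *(const uint8_t *)from;  return 0;
                case 2: *(uint16_t *)to = *(const uint16_t *)from; return 0;
                case 4: *(uint32_t *)to = *(const uint32_t *)from; return 0;
                }
        }
        return copy_slow(to, from, n);
}

int main(void)
{
        uint32_t src = 0xdeadbeef, dst = 0;

        my_copy(&dst, &src, sizeof(dst));       /* constant size: fast path */
        printf("0x%x\n", dst);
        return 0;
}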
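
The reason the non-zeroing __copy_from_user_inatomic() exists is the retry pattern the new comment points at in fs/filemap.h: try the copy under an atomic kmap first, and only fall back to the sleeping, zero-padding __copy_from_user() when it could not complete. A simplified sketch of that caller pattern as it looked in the 2.6.18-era page cache write path; the function name fault_in_and_copy() is mine and details are trimmed, so treat it as an illustration rather than the verbatim kernel code:

#include <linux/highmem.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static int fault_in_and_copy(struct page *page, unsigned long offset,
                             const char __user *buf, unsigned bytes)
{
        char *kaddr;
        unsigned long left;

        /* Fast path: atomic mapping, so the copy must not sleep.  On a
         * fault, __copy_from_user_inatomic() returns the uncopied count
         * instead of sleeping, and the tail is NOT zero-filled. */
        kaddr = kmap_atomic(page, KM_USER0);
        left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
        kunmap_atomic(kaddr, KM_USER0);

        if (left) {
                /* Slow path: a sleeping kmap, so the fault handler may
                 * page the user buffer in; leftover bytes get zeroed. */
                kaddr = kmap(page);
                left = __copy_from_user(kaddr + offset, buf, bytes);
                kunmap(page);
        }
        return left ? -EFAULT : 0;
}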
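
ARCH_HAS_NOCACHE_UACCESS and the *_nocache variants are for large copies whose destination will not be read back soon, such as a write() landing in the page cache: on i386 the __copy_from_user_ll_nocache* helpers use non-temporal movnti stores so the copy does not evict a whole cache footprint on the way through. The same store type is reachable from user space via SSE2 intrinsics; a runnable illustration (copy_nocache() is an illustrative name; build with -msse2 on 32-bit x86):

#include <emmintrin.h>  /* _mm_stream_si32, _mm_sfence */
#include <stddef.h>
#include <stdio.h>

/* Copy 32-bit words with non-temporal stores: the data goes to memory
 * through write-combining buffers instead of displacing cache lines. */
static void copy_nocache(int *dst, const int *src, size_t words)
{
        for (size_t i = 0; i < words; i++)
                _mm_stream_si32(&dst[i], src[i]);       /* movnti */
        _mm_sfence();   /* order the streaming stores before any reader */
}

int main(void)
{
        int src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, dst[8];

        copy_nocache(dst, src, 8);
        printf("%d ... %d\n", dst[0], dst[7]);
        return 0;
}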