Merge commit 'v2.6.28-rc2' into core/locking
author Ingo Molnar <mingo@elte.hu>
Tue, 28 Oct 2008 15:54:49 +0000 (16:54 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 28 Oct 2008 15:54:49 +0000 (16:54 +0100)
Conflicts:
arch/um/include/asm/system.h

arch/um/include/asm/system.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
arch/x86/lib/usercopy_32.c
include/linux/kernel.h
kernel/sched.c
mm/memory.c

diff --cc arch/um/include/asm/system.h
index 0000000,753346e..ae5f94d
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,35 +1,35 @@@
+ #ifndef __UM_SYSTEM_GENERIC_H
+ #define __UM_SYSTEM_GENERIC_H
+ #include "sysdep/system.h"
+ extern void *switch_to(void *prev, void *next, void *last);
+ extern int get_signals(void);
+ extern int set_signals(int enable);
+ extern void block_signals(void);
+ extern void unblock_signals(void);
 -#define local_save_flags(flags) do { typecheck(unsigned long, flags); \
++#define raw_local_save_flags(flags) do { typecheck(unsigned long, flags); \
+                                    (flags) = get_signals(); } while(0)
 -#define local_irq_restore(flags) do { typecheck(unsigned long, flags); \
++#define raw_local_irq_restore(flags) do { typecheck(unsigned long, flags); \
+                                     set_signals(flags); } while(0)
 -#define local_irq_save(flags) do { local_save_flags(flags); \
 -                                   local_irq_disable(); } while(0)
++#define raw_local_irq_save(flags) do { raw_local_save_flags(flags); \
++                                   raw_local_irq_disable(); } while(0)
 -#define local_irq_enable() unblock_signals()
 -#define local_irq_disable() block_signals()
++#define raw_local_irq_enable() unblock_signals()
++#define raw_local_irq_disable() block_signals()
+ #define irqs_disabled()                 \
+ ({                                      \
+         unsigned long flags;            \
 -        local_save_flags(flags);        \
++        raw_local_save_flags(flags);        \
+         (flags == 0);                   \
+ })
+ extern void *_switch_to(void *prev, void *next, void *last);
+ #define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
+ #endif
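As an illustration of how the raw_* primitives above are consumed (a minimal sketch, not part of this merge; the function and variable names are made up), a short critical section on UML looks the same as on any other architecture, except that disabling "interrupts" really means blocking host signals:

/* Hypothetical example: flags must be unsigned long, typecheck() enforces it. */
static int event_count;

static void note_event(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* get_signals() + block_signals() on UML */
	event_count++;			/* no signal handler can run here */
	raw_local_irq_restore(flags);	/* set_signals(flags) */
}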
diff --cc arch/x86/include/asm/uaccess.h
index 0000000,35c5492..99192bb
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,454 +1,456 @@@
+ #ifndef _ASM_X86_UACCESS_H
+ #define _ASM_X86_UACCESS_H
+ /*
+  * User space memory access functions
+  */
+ #include <linux/errno.h>
+ #include <linux/compiler.h>
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+ /*
+  * The fs value determines whether argument validity checking should be
+  * performed or not.  If get_fs() == USER_DS, checking is performed, with
+  * get_fs() == KERNEL_DS, checking is bypassed.
+  *
+  * For historical reasons, these macros are grossly misnamed.
+  */
+ #define MAKE_MM_SEG(s)        ((mm_segment_t) { (s) })
+ #define KERNEL_DS     MAKE_MM_SEG(-1UL)
+ #define USER_DS               MAKE_MM_SEG(PAGE_OFFSET)
+ #define get_ds()      (KERNEL_DS)
+ #define get_fs()      (current_thread_info()->addr_limit)
+ #define set_fs(x)     (current_thread_info()->addr_limit = (x))
+ #define segment_eq(a, b)      ((a).seg == (b).seg)
+ #define __addr_ok(addr)                                       \
+       ((unsigned long __force)(addr) <                \
+        (current_thread_info()->addr_limit.seg))
+ /*
+  * Test whether a block of memory is a valid user space address.
+  * Returns 0 if the range is valid, nonzero otherwise.
+  *
+  * This is equivalent to the following test:
+  * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
+  *
+  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
+  */
+ #define __range_not_ok(addr, size)                                    \
+ ({                                                                    \
+       unsigned long flag, roksum;                                     \
+       __chk_user_ptr(addr);                                           \
+       asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"             \
+           : "=&r" (flag), "=r" (roksum)                               \
+           : "1" (addr), "g" ((long)(size)),                           \
+             "rm" (current_thread_info()->addr_limit.seg));            \
+       flag;                                                           \
+ })
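To make the carry trick above concrete (a paraphrase for the 32-bit case only, not part of this merge), the inline assembly effectively forms the 33-bit sum addr + size and rejects the range when that sum exceeds addr_limit; in portable C the same idea can be sketched with a wider integer type:

/*
 * Sketch of the same check with an explicit wide sum (32-bit case).
 * A plain 32-bit addition could wrap around and wrongly pass the test.
 * Example with USER_DS == 0xc0000000:
 *   addr = 0xbfffff00, size = 0x100 -> sum = 0xc0000000  -> ok
 *   addr = 0xffffff00, size = 0x200 -> sum = 0x100000100 -> not ok
 */
static inline int range_not_ok_sketch(unsigned long addr, unsigned long size,
				      unsigned long limit)
{
	unsigned long long sum = (unsigned long long)addr + size;

	return sum > limit;	/* non-zero means "not ok", as in the asm */
}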
+ /**
+  * access_ok: - Checks if a user space pointer is valid
+  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
+  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+  *        to write to a block, it is always safe to read from it.
+  * @addr: User space pointer to start of block to check
+  * @size: Size of block to check
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * Checks if a pointer to a block of memory in user space is valid.
+  *
+  * Returns true (nonzero) if the memory block may be valid, false (zero)
+  * if it is definitely invalid.
+  *
+  * Note that, depending on architecture, this function probably just
+  * checks that the pointer is in the user space range - after calling
+  * this function, memory access functions may still return -EFAULT.
+  */
+ #define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+ /*
+  * The exception table consists of pairs of addresses: the first is the
+  * address of an instruction that is allowed to fault, and the second is
+  * the address at which the program should continue.  No registers are
+  * modified, so it is entirely up to the continuation code to figure out
+  * what to do.
+  *
+  * All the routines below use bits of fixup code that are out of line
+  * with the main instruction path.  This means when everything is well,
+  * we don't even have to jump over them.  Further, they do not intrude
+  * on our cache or tlb entries.
+  */
+ struct exception_table_entry {
+       unsigned long insn, fixup;
+ };
+ extern int fixup_exception(struct pt_regs *regs);
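The table described above is consulted from the page-fault path; the sketch below (simplified and not part of this merge, with a linear scan instead of the sorted binary search the kernel actually uses) shows the intended mechanics:

/*
 * Simplified model of fixup_exception(): if the faulting kernel
 * instruction is listed in the table, resume execution at its fixup
 * stub instead of oopsing.
 */
static int fixup_exception_sketch(struct pt_regs *regs,
				  const struct exception_table_entry *first,
				  const struct exception_table_entry *last)
{
	const struct exception_table_entry *e;

	for (e = first; e <= last; e++) {
		if (e->insn == regs->ip) {
			regs->ip = e->fixup;	/* continue in the fixup code */
			return 1;
		}
	}
	return 0;	/* not a whitelisted access: report a kernel bug */
}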
+ /*
+  * These are the main single-value transfer routines.  They automatically
+  * use the right size if we just have the right pointer type.
+  *
+  * This gets kind of ugly. We want to return _two_ values in "get_user()"
+  * and yet we don't want to do any pointers, because that is too much
+  * of a performance impact. Thus we have a few rather ugly macros here,
+  * and hide all the ugliness from the user.
+  *
+  * The "__xxx" versions of the user access functions are versions that
+  * do not verify the address space, that must have been done previously
+  * with a separate "access_ok()" call (this is used when we do multiple
+  * accesses to the same area of user memory).
+  */
+ extern int __get_user_1(void);
+ extern int __get_user_2(void);
+ extern int __get_user_4(void);
+ extern int __get_user_8(void);
+ extern int __get_user_bad(void);
+ #define __get_user_x(size, ret, x, ptr)                     \
+       asm volatile("call __get_user_" #size         \
+                    : "=a" (ret),"=d" (x)            \
+                    : "0" (ptr))                     \
+ /* Careful: we have to cast the result to the type of the pointer
+  * for sign reasons */
+ /**
+  * get_user: - Get a simple variable from user space.
+  * @x:   Variable to store result.
+  * @ptr: Source address, in user space.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * This macro copies a single simple variable from user space to kernel
+  * space.  It supports simple types like char and int, but not larger
+  * data types like structures or arrays.
+  *
+  * @ptr must have pointer-to-simple-variable type, and the result of
+  * dereferencing @ptr must be assignable to @x without a cast.
+  *
+  * Returns zero on success, or -EFAULT on error.
+  * On error, the variable @x is set to zero.
+  */
+ #ifdef CONFIG_X86_32
+ #define __get_user_8(__ret_gu, __val_gu, ptr)                         \
+               __get_user_x(X, __ret_gu, __val_gu, ptr)
+ #else
+ #define __get_user_8(__ret_gu, __val_gu, ptr)                         \
+               __get_user_x(8, __ret_gu, __val_gu, ptr)
+ #endif
+ #define get_user(x, ptr)                                              \
+ ({                                                                    \
+       int __ret_gu;                                                   \
+       unsigned long __val_gu;                                         \
+       __chk_user_ptr(ptr);                                            \
++      might_fault();                                                  \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               __get_user_x(1, __ret_gu, __val_gu, ptr);               \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_x(2, __ret_gu, __val_gu, ptr);               \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_x(4, __ret_gu, __val_gu, ptr);               \
+               break;                                                  \
+       case 8:                                                         \
+               __get_user_8(__ret_gu, __val_gu, ptr);                  \
+               break;                                                  \
+       default:                                                        \
+               __get_user_x(X, __ret_gu, __val_gu, ptr);               \
+               break;                                                  \
+       }                                                               \
+       (x) = (__typeof__(*(ptr)))__val_gu;                             \
+       __ret_gu;                                                       \
+ })
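A typical call site looks like the sketch below (hypothetical code, not part of this merge): the macro both fetches the value and reports success, so the result must be checked.

/* Read one int the caller passed by user-space address. */
static long read_user_int(const int __user *uptr, int *out)
{
	int val;

	if (get_user(val, uptr))	/* 0 on success, -EFAULT on fault (val set to 0) */
		return -EFAULT;

	*out = val;
	return 0;
}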
+ #define __put_user_x(size, x, ptr, __ret_pu)                  \
+       asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+                    :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+ #ifdef CONFIG_X86_32
+ #define __put_user_u64(x, addr, err)                                  \
+       asm volatile("1:        movl %%eax,0(%2)\n"                     \
+                    "2:        movl %%edx,4(%2)\n"                     \
+                    "3:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "4:        movl %3,%0\n"                           \
+                    "  jmp 3b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 4b)                               \
+                    _ASM_EXTABLE(2b, 4b)                               \
+                    : "=r" (err)                                       \
+                    : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+ #define __put_user_x8(x, ptr, __ret_pu)                               \
+       asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
+                    : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+ #else
+ #define __put_user_u64(x, ptr, retval) \
+       __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+ #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
+ #endif
+ extern void __put_user_bad(void);
+ /*
+  * Strange magic calling convention: pointer in %ecx,
+  * value in %eax(:%edx), return value in %eax. clobbers %rbx
+  */
+ extern void __put_user_1(void);
+ extern void __put_user_2(void);
+ extern void __put_user_4(void);
+ extern void __put_user_8(void);
+ #ifdef CONFIG_X86_WP_WORKS_OK
+ /**
+  * put_user: - Write a simple value into user space.
+  * @x:   Value to copy to user space.
+  * @ptr: Destination address, in user space.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * This macro copies a single simple value from kernel space to user
+  * space.  It supports simple types like char and int, but not larger
+  * data types like structures or arrays.
+  *
+  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+  * to the result of dereferencing @ptr.
+  *
+  * Returns zero on success, or -EFAULT on error.
+  */
+ #define put_user(x, ptr)                                      \
+ ({                                                            \
+       int __ret_pu;                                           \
+       __typeof__(*(ptr)) __pu_val;                            \
+       __chk_user_ptr(ptr);                                    \
++      might_fault();                                          \
+       __pu_val = x;                                           \
+       switch (sizeof(*(ptr))) {                               \
+       case 1:                                                 \
+               __put_user_x(1, __pu_val, ptr, __ret_pu);       \
+               break;                                          \
+       case 2:                                                 \
+               __put_user_x(2, __pu_val, ptr, __ret_pu);       \
+               break;                                          \
+       case 4:                                                 \
+               __put_user_x(4, __pu_val, ptr, __ret_pu);       \
+               break;                                          \
+       case 8:                                                 \
+               __put_user_x8(__pu_val, ptr, __ret_pu);         \
+               break;                                          \
+       default:                                                \
+               __put_user_x(X, __pu_val, ptr, __ret_pu);       \
+               break;                                          \
+       }                                                       \
+       __ret_pu;                                               \
+ })
+ #define __put_user_size(x, ptr, size, retval, errret)                 \
+ do {                                                                  \
+       retval = 0;                                                     \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm(x, ptr, retval, "l", "k",  "ir", errret);\
+               break;                                                  \
+       case 8:                                                         \
+               __put_user_u64((__typeof__(*ptr))(x), ptr, retval);     \
+               break;                                                  \
+       default:                                                        \
+               __put_user_bad();                                       \
+       }                                                               \
+ } while (0)
+ #else
+ #define __put_user_size(x, ptr, size, retval, errret)                 \
+ do {                                                                  \
+       __typeof__(*(ptr))__pus_tmp = x;                                \
+       retval = 0;                                                     \
+                                                                       \
+       if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))    \
+               retval = errret;                                        \
+ } while (0)
+ #define put_user(x, ptr)                                      \
+ ({                                                            \
+       int __ret_pu;                                           \
+       __typeof__(*(ptr))__pus_tmp = x;                        \
+       __ret_pu = 0;                                           \
+       if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,         \
+                                      sizeof(*(ptr))) != 0))   \
+               __ret_pu = -EFAULT;                             \
+       __ret_pu;                                               \
+ })
+ #endif
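Whichever definition is selected above, put_user() is used the same way; a minimal sketch (hypothetical code, not part of this merge):

/* Write a status value back through a user-supplied pointer. */
static long report_status(int __user *uptr, int status)
{
	return put_user(status, uptr) ? -EFAULT : 0;
}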
+ #ifdef CONFIG_X86_32
+ #define __get_user_asm_u64(x, ptr, retval, errret)    (x) = __get_user_bad()
+ #else
+ #define __get_user_asm_u64(x, ptr, retval, errret) \
+        __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+ #endif
+ #define __get_user_size(x, ptr, size, retval, errret)                 \
+ do {                                                                  \
+       retval = 0;                                                     \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
+               break;                                                  \
+       case 8:                                                         \
+               __get_user_asm_u64(x, ptr, retval, errret);             \
+               break;                                                  \
+       default:                                                        \
+               (x) = __get_user_bad();                                 \
+       }                                                               \
+ } while (0)
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
+       asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+                    "  xor"itype" %"rtype"1,%"rtype"1\n"               \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : "=r" (err), ltype(x)                             \
+                    : "m" (__m(addr)), "i" (errret), "0" (err))
+ #define __put_user_nocheck(x, ptr, size)                      \
+ ({                                                            \
+       long __pu_err;                                          \
+       __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+       __pu_err;                                               \
+ })
+ #define __get_user_nocheck(x, ptr, size)                              \
+ ({                                                                    \
+       long __gu_err;                                                  \
+       unsigned long __gu_val;                                         \
+       __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       __gu_err;                                                       \
+ })
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+ #define __m(x) (*(struct __large_struct __user *)(x))
+ /*
+  * Tell gcc we read from memory instead of writing: this is because
+  * we do not write to any memory gcc knows about, so there are no
+  * aliasing issues.
+  */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)     \
+       asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:        mov %3,%0\n"                            \
+                    "  jmp 2b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : "=r"(err)                                        \
+                    : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+ /**
+  * __get_user: - Get a simple variable from user space, with less checking.
+  * @x:   Variable to store result.
+  * @ptr: Source address, in user space.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * This macro copies a single simple variable from user space to kernel
+  * space.  It supports simple types like char and int, but not larger
+  * data types like structures or arrays.
+  *
+  * @ptr must have pointer-to-simple-variable type, and the result of
+  * dereferencing @ptr must be assignable to @x without a cast.
+  *
+  * Caller must check the pointer with access_ok() before calling this
+  * function.
+  *
+  * Returns zero on success, or -EFAULT on error.
+  * On error, the variable @x is set to zero.
+  */
+ #define __get_user(x, ptr)                                            \
+       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+ /**
+  * __put_user: - Write a simple value into user space, with less checking.
+  * @x:   Value to copy to user space.
+  * @ptr: Destination address, in user space.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * This macro copies a single simple value from kernel space to user
+  * space.  It supports simple types like char and int, but not larger
+  * data types like structures or arrays.
+  *
+  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+  * to the result of dereferencing @ptr.
+  *
+  * Caller must check the pointer with access_ok() before calling this
+  * function.
+  *
+  * Returns zero on success, or -EFAULT on error.
+  */
+ #define __put_user(x, ptr)                                            \
+       __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
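The unchecked variants pay off when several fields of one user structure are touched: the range is validated once and the cheap accessors are used afterwards. A sketch of that pattern (the struct and function names are made up, not part of this merge):

struct sketch_pair {
	int in;
	int out;
};

static long double_in_place(struct sketch_pair __user *p)
{
	int v;

	/* One range check covers both fields; VERIFY_WRITE implies read access. */
	if (!access_ok(VERIFY_WRITE, p, sizeof(*p)))
		return -EFAULT;

	if (__get_user(v, &p->in))		/* may still fault -> -EFAULT */
		return -EFAULT;

	return __put_user(v * 2, &p->out) ? -EFAULT : 0;
}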
+ /*
+  * movsl can be slow when source and dest are not both 8-byte aligned
+  */
+ #ifdef CONFIG_X86_INTEL_USERCOPY
+ extern struct movsl_mask {
+       int mask;
+ } ____cacheline_aligned_in_smp movsl_mask;
+ #endif
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
+ # define ARCH_HAS_SEARCH_EXTABLE
+ # include "uaccess_64.h"
+ #endif
+ #endif /* _ASM_X86_UACCESS_H */
diff --cc arch/x86/include/asm/uaccess_32.h
index 0000000,d095a3a..5e06259
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,218 +1,218 @@@
+ #ifndef _ASM_X86_UACCESS_32_H
+ #define _ASM_X86_UACCESS_32_H
+ /*
+  * User space memory access functions
+  */
+ #include <linux/errno.h>
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+ unsigned long __must_check __copy_to_user_ll
+               (void __user *to, const void *from, unsigned long n);
+ unsigned long __must_check __copy_from_user_ll
+               (void *to, const void __user *from, unsigned long n);
+ unsigned long __must_check __copy_from_user_ll_nozero
+               (void *to, const void __user *from, unsigned long n);
+ unsigned long __must_check __copy_from_user_ll_nocache
+               (void *to, const void __user *from, unsigned long n);
+ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+               (void *to, const void __user *from, unsigned long n);
+ /**
+  * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+  * @to:   Destination address, in user space.
+  * @from: Source address, in kernel space.
+  * @n:    Number of bytes to copy.
+  *
+  * Context: User context only.
+  *
+  * Copy data from kernel space to user space.  Caller must check
+  * the specified block with access_ok() before calling this function.
+  * The caller should also make sure the user space address is pinned
+  * so that we don't take a page fault and sleep.
+  *
+  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
+  * we return the initial request size (1, 2 or 4), as copy_*_user should do.
+  * If a store crosses a page boundary and gets a fault, the x86 will not write
+  * anything, so this is accurate.
+  */
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+               switch (n) {
+               case 1:
+                       __put_user_size(*(u8 *)from, (u8 __user *)to,
+                                       1, ret, 1);
+                       return ret;
+               case 2:
+                       __put_user_size(*(u16 *)from, (u16 __user *)to,
+                                       2, ret, 2);
+                       return ret;
+               case 4:
+                       __put_user_size(*(u32 *)from, (u32 __user *)to,
+                                       4, ret, 4);
+                       return ret;
+               }
+       }
+       return __copy_to_user_ll(to, from, n);
+ }
+ /**
+  * __copy_to_user: - Copy a block of data into user space, with less checking.
+  * @to:   Destination address, in user space.
+  * @from: Source address, in kernel space.
+  * @n:    Number of bytes to copy.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * Copy data from kernel space to user space.  Caller must check
+  * the specified block with access_ok() before calling this function.
+  *
+  * Returns number of bytes that could not be copied.
+  * On success, this will be zero.
+  */
+ static __always_inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
 -      might_sleep();
 -      return __copy_to_user_inatomic(to, from, n);
++      might_fault();
++      return __copy_to_user_inatomic(to, from, n);
+ }
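For completeness, the checking copy_to_user() declared further down is what most callers use directly; its return value is the number of bytes left uncopied, conventionally folded into -EFAULT (hypothetical sketch, not part of this merge):

static long give_report(void __user *ubuf, const void *kbuf, unsigned long len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;		/* some bytes could not be written */
	return 0;
}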
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
+       /* Avoid zeroing the tail if the copy fails..
+        * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+        * but as the zeroing behaviour is only significant when n is not
+        * constant, that shouldn't be a problem.
+        */
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+               switch (n) {
+               case 1:
+                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       return ret;
+               case 2:
+                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       return ret;
+               case 4:
+                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       return ret;
+               }
+       }
+       return __copy_from_user_ll_nozero(to, from, n);
+ }
+ /**
+  * __copy_from_user: - Copy a block of data from user space, with less checking.
+  * @to:   Destination address, in kernel space.
+  * @from: Source address, in user space.
+  * @n:    Number of bytes to copy.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * Copy data from user space to kernel space.  Caller must check
+  * the specified block with access_ok() before calling this function.
+  *
+  * Returns number of bytes that could not be copied.
+  * On success, this will be zero.
+  *
+  * If some data could not be copied, this function will pad the copied
+  * data to the requested size using zero bytes.
+  *
+  * An alternate version - __copy_from_user_inatomic() - may be called from
+  * atomic context and will fail rather than sleep.  In this case the
+  * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
+  * for explanation of why this is needed.
+  */
+ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
 -      might_sleep();
++      might_fault();
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+               switch (n) {
+               case 1:
+                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       return ret;
+               case 2:
+                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       return ret;
+               case 4:
+                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       return ret;
+               }
+       }
+       return __copy_from_user_ll(to, from, n);
+ }
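The zero-padding behaviour documented above matters when the caller goes on to use the buffer regardless of a partial fault; a minimal sketch of the common pattern (hypothetical code, not part of this merge):

static long fetch_cmd(void *kbuf, const void __user *ubuf, unsigned long len)
{
	if (len > PAGE_SIZE)
		return -EINVAL;			/* arbitrary illustrative cap */

	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;			/* tail of kbuf was zero-filled */

	return 0;
}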
+ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+                               const void __user *from, unsigned long n)
+ {
 -      might_sleep();
++      might_fault();
+       if (__builtin_constant_p(n)) {
+               unsigned long ret;
+               switch (n) {
+               case 1:
+                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
+                       return ret;
+               case 2:
+                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
+                       return ret;
+               case 4:
+                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
+                       return ret;
+               }
+       }
+       return __copy_from_user_ll_nocache(to, from, n);
+ }
+ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+                                 unsigned long n)
+ {
+        return __copy_from_user_ll_nocache_nozero(to, from, n);
+ }
+ unsigned long __must_check copy_to_user(void __user *to,
+                                       const void *from, unsigned long n);
+ unsigned long __must_check copy_from_user(void *to,
+                                         const void __user *from,
+                                         unsigned long n);
+ long __must_check strncpy_from_user(char *dst, const char __user *src,
+                                   long count);
+ long __must_check __strncpy_from_user(char *dst,
+                                     const char __user *src, long count);
+ /**
+  * strlen_user: - Get the size of a string in user space.
+  * @str: The string to measure.
+  *
+  * Context: User context only.  This function may sleep.
+  *
+  * Get the size of a NUL-terminated string in user space.
+  *
+  * Returns the size of the string INCLUDING the terminating NUL.
+  * On exception, returns 0.
+  *
+  * If there is a limit on the length of a valid string, you may wish to
+  * consider using strnlen_user() instead.
+  */
+ #define strlen_user(str) strnlen_user(str, LONG_MAX)
+ long strnlen_user(const char __user *str, long n);
+ unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+ unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+ #endif /* _ASM_X86_UACCESS_32_H */
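Following the advice above to prefer strnlen_user() over strlen_user(), a bounded string fetch can be sketched as follows (hypothetical code, not part of this merge):

static long fetch_name(char *dst, const char __user *src, long dst_size)
{
	long len = strnlen_user(src, dst_size);	/* includes the NUL; 0 on fault */

	if (len == 0 || len > dst_size)
		return -EFAULT;			/* faulted, or does not fit in dst */

	return strncpy_from_user(dst, src, len) < 0 ? -EFAULT : 0;
}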
diff --cc arch/x86/include/asm/uaccess_64.h
index 0000000,664f152..543ba88
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,202 +1,208 @@@
+ #ifndef _ASM_X86_UACCESS_64_H
+ #define _ASM_X86_UACCESS_64_H
+ /*
+  * User space memory access functions
+  */
+ #include <linux/compiler.h>
+ #include <linux/errno.h>
+ #include <linux/prefetch.h>
+ #include <linux/lockdep.h>
+ #include <asm/page.h>
+ /*
+  * Copy To/From Userspace
+  */
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+ copy_user_generic(void *to, const void *from, unsigned len);
+ __must_check unsigned long
+ copy_to_user(void __user *to, const void *from, unsigned len);
+ __must_check unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned len);
+ __must_check unsigned long
+ copy_in_user(void __user *to, const void __user *from, unsigned len);
+ static __always_inline __must_check
+ int __copy_from_user(void *dst, const void __user *src, unsigned size)
+ {
+       int ret = 0;
++
++      might_fault();
+       if (!__builtin_constant_p(size))
+               return copy_user_generic(dst, (__force void *)src, size);
+       switch (size) {
+       case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+                             ret, "b", "b", "=q", 1);
+               return ret;
+       case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+                             ret, "w", "w", "=r", 2);
+               return ret;
+       case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+                             ret, "l", "k", "=r", 4);
+               return ret;
+       case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+                             ret, "q", "", "=r", 8);
+               return ret;
+       case 10:
+               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+                              ret, "q", "", "=r", 16);
+               if (unlikely(ret))
+                       return ret;
+               __get_user_asm(*(u16 *)(8 + (char *)dst),
+                              (u16 __user *)(8 + (char __user *)src),
+                              ret, "w", "w", "=r", 2);
+               return ret;
+       case 16:
+               __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+                              ret, "q", "", "=r", 16);
+               if (unlikely(ret))
+                       return ret;
+               __get_user_asm(*(u64 *)(8 + (char *)dst),
+                              (u64 __user *)(8 + (char __user *)src),
+                              ret, "q", "", "=r", 8);
+               return ret;
+       default:
+               return copy_user_generic(dst, (__force void *)src, size);
+       }
+ }
+ static __always_inline __must_check
+ int __copy_to_user(void __user *dst, const void *src, unsigned size)
+ {
+       int ret = 0;
++
++      might_fault();
+       if (!__builtin_constant_p(size))
+               return copy_user_generic((__force void *)dst, src, size);
+       switch (size) {
+       case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+                             ret, "b", "b", "iq", 1);
+               return ret;
+       case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+                             ret, "w", "w", "ir", 2);
+               return ret;
+       case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+                             ret, "l", "k", "ir", 4);
+               return ret;
+       case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+                             ret, "q", "", "ir", 8);
+               return ret;
+       case 10:
+               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+                              ret, "q", "", "ir", 10);
+               if (unlikely(ret))
+                       return ret;
+               asm("":::"memory");
+               __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+                              ret, "w", "w", "ir", 2);
+               return ret;
+       case 16:
+               __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+                              ret, "q", "", "ir", 16);
+               if (unlikely(ret))
+                       return ret;
+               asm("":::"memory");
+               __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+                              ret, "q", "", "ir", 8);
+               return ret;
+       default:
+               return copy_user_generic((__force void *)dst, src, size);
+       }
+ }
+ static __always_inline __must_check
+ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ {
+       int ret = 0;
++
++      might_fault();
+       if (!__builtin_constant_p(size))
+               return copy_user_generic((__force void *)dst,
+                                        (__force void *)src, size);
+       switch (size) {
+       case 1: {
+               u8 tmp;
+               __get_user_asm(tmp, (u8 __user *)src,
+                              ret, "b", "b", "=q", 1);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u8 __user *)dst,
+                                      ret, "b", "b", "iq", 1);
+               return ret;
+       }
+       case 2: {
+               u16 tmp;
+               __get_user_asm(tmp, (u16 __user *)src,
+                              ret, "w", "w", "=r", 2);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u16 __user *)dst,
+                                      ret, "w", "w", "ir", 2);
+               return ret;
+       }
+       case 4: {
+               u32 tmp;
+               __get_user_asm(tmp, (u32 __user *)src,
+                              ret, "l", "k", "=r", 4);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u32 __user *)dst,
+                                      ret, "l", "k", "ir", 4);
+               return ret;
+       }
+       case 8: {
+               u64 tmp;
+               __get_user_asm(tmp, (u64 __user *)src,
+                              ret, "q", "", "=r", 8);
+               if (likely(!ret))
+                       __put_user_asm(tmp, (u64 __user *)dst,
+                                      ret, "q", "", "ir", 8);
+               return ret;
+       }
+       default:
+               return copy_user_generic((__force void *)dst,
+                                        (__force void *)src, size);
+       }
+ }
+ __must_check long
+ strncpy_from_user(char *dst, const char __user *src, long count);
+ __must_check long
+ __strncpy_from_user(char *dst, const char __user *src, long count);
+ __must_check long strnlen_user(const char __user *str, long n);
+ __must_check long __strnlen_user(const char __user *str, long n);
+ __must_check long strlen_user(const char __user *str);
+ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
+ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
+ __must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+                                           unsigned size);
+ static __must_check __always_inline int
+ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+ {
+       return copy_user_generic((__force void *)dst, src, size);
+ }
+ extern long __copy_user_nocache(void *dst, const void __user *src,
+                               unsigned size, int zerorest);
+ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
+                                          unsigned size)
+ {
+       might_sleep();
+       return __copy_user_nocache(dst, src, size, 1);
+ }
+ static inline int __copy_from_user_inatomic_nocache(void *dst,
+                                                   const void __user *src,
+                                                   unsigned size)
+ {
+       return __copy_user_nocache(dst, src, size, 0);
+ }
+ unsigned long
+ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+ #endif /* _ASM_X86_UACCESS_64_H */
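One 64-bit-only helper above, copy_in_user(), moves data directly between two user buffers without bouncing through a kernel buffer; a minimal sketch of a caller (hypothetical code, not part of this merge):

static long relay_between_users(void __user *dst, const void __user *src,
				unsigned long len)
{
	if (copy_in_user(dst, src, len))
		return -EFAULT;		/* non-zero return = bytes left uncopied */
	return 0;
}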
diff --cc arch/x86/lib/usercopy_32.c
Simple merge
diff --cc include/linux/kernel.h
Simple merge
diff --cc kernel/sched.c
Simple merge
diff --cc mm/memory.c
Simple merge