x86, mm: pass in 'total' to __copy_from_user_*nocache()
author    Ingo Molnar <mingo@elte.hu>
Wed, 25 Feb 2009 07:21:52 +0000 (08:21 +0100)
committer Ingo Molnar <mingo@elte.hu>
Wed, 25 Feb 2009 09:20:03 +0000 (10:20 +0100)
Impact: cleanup, enable future change

Add a 'total bytes copied' parameter to __copy_from_user_*nocache(),
and update all the callsites.

The parameter is not used yet - architecture code can use it to
more intelligently decide whether the copy should be cached or
non-temporal.

Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/uaccess_64.h
drivers/gpu/drm/i915/i915_gem.c
include/linux/uaccess.h
mm/filemap.c
mm/filemap_xip.c

index 5e06259..a0ba613 100644 (file)
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 }
 
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
-                               const void __user *from, unsigned long n)
+               const void __user *from, unsigned long n, unsigned long total)
 {
        might_fault();
        if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
-                                 unsigned long n)
+                                 unsigned long n, unsigned long total)
 {
        return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
index 987a2c1..a748253 100644 (file)
@@ -189,7 +189,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);
 
 static inline int __copy_from_user_nocache(void *dst, const void __user *src,
-                                          unsigned size)
+                                  unsigned size, unsigned long total)
 {
        might_sleep();
        /*
@@ -205,8 +205,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
-                                                   const void __user *src,
-                                                   unsigned size)
+           const void __user *src, unsigned size, unsigned total)
 {
        if (likely(size >= PAGE_SIZE))
                return __copy_user_nocache(dst, src, size, 0);
index 8185766..6b209db 100644 (file)
@@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping,
 
        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
-                                                     user_data, length);
+                                                     user_data, length, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
index 6b58367..6f3c603 100644 (file)
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-                               const void __user *from, unsigned long n)
+               const void __user *from, unsigned long n, unsigned long total)
 {
        return __copy_from_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __copy_from_user_nocache(void *to,
-                               const void __user *from, unsigned long n)
+               const void __user *from, unsigned long n, unsigned long total)
 {
        return __copy_from_user(to, from, n);
 }
index 23acefe..60fd567 100644 (file)
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
-       size_t copied = 0, left = 0;
+       size_t copied = 0, left = 0, total = bytes;
 
        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);
 
                base = 0;
-               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
@@ -1851,8 +1851,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
+
                left = __copy_from_user_inatomic_nocache(kaddr + offset,
-                                                       buf, bytes);
+                                                       buf, bytes, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1881,8 @@ size_t iov_iter_copy_from_user(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+
+               left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
index 0c04615..bf54f8a 100644 (file)
@@ -354,7 +354,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                        break;
 
                copied = bytes -
-                       __copy_from_user_nocache(xip_mem + offset, buf, bytes);
+                       __copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
 
                if (likely(copied > 0)) {
                        status = copied;