/dev/mem: introduce size_inside_page()
author Wu Fengguang <fengguang.wu@intel.com>
Tue, 15 Dec 2009 01:58:07 +0000 (17:58 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Dec 2009 16:53:11 +0000 (08:53 -0800)
Introduce size_inside_page() to replace duplicate /dev/mem code.

Also apply it to /dev/kmem, whose alignment logic was buggy: the vread/vwrite
loops simply capped each chunk at PAGE_SIZE without aligning it to the end of
the current page, so chunks could straddle page boundaries.
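
For reference, a minimal userspace sketch of what the helper computes; this is
an illustration only, not kernel code (PAGE_SIZE is hard-coded to 4 KB and
plain C stands in for min_t()):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Bytes from 'start' to the end of its page, capped at 'size'. */
    static unsigned long size_inside_page(unsigned long start,
                                          unsigned long size)
    {
            unsigned long sz;

            /* -start & (PAGE_SIZE - 1) is the distance to the next page
             * boundary, or 0 when 'start' is already page-aligned. */
            if (-start & (PAGE_SIZE - 1))
                    sz = -start & (PAGE_SIZE - 1);
            else
                    sz = PAGE_SIZE;

            return sz < size ? sz : size;   /* stand-in for min_t() */
    }

    int main(void)
    {
            /* 0x1ff0 is 16 bytes short of a page boundary, so the first
             * chunk is 16 bytes; later chunks are whole pages. */
            printf("%lu\n", size_inside_page(0x1ff0, 100));  /* 16 */
            printf("%lu\n", size_inside_page(0x2000, 100));  /* 100 */
            printf("%lu\n", size_inside_page(0x2000, 8192)); /* 4096 */
            return 0;
    }

Each read/write loop below advances by the returned chunk, so after the first
iteration the offset is page-aligned and subsequent chunks are whole pages
(except possibly the final one).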

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Mark Brown <broonie@opensource.wolfsonmicro.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/char/mem.c

index f0a9059..aaa9c24 100644
 # include <linux/efi.h>
 #endif
 
+static inline unsigned long size_inside_page(unsigned long start,
+                                            unsigned long size)
+{
+       unsigned long sz;
+
+       if (-start & (PAGE_SIZE - 1))
+               sz = -start & (PAGE_SIZE - 1);
+       else
+               sz = PAGE_SIZE;
+
+       return min_t(unsigned long, sz, size);
+}
+
 /*
  * Architectures vary in how they handle caching for addresses
  * outside of main memory.
@@ -141,15 +154,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
 #endif
 
        while (count > 0) {
-               /*
-                * Handle first page in case it's not aligned
-                */
-               if (-p & (PAGE_SIZE - 1))
-                       sz = -p & (PAGE_SIZE - 1);
-               else
-                       sz = PAGE_SIZE;
-
-               sz = min_t(unsigned long, sz, count);
+               sz = size_inside_page(p, count);
 
                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;
@@ -208,15 +213,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 #endif
 
        while (count > 0) {
-               /*
-                * Handle first page in case it's not aligned
-                */
-               if (-p & (PAGE_SIZE - 1))
-                       sz = -p & (PAGE_SIZE - 1);
-               else
-                       sz = PAGE_SIZE;
-
-               sz = min_t(unsigned long, sz, count);
+               sz = size_inside_page(p, count);
 
                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;
@@ -429,15 +426,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                }
 #endif
                while (low_count > 0) {
-                       /*
-                        * Handle first page in case it's not aligned
-                        */
-                       if (-p & (PAGE_SIZE - 1))
-                               sz = -p & (PAGE_SIZE - 1);
-                       else
-                               sz = PAGE_SIZE;
-
-                       sz = min_t(unsigned long, sz, low_count);
+                       sz = size_inside_page(p, low_count);
 
                        /*
                         * On ia64 if a page has been mapped somewhere as
@@ -461,10 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
-                       int len = count;
+                       int len = size_inside_page(p, count);
 
-                       if (len > PAGE_SIZE)
-                               len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
@@ -509,15 +496,8 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
 
        while (count > 0) {
                char *ptr;
-               /*
-                * Handle first page in case it's not aligned
-                */
-               if (-realp & (PAGE_SIZE - 1))
-                       sz = -realp & (PAGE_SIZE - 1);
-               else
-                       sz = PAGE_SIZE;
 
-               sz = min_t(unsigned long, sz, count);
+               sz = size_inside_page(realp, count);
 
                /*
                 * On ia64 if a page has been mapped somewhere as
@@ -577,10 +557,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
-                       int len = count;
+                       int len = size_inside_page(p, count);
 
-                       if (len > PAGE_SIZE)
-                               len = PAGE_SIZE;
                        written = copy_from_user(kbuf, buf, len);
                        if (written) {
                                if (wrote + virtr)