Use pgoff_t instead of unsigned long

diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 82f4b8e..5e598c4 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,7 +15,6 @@
 #include <linux/rmap.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
-#include "filemap.h"
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -26,14 +25,15 @@ static struct page *__xip_sparse_page;
 static struct page *xip_sparse_page(void)
 {
        if (!__xip_sparse_page) {
-               unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
-               if (zeroes) {
+               struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+               if (page) {
                        static DEFINE_SPINLOCK(xip_alloc_lock);
                        spin_lock(&xip_alloc_lock);
                        if (!__xip_sparse_page)
-                               __xip_sparse_page = virt_to_page(zeroes);
+                               __xip_sparse_page = page;
                        else
-                               free_page(zeroes);
+                               __free_page(page);
                        spin_unlock(&xip_alloc_lock);
                }
        }
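
A note on the hunk above: get_zeroed_page() returns a kernel virtual address, which cannot describe a highmem page, so pairing it with GFP_HIGHUSER and virt_to_page() is fragile; alloc_page(GFP_HIGHUSER | __GFP_ZERO) hands back the struct page directly. The surrounding "allocate outside the lock, commit under the lock, free the loser's copy" pattern is unchanged. A minimal userspace analogue of that pattern, using a pthread mutex in place of the kernel spinlock (the names one_time_buf/get_one_time_buf are mine, purely illustrative), might look like this:

    #include <pthread.h>
    #include <stdlib.h>
    #include <stdio.h>

    static void *one_time_buf;                 /* analogue of __xip_sparse_page */
    static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Allocate the shared zeroed buffer at most once, even with racing callers. */
    static void *get_one_time_buf(size_t size)
    {
        if (!one_time_buf) {
            void *buf = calloc(1, size);       /* allocate and zero outside the lock */

            if (buf) {
                pthread_mutex_lock(&alloc_lock);
                if (!one_time_buf)
                    one_time_buf = buf;        /* we won the race: publish our copy */
                else
                    free(buf);                 /* someone beat us: drop our copy */
                pthread_mutex_unlock(&alloc_lock);
            }
        }
        return one_time_buf;
    }

    int main(void)
    {
        printf("buf=%p\n", get_one_time_buf(4096));
        return 0;
    }

As in the kernel code, the first check runs without the lock; the lock only serializes the final assignment, so at most one allocation ever becomes visible.
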
@@ -56,7 +56,8 @@ do_xip_mapping_read(struct address_space *mapping,
                    read_actor_t actor)
 {
        struct inode *inode = mapping->host;
-       unsigned long index, end_index, offset;
+       pgoff_t index, end_index;
+       unsigned long offset;
        loff_t isize;
 
        BUG_ON(!mapping->a_ops->get_xip_page);
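
The hunk above is the one the subject line refers to: the page cache index moves to pgoff_t (typedef'd to unsigned long in include/linux/types.h), while the byte offset inside a page stays an unsigned long. For readers unfamiliar with the split, here is a small standalone sketch of the same arithmetic used later in __xip_file_write (pos >> PAGE_CACHE_SHIFT for the index, pos & (PAGE_CACHE_SIZE - 1) for the offset); the *_demo typedefs and DEMO_PAGE_SHIFT value are stand-ins for the kernel's own definitions, not the real ones:

    #include <stdio.h>

    typedef long long loff_t_demo;          /* stand-in for the kernel's loff_t  */
    typedef unsigned long pgoff_t_demo;     /* stand-in for the kernel's pgoff_t */

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

    int main(void)
    {
        loff_t_demo pos = 8193;                             /* byte position in the file    */
        pgoff_t_demo index = pos >> DEMO_PAGE_SHIFT;        /* which page cache page        */
        unsigned long offset = pos & (DEMO_PAGE_SIZE - 1);  /* byte offset inside that page */

        printf("pos=%lld -> index=%lu offset=%lu\n", (long long)pos, index, offset);
        return 0;
    }
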
@@ -210,8 +211,7 @@ __xip_unmap (struct address_space * mapping,
  *
  * This function is derived from filemap_fault, but used for execute in place
  */
-static struct page *xip_file_fault(struct vm_area_struct *area,
-                                       struct fault_data *fdata)
+static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
        struct file *file = area->vm_file;
        struct address_space *mapping = file->f_mapping;
@@ -222,19 +222,15 @@ static struct page *xip_file_fault(struct vm_area_struct *area,
        /* XXX: are VM_FAULT_ codes OK? */
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (fdata->pgoff >= size) {
-               fdata->type = VM_FAULT_SIGBUS;
-               return NULL;
-       }
+       if (vmf->pgoff >= size)
+               return VM_FAULT_SIGBUS;
 
        page = mapping->a_ops->get_xip_page(mapping,
-                                       fdata->pgoff*(PAGE_SIZE/512), 0);
+                                       vmf->pgoff*(PAGE_SIZE/512), 0);
        if (!IS_ERR(page))
                goto out;
-       if (PTR_ERR(page) != -ENODATA) {
-               fdata->type = VM_FAULT_OOM;
-               return NULL;
-       }
+       if (PTR_ERR(page) != -ENODATA)
+               return VM_FAULT_OOM;
 
        /* sparse block */
        if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
@@ -242,26 +238,22 @@ static struct page *xip_file_fault(struct vm_area_struct *area,
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                /* maybe shared writable, allocate new block */
                page = mapping->a_ops->get_xip_page(mapping,
-                                       fdata->pgoff*(PAGE_SIZE/512), 1);
-               if (IS_ERR(page)) {
-                       fdata->type = VM_FAULT_SIGBUS;
-                       return NULL;
-               }
+                                       vmf->pgoff*(PAGE_SIZE/512), 1);
+               if (IS_ERR(page))
+                       return VM_FAULT_SIGBUS;
                /* unmap page at pgoff from all other vmas */
-               __xip_unmap(mapping, fdata->pgoff);
+               __xip_unmap(mapping, vmf->pgoff);
        } else {
                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
-               if (!page) {
-                       fdata->type = VM_FAULT_OOM;
-                       return NULL;
-               }
+               if (!page)
+                       return VM_FAULT_OOM;
        }
 
 out:
-       fdata->type = VM_FAULT_MINOR;
        page_cache_get(page);
-       return page;
+       vmf->page = page;
+       return 0;
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
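
The conversion above follows the then-new ->fault calling convention: instead of returning a struct page * and reporting status through fdata->type, the handler returns a VM_FAULT_* code (0 for success) and hands the page back through vmf->page. A compact userspace mock of that shape, with stub types standing in for the kernel's struct vm_fault and VM_FAULT_* values (every name below is illustrative, not the kernel's definition):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins for the kernel's fault status codes. */
    #define DEMO_VM_FAULT_OK     0
    #define DEMO_VM_FAULT_SIGBUS 1
    #define DEMO_VM_FAULT_OOM    2

    struct demo_page { char data[4096]; };

    struct demo_vm_fault {
        unsigned long pgoff;        /* page index the fault happened on    */
        struct demo_page *page;     /* filled in by the handler on success */
    };

    /* New-style handler: status comes back as the return value,
     * the page comes back through vmf->page. */
    static int demo_fault(struct demo_vm_fault *vmf, unsigned long nr_pages)
    {
        if (vmf->pgoff >= nr_pages)
            return DEMO_VM_FAULT_SIGBUS;   /* out of range: just a code, no page */

        vmf->page = calloc(1, sizeof(*vmf->page));
        return vmf->page ? DEMO_VM_FAULT_OK : DEMO_VM_FAULT_OOM;
    }

    int main(void)
    {
        struct demo_vm_fault vmf = { .pgoff = 3, .page = NULL };
        int ret = demo_fault(&vmf, 8);

        printf("ret=%d page=%p\n", ret, (void *)vmf.page);
        free(vmf.page);
        return 0;
    }
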
@@ -297,6 +289,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                unsigned long index;
                unsigned long offset;
                size_t copied;
+               char *kaddr;
 
                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
@@ -304,14 +297,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
                if (bytes > count)
                        bytes = count;
 
-               /*
-                * Bring in the user page that we will copy from _first_.
-                * Otherwise there's a nasty deadlock on copying from the
-                * same page as we're writing to, without it being marked
-                * up-to-date.
-                */
-               fault_in_pages_readable(buf, bytes);
-
                page = a_ops->get_xip_page(mapping,
                                           index*(PAGE_SIZE/512), 0);
                if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
@@ -328,8 +313,13 @@ __xip_file_write(struct file *filp, const char __user *buf,
                        break;
                }
 
-               copied = filemap_copy_from_user(page, offset, buf, bytes);
+               fault_in_pages_readable(buf, bytes);
+               kaddr = kmap_atomic(page, KM_USER0);
+               copied = bytes -
+                       __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+               kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(page);
+
                if (likely(copied > 0)) {
                        status = copied;
 
@@ -442,7 +432,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
                else
                        return PTR_ERR(page);
        }
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);