diff --git a/mm/filemap.c b/mm/filemap.c
index 1d33fec..5d6e4c2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,6 +28,7 @@
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include "filemap.h"
 /*
  * FIXME: remove all knowledge of the buffer layer from the core VM
  */
 #include <asm/uaccess.h>
 #include <asm/mman.h>
 
+static ssize_t
+generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+       loff_t offset, unsigned long nr_segs);
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
  *
  *  ->i_mmap_lock              (vmtruncate)
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
- *      ->swap_list_lock
- *        ->swap_device_lock   (exclusive_swap_page, others)
- *          ->mapping->tree_lock
+ *      ->swap_lock            (exclusive_swap_page, others)
+ *        ->mapping->tree_lock
  *
  *  ->i_sem
  *    ->i_mmap_lock            (truncate->unmap_mapping_range)
  *
  *  ->mmap_sem
  *    ->i_mmap_lock
- *      ->page_table_lock      (various places, mainly in mmap.c)
+ *      ->page_table_lock or pte_lock  (various, mainly in memory.c)
  *        ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
  *    ->anon_vma.lock          (vma_adjust)
  *
  *  ->anon_vma.lock
- *    ->page_table_lock                (anon_vma_prepare and various)
+ *    ->page_table_lock or pte_lock    (anon_vma_prepare and various)
  *
- *  ->page_table_lock
- *    ->swap_device_lock       (try_to_unmap_one)
+ *  ->page_table_lock or pte_lock
+ *    ->swap_lock              (try_to_unmap_one)
  *    ->private_lock           (try_to_unmap_one)
  *    ->tree_lock              (try_to_unmap_one)
  *    ->zone.lru_lock          (follow_page->mark_page_accessed)
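The lock-hierarchy comment updates above track two changes made elsewhere in
mm: the old swap_list_lock/swap_device_lock pair has been collapsed into a
single swap_lock, and with split page table locks the lock actually held may
be a per-table pte lock rather than mm->page_table_lock. A minimal sketch of
the idiom the new "page_table_lock or pte_lock" wording refers to, assuming
the pte_offset_map_lock() helpers of this era:

	pte_t *pte;
	spinlock_t *ptl;

	/* takes the per-table pte lock on split-ptlock configs,
	 * or mm->page_table_lock otherwise */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... inspect or update *pte while ptl is held ... */
	pte_unmap_unlock(pte, ptl);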
@@ -148,7 +152,7 @@ static int sync_page(void *word)
         * in the ->sync_page() methods make essential use of the
         * page_mapping(), merely passing the page down to the backing
         * device's unplug functions when it's non-NULL, which in turn
-        * ignore it for all cases but swap, where only page->private is
+        * ignore it for all cases but swap, where only page_private(page) is
         * of interest. When page_mapping() does go NULL, the entire
         * call stack gracefully ignores the page and returns.
         * -- wli
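page_private() is, at this stage of the tree, just an accessor over the same
word of struct page; a sketch of the accessor pair this hunk assumes (as in
include/linux/mm.h of this era):

	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))

Routing callers through an accessor lets later patches repurpose the field
without touching every user again.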
@@ -301,8 +305,9 @@ EXPORT_SYMBOL(sync_page_range);
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-                       loff_t pos, size_t count)
+static int sync_page_range_nolock(struct inode *inode,
+                                 struct address_space *mapping,
+                                 loff_t pos, size_t count)
 {
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
@@ -317,7 +322,6 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
 }
-EXPORT_SYMBOL(sync_page_range_nolock);
 
 /**
  * filemap_fdatawait - walk the list of under-writeback pages of the given
@@ -373,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-               pgoff_t offset, int gfp_mask)
+               pgoff_t offset, gfp_t gfp_mask)
 {
        int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
@@ -397,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-                               pgoff_t offset, int gfp_mask)
+                               pgoff_t offset, gfp_t gfp_mask)
 {
        int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
        if (ret == 0)
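These two prototypes, and the two sites below, switch their allocation masks
from plain integers to gfp_t so that sparse can flag code mixing gfp flags
with ordinary ints. A sketch of the typedef being adopted, as it looked in
include/linux/gfp.h around this time (the annotation was later changed to
__bitwise; both expand to nothing for a normal compiler):

	typedef unsigned __nocast gfp_t;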
@@ -587,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-               unsigned long index, unsigned int gfp_mask)
+               unsigned long index, gfp_t gfp_mask)
 {
        struct page *page, *cached_page = NULL;
        int err;
@@ -679,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
        struct page *page = find_get_page(mapping, index);
-       unsigned int gfp_mask;
+       gfp_t gfp_mask;
 
        if (page) {
                if (!TestSetPageLocked(page))
@@ -1026,8 +1030,8 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        desc.error = 0;
                        do_generic_file_read(filp,ppos,&desc,file_read_actor);
                        retval += desc.written;
-                       if (!retval) {
-                               retval = desc.error;
+                       if (desc.error) {
+                               retval = retval ?: desc.error;
                                break;
                        }
                }
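Previously desc.error was surfaced only when nothing had been read across all
segments; now the loop stops at the first failing segment and reports the
partial byte count if there is one, otherwise the error. The reporting rule,
restated as a sketch (GNU "?:" shorthand, as in the hunk above):

	/* sketch: short reads win over errors, POSIX-read style */
	static ssize_t read_result(ssize_t copied, int error)
	{
		return copied ?: error;	/* a ?: b  ==  a ? a : b */
	}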
@@ -1504,15 +1508,22 @@ repeat:
                return -EINVAL;
 
        page = filemap_getpage(file, pgoff, nonblock);
+
+       /* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
+        * done in shmem_populate calling shmem_getpage */
        if (!page && !nonblock)
                return -ENOMEM;
+
        if (page) {
                err = install_page(mm, vma, addr, page, prot);
                if (err) {
                        page_cache_release(page);
                        return err;
                }
-       } else {
+       } else if (vma->vm_flags & VM_NONLINEAR) {
+               /* No page was found just because we can't read it in now (being
+                * here implies nonblock != 0), but the page may exist, so set
+                * the PTE to fault it in later. */
                err = install_file_pte(mm, vma, addr, pgoff, prot);
                if (err)
                        return err;
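The populate path now distinguishes three cases instead of two. A hedged
condensation (hypothetical helper name, error handling simplified -- the real
code above also returns -ENOMEM in the blocking no-page case and still lacks
the I/O-error check its XXX comment asks for):

	static int populate_one(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long addr, struct page *page,
				unsigned long pgoff, pgprot_t prot)
	{
		if (page)	/* resident: map it immediately */
			return install_page(mm, vma, addr, page, prot);
		if (vma->vm_flags & VM_NONLINEAR)
			/* nonblock miss: stash pgoff in a file pte so a
			 * later fault can read the page in */
			return install_file_pte(mm, vma, addr, pgoff, prot);
		/* linear vma: leave the pte clear; do_no_page() derives
		 * the offset from vma->vm_pgoff at fault time */
		return 0;
	}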
@@ -1526,6 +1537,7 @@ repeat:
 
        return 0;
 }
+EXPORT_SYMBOL(filemap_populate);
 
 struct vm_operations_struct generic_file_vm_ops = {
        .nopage         = filemap_nopage,
@@ -1544,7 +1556,6 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
        vma->vm_ops = &generic_file_vm_ops;
        return 0;
 }
-EXPORT_SYMBOL(filemap_populate);
 
 /*
  * This is for filesystems which do not implement ->writepage.
@@ -1714,32 +1725,7 @@ int remove_suid(struct dentry *dentry)
 }
 EXPORT_SYMBOL(remove_suid);
 
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then clear the page
- * out to (offset+bytes) and return the number of bytes which were copied.
- */
-static inline size_t
-filemap_copy_from_user(struct page *page, unsigned long offset,
-                       const char __user *buf, unsigned bytes)
-{
-       char *kaddr;
-       int left;
-
-       kaddr = kmap_atomic(page, KM_USER0);
-       left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-       kunmap_atomic(kaddr, KM_USER0);
-
-       if (left != 0) {
-               /* Do it the slow way */
-               kaddr = kmap(page);
-               left = __copy_from_user(kaddr + offset, buf, bytes);
-               kunmap(page);
-       }
-       return bytes - left;
-}
-
-static size_t
+size_t
 __filemap_copy_from_user_iovec(char *vaddr, 
                        const struct iovec *iov, size_t base, size_t bytes)
 {
@@ -1767,52 +1753,6 @@ __filemap_copy_from_user_iovec(char *vaddr,
 }
 
 /*
- * This has the same sideeffects and return value as filemap_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
- * single-segment behaviour.
- */
-static inline size_t
-filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
-                       const struct iovec *iov, size_t base, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap_atomic(page, KM_USER0);
-       copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
-                                               base, bytes);
-       kunmap_atomic(kaddr, KM_USER0);
-       if (copied != bytes) {
-               kaddr = kmap(page);
-               copied = __filemap_copy_from_user_iovec(kaddr + offset, iov,
-                                                       base, bytes);
-               kunmap(page);
-       }
-       return copied;
-}
-
-static inline void
-filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
-{
-       const struct iovec *iov = *iovp;
-       size_t base = *basep;
-
-       while (bytes) {
-               int copy = min(bytes, iov->iov_len - base);
-
-               bytes -= copy;
-               base += copy;
-               if (iov->iov_len == base) {
-                       iov++;
-                       base = 0;
-               }
-       }
-       *iovp = iov;
-       *basep = base;
-}
-
-/*
  * Performs necessary checks before doing a write
  *
 * Can adjust writing position or amount of bytes to write.
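The three static helpers deleted above (filemap_copy_from_user,
filemap_copy_from_user_iovec, filemap_set_next_iovec) line up with the new
#include "filemap.h" at the top of this file and with
__filemap_copy_from_user_iovec losing its static qualifier: presumably they
moved into mm/filemap.h so that mm/filemap_xip.c can share them. A sketch of
what that header would then carry (assumed, not shown by this diff):

	/* mm/filemap.h -- sketch of the assumed shared header */
	size_t __filemap_copy_from_user_iovec(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes);
	/* plus the three static inline helpers deleted above, verbatim */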
@@ -1827,12 +1767,6 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
         if (unlikely(*pos < 0))
                 return -EINVAL;
 
-        if (unlikely(file->f_error)) {
-                int err = file->f_error;
-                file->f_error = 0;
-                return err;
-        }
-
        if (!isblk) {
                /* FIXME: this is for backwards compatibility with 2.4 */
                if (file->f_flags & O_APPEND)
@@ -1927,8 +1861,11 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         * i_sem is held, which protects generic_osync_inode() from
         * livelocking.
         */
-       if (written >= 0 && file->f_flags & O_SYNC)
-               generic_osync_inode(inode, mapping, OSYNC_METADATA);
+       if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+               int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
+               if (err < 0)
+                       written = err;
+       }
        if (written == count && !is_sync_kiocb(iocb))
                written = -EIOCBQUEUED;
        return written;
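Two behavioural fixes in one hunk: the metadata sync now also fires for
sync-mounted filesystems and S_SYNC inodes, not just O_SYNC opens, and a
failure from generic_osync_inode() now replaces the byte count instead of
being silently dropped. For reference, what IS_SYNC() adds over the O_SYNC
check (sketch, per include/linux/fs.h of this era):

	#define IS_SYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS) || \
				 ((inode)->i_flags & S_SYNC))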
@@ -1968,6 +1905,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        do {
                unsigned long index;
                unsigned long offset;
+               unsigned long maxlen;
                size_t copied;
 
                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
@@ -1982,7 +1920,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
-               fault_in_pages_readable(buf, bytes);
+               maxlen = cur_iov->iov_len - iov_base;
+               if (maxlen > bytes)
+                       maxlen = bytes;
+               fault_in_pages_readable(buf, maxlen);
 
                page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
                if (!page) {
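buf points into the current iovec segment only, while bytes may extend into
the next segment; pre-faulting the full bytes could therefore touch user
memory that buf does not cover. The new maxlen clamps the pre-fault to the
segment remainder -- equivalently (sketch):

	maxlen = min_t(unsigned long, cur_iov->iov_len - iov_base, bytes);
	fault_in_pages_readable(buf, maxlen);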
@@ -2023,7 +1964,11 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                                if (unlikely(nr_segs > 1)) {
                                        filemap_set_next_iovec(&cur_iov,
                                                        &iov_base, status);
-                                       buf = cur_iov->iov_base + iov_base;
+                                       if (count)
+                                               buf = cur_iov->iov_base +
+                                                       iov_base;
+                               } else {
+                                       iov_base += status;
                                }
                        }
                }
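Two related fixes here: the multi-segment path recomputes buf only while
count is non-zero (once the write is complete, cur_iov may point one past the
last segment), and the single-segment path, previously a no-op, now advances
iov_base too -- the maxlen clamp added above reads
cur_iov->iov_len - iov_base on every iteration, so iov_base must stay
accurate even with a single segment. The invariant being maintained, as a
hypothetical assertion:

	/* sketch: holds at the bottom of the copy loop above */
	static void check_buf(const struct iovec *cur_iov, size_t iov_base,
			      const char __user *buf, size_t count)
	{
		if (count)	/* past the end, cur_iov may be invalid */
			BUG_ON(buf != cur_iov->iov_base + iov_base);
	}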
@@ -2067,7 +2012,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
-ssize_t
+static ssize_t
 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
 {
@@ -2167,7 +2112,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
-ssize_t
+static ssize_t
 __generic_file_write_nolock(struct file *file, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
 {
@@ -2288,7 +2233,7 @@ EXPORT_SYMBOL(generic_file_writev);
  * Called under i_sem for writes to S_ISREG files.   Returns -EIO if something
  * went wrong during pagecache shootdown.
  */
-ssize_t
+static ssize_t
 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t offset, unsigned long nr_segs)
 {
@@ -2323,4 +2268,3 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        }
        return retval;
 }
-EXPORT_SYMBOL_GPL(generic_file_direct_IO);