diff --git a/mm/filemap.c b/mm/filemap.c
index df343d1..7675b91 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -42,9 +42,6 @@
 
 #include <asm/mman.h>
 
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-       loff_t offset, unsigned long nr_segs);
 
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -236,11 +233,12 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end)
 {
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
+EXPORT_SYMBOL(filemap_fdatawrite_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
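With filemap_fdatawrite_range() now exported, a filesystem can start writeback
for just the byte range it cares about instead of the whole mapping. A minimal
sketch of a hypothetical caller (the surrounding function, inode and pos/len
variables are assumptions, not part of this patch):

        /* Flush only [pos, pos + len); the end argument is an
         * inclusive byte offset. */
        int err = filemap_fdatawrite_range(inode->i_mapping,
                                           pos, pos + len - 1);
        if (err)
                return err;
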
@@ -343,7 +341,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode:     target inode
  * @mapping:   target address_space
  * @pos:       beginning offset in pages to write
@@ -576,10 +574,12 @@ EXPORT_SYMBOL(unlock_page);
  */
 void end_page_writeback(struct page *page)
 {
-       if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
-               if (!test_clear_page_writeback(page))
-                       BUG();
-       }
+       if (TestClearPageReclaim(page))
+               rotate_reclaimable_page(page);
+
+       if (!test_clear_page_writeback(page))
+               BUG();
+
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
 }
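The rewritten function makes the pairing requirement explicit:
end_page_writeback() must only run on a page that went through
set_page_writeback(), otherwise test_clear_page_writeback() finds the bit
already clear and we BUG(). A hedged sketch of the expected shape of a
writeout path (submit_my_io() and the completion split are illustrative, not
from this patch):

        set_page_writeback(page);       /* mark I/O in flight */
        unlock_page(page);
        submit_my_io(page);             /* hypothetical driver call */
        /* ... later, from the I/O completion handler: */
        end_page_writeback(page);       /* clears PG_writeback, wakes waiters */
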
@@ -611,7 +611,10 @@ int __lock_page_killable(struct page *page)
                                        sync_page_killable, TASK_KILLABLE);
 }
 
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
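For context: plain lock_page() may call sync_page() through page->mapping,
which is unsafe once the caller no longer holds a reference pinning that
mapping. A sketch of where the nosync variant matters (the caller's
surroundings are an assumption for illustration):

        /* The mapping may be torn down concurrently, so we must not
         * let the lock path follow page->mapping into sync_page(). */
        lock_page_nosync(page);         /* wrapper around __lock_page_nosync() */
        /* ... operate on the page without touching page->mapping ... */
        unlock_page(page);
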
@@ -1194,42 +1197,41 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 
                mapping = filp->f_mapping;
                inode = mapping->host;
-               retval = 0;
                if (!count)
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
-                       retval = generic_file_direct_IO(READ, iocb,
-                                               iov, pos, nr_segs);
+                       retval = filemap_write_and_wait(mapping);
+                       if (!retval) {
+                               retval = mapping->a_ops->direct_IO(READ, iocb,
+                                                       iov, pos, nr_segs);
+                       }
                        if (retval > 0)
                                *ppos = pos + retval;
-               }
-               if (likely(retval != 0)) {
-                       file_accessed(filp);
-                       goto out;
+                       if (retval) {
+                               file_accessed(filp);
+                               goto out;
+                       }
                }
        }
 
-       retval = 0;
-       if (count) {
-               for (seg = 0; seg < nr_segs; seg++) {
-                       read_descriptor_t desc;
+       for (seg = 0; seg < nr_segs; seg++) {
+               read_descriptor_t desc;
 
-                       desc.written = 0;
-                       desc.arg.buf = iov[seg].iov_base;
-                       desc.count = iov[seg].iov_len;
-                       if (desc.count == 0)
-                               continue;
-                       desc.error = 0;
-                       do_generic_file_read(filp,ppos,&desc,file_read_actor);
-                       retval += desc.written;
-                       if (desc.error) {
-                               retval = retval ?: desc.error;
-                               break;
-                       }
-                       if (desc.count > 0)
-                               break;
+               desc.written = 0;
+               desc.arg.buf = iov[seg].iov_base;
+               desc.count = iov[seg].iov_len;
+               if (desc.count == 0)
+                       continue;
+               desc.error = 0;
+               do_generic_file_read(filp, ppos, &desc, file_read_actor);
+               retval += desc.written;
+               if (desc.error) {
+                       retval = retval ?: desc.error;
+                       break;
                }
+               if (desc.count > 0)
+                       break;
        }
 out:
        return retval;
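The direct-I/O branch above is what a userspace O_DIRECT read lands in:
filemap_write_and_wait() flushes dirty pagecache first, then ->direct_IO()
reads from disk. A minimal userspace sketch that exercises this path
(the filename is illustrative, and the 4096-byte alignment is an assumption;
real requirements depend on the filesystem and block size):

        #define _GNU_SOURCE             /* for O_DIRECT */
        #include <fcntl.h>
        #include <stdlib.h>
        #include <unistd.h>

        int main(void)
        {
                void *buf;
                int fd = open("testfile", O_RDONLY | O_DIRECT);

                /* O_DIRECT wants buffer, offset and length aligned */
                if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                        return 1;
                /* this pread() takes the ->direct_IO() branch above */
                return pread(fd, buf, 4096, 0) < 0;
        }
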
@@ -1456,6 +1458,11 @@ page_not_uptodate:
         */
        ClearPageError(page);
        error = mapping->a_ops->readpage(file, page);
+       if (!error) {
+               wait_on_page_locked(page);
+               if (!PageUptodate(page))
+                       error = -EIO;
+       }
        page_cache_release(page);
 
        if (!error || error == AOP_TRUNCATED_PAGE)
@@ -1538,9 +1545,20 @@ repeat:
        return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @filler:    function to perform the read
+ * @data:      destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
                                pgoff_t index,
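The new kernel-doc describes the filler-based API. A hedged sketch of a
typical caller, using the mapping's own ->readpage as the filler (a common
in-tree pattern; the surrounding names are illustrative):

        /* Read page 'index' of the mapping, filling it via ->readpage.
         * read_cache_page() would additionally wait for the unlock;
         * the _async variant returns once the read is submitted. */
        struct page *page = read_cache_page_async(mapping, index,
                        (filler_t *)mapping->a_ops->readpage, file);
        if (IS_ERR(page))
                return PTR_ERR(page);
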
@@ -1639,7 +1657,7 @@ int should_remove_suid(struct dentry *dentry)
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct dentry *dentry, int kill)
 {
        struct iattr newattrs;
 
@@ -1982,11 +2000,55 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        struct address_space *mapping = file->f_mapping;
        struct inode    *inode = mapping->host;
        ssize_t         written;
+       size_t          write_len;
+       pgoff_t         end;
 
        if (count != ocount)
                *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-       written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+       /*
+        * Unmap all mmappings of the file up-front.
+        *
+        * This will cause any pte dirty bits to be propagated into the
+        * pageframes for the subsequent filemap_write_and_wait().
+        */
+       write_len = iov_length(iov, *nr_segs);
+       end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+       if (mapping_mapped(mapping))
+               unmap_mapping_range(mapping, pos, write_len, 0);
+
+       written = filemap_write_and_wait(mapping);
+       if (written)
+               goto out;
+
+       /*
+        * After a write we want buffered reads to be sure to go to disk to get
+        * the new data.  We invalidate clean cached page from the region we're
+        * about to write.  We do this *before* the write so that we can return
+        * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+        */
+       if (mapping->nrpages) {
+               written = invalidate_inode_pages2_range(mapping,
+                                       pos >> PAGE_CACHE_SHIFT, end);
+               if (written)
+                       goto out;
+       }
+
+       written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+
+       /*
+        * Finally, try again to invalidate clean pages which might have been
+        * cached by non-direct readahead, or faulted in by get_user_pages()
+        * if the source of the write was an mmap'ed region of the file
+        * we're writing.  Either one is a pretty crazy thing to do,
+        * so we don't support it 100%.  If this invalidation
+        * fails, tough, the write still worked...
+        */
+       if (mapping->nrpages) {
+               invalidate_inode_pages2_range(mapping,
+                                             pos >> PAGE_CACHE_SHIFT, end);
+       }
+
        if (written > 0) {
                loff_t end = pos + written;
                if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
@@ -2002,6 +2064,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         * i_mutex is held, which protects generic_osync_inode() from
         * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
         */
+out:
        if ((written >= 0 || written == -EIOCBQUEUED) &&
            ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
                int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
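For reference, the ->direct_IO() call inlined above is the address_space
operation a filesystem wires up; for simple block-mapped filesystems it is
typically a thin wrapper over blockdev_direct_IO(). A hedged sketch of that
shape (the myfs_ names are hypothetical; simplified from the in-tree
pattern, not part of this patch):

        static ssize_t
        myfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                       loff_t offset, unsigned long nr_segs)
        {
                struct inode *inode = iocb->ki_filp->f_mapping->host;

                /* translate file blocks via myfs_get_block and hand the
                 * iovecs to the generic direct-I/O engine */
                return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
                                          iov, offset, nr_segs,
                                          myfs_get_block, NULL);
        }
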
@@ -2489,66 +2552,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_aio_write);
 
-/*
- * Called under i_mutex for writes to S_ISREG files.   Returns -EIO if something
- * went wrong during pagecache shootdown.
- */
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-       loff_t offset, unsigned long nr_segs)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       ssize_t retval;
-       size_t write_len;
-       pgoff_t end = 0; /* silence gcc */
-
-       /*
-        * If it's a write, unmap all mmappings of the file up-front.  This
-        * will cause any pte dirty bits to be propagated into the pageframes
-        * for the subsequent filemap_write_and_wait().
-        */
-       if (rw == WRITE) {
-               write_len = iov_length(iov, nr_segs);
-               end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
-               if (mapping_mapped(mapping))
-                       unmap_mapping_range(mapping, offset, write_len, 0);
-       }
-
-       retval = filemap_write_and_wait(mapping);
-       if (retval)
-               goto out;
-
-       /*
-        * After a write we want buffered reads to be sure to go to disk to get
-        * the new data.  We invalidate clean cached page from the region we're
-        * about to write.  We do this *before* the write so that we can return
-        * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
-        */
-       if (rw == WRITE && mapping->nrpages) {
-               retval = invalidate_inode_pages2_range(mapping,
-                                       offset >> PAGE_CACHE_SHIFT, end);
-               if (retval)
-                       goto out;
-       }
-
-       retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-
-       /*
-        * Finally, try again to invalidate clean pages which might have been
-        * cached by non-direct readahead, or faulted in by get_user_pages()
-        * if the source of the write was an mmap'ed region of the file
-        * we're writing.  Either one is a pretty crazy thing to do,
-        * so we don't support it 100%.  If this invalidation
-        * fails, tough, the write still worked...
-        */
-       if (rw == WRITE && mapping->nrpages) {
-               invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
-       }
-out:
-       return retval;
-}
-
 /**
  * try_to_release_page() - release old fs-specific metadata on a page
  *