Do not include linux/backing-dev.h twice
diff --git a/mm/filemap.c b/mm/filemap.c
index 7c86436..ab98557 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
-#include <linux/backing-dev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
 #include "internal.h"
 
 /*
@@ -65,7 +65,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->mapping->tree_lock
- *          ->zone.lock
  *
  *  ->i_mutex
  *    ->i_mmap_lock            (truncate->unmap_mapping_range)
@@ -119,11 +118,24 @@ void __remove_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page->mapping;
 
+       mem_cgroup_uncharge_page(page);
        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        BUG_ON(page_mapped(page));
+
+       /*
+        * Some filesystems seem to re-dirty the page even after
+        * the VM has canceled the dirty bit (eg ext3 journaling).
+        *
+        * Fix it up by doing a final dirty accounting check after
+        * having removed the page entirely.
+        */
+       if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+               dec_zone_page_state(page, NR_FILE_DIRTY);
+               dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+       }
 }
 
 void remove_from_page_cache(struct page *page)
@@ -173,6 +185,12 @@ static int sync_page(void *word)
        return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+       sync_page(word);
+       return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -441,8 +459,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 int add_to_page_cache(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
 {
-       int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+       int error = mem_cgroup_cache_charge(page, current->mm,
+                                       gfp_mask & ~__GFP_HIGHMEM);
+       if (error)
+               goto out;
 
+       error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (error == 0) {
                write_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
@@ -453,10 +475,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
                        page->index = offset;
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
-               }
+               } else
+                       mem_cgroup_uncharge_page(page);
+
                write_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
-       }
+       } else
+               mem_cgroup_uncharge_page(page);
+out:
        return error;
 }
 EXPORT_SYMBOL(add_to_page_cache);
@@ -510,7 +536,7 @@ static inline void wake_up_page(struct page *page, int bit)
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -534,7 +560,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
        smp_mb__before_clear_bit();
        if (!TestClearPageLocked(page))
@@ -568,7 +594,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -577,11 +603,19 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+int __lock_page_killable(struct page *page)
+{
+       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+                                       sync_page_killable, TASK_KILLABLE);
+}
+
 /*
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
        __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -840,9 +874,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_mapping_read - generic file read routine
- * @mapping:   address_space to be read
- * @ra:                file's readahead state
+ * do_generic_file_read - generic file read routine
  * @filp:      the file to read
  * @ppos:      current file position
  * @desc:      read_descriptor
@@ -853,18 +885,13 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
  */
-void do_generic_mapping_read(struct address_space *mapping,
-                            struct file_ra_state *ra,
-                            struct file *filp,
-                            loff_t *ppos,
-                            read_descriptor_t *desc,
-                            read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+               read_descriptor_t *desc, read_actor_t actor)
 {
+       struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
+       struct file_ra_state *ra = &filp->f_ra;
        pgoff_t index;
        pgoff_t last_index;
        pgoff_t prev_index;
@@ -968,7 +995,8 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               lock_page(page);
+               if (lock_page_killable(page))
+                       goto readpage_eio;
 
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
@@ -996,7 +1024,8 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       lock_page(page);
+                       if (lock_page_killable(page))
+                               goto readpage_eio;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
@@ -1007,15 +1036,16 @@ readpage:
                                        goto find_page;
                                }
                                unlock_page(page);
-                               error = -EIO;
                                shrink_readahead_size_eio(filp, ra);
-                               goto readpage_error;
+                               goto readpage_eio;
                        }
                        unlock_page(page);
                }
 
                goto page_ok;
 
+readpage_eio:
+               error = -EIO;
 readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
@@ -1053,7 +1083,6 @@ out:
        if (filp)
                file_accessed(filp);
 }
-EXPORT_SYMBOL(do_generic_mapping_read);
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
@@ -1248,7 +1277,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
        struct address_space *mapping = file->f_mapping;
        struct page *page; 
@@ -1294,13 +1323,13 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        struct page *page;
-       unsigned long size;
+       pgoff_t size;
        int did_readaround = 0;
        int ret = 0;
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (vmf->pgoff >= size)
-               goto outside_data_content;
+               return VM_FAULT_SIGBUS;
 
        /* If we don't want any read-ahead, don't bother */
        if (VM_RandomReadHint(vma))
@@ -1377,7 +1406,7 @@ retry_find:
        if (unlikely(vmf->pgoff >= size)) {
                unlock_page(page);
                page_cache_release(page);
-               goto outside_data_content;
+               return VM_FAULT_SIGBUS;
        }
 
        /*
@@ -1388,15 +1417,6 @@ retry_find:
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
 
-outside_data_content:
-       /*
-        * An external ptracer can access pages that normally aren't
-        * accessible..
-        */
-       if (vma->vm_mm == current->mm)
-               return VM_FAULT_SIGBUS;
-
-       /* Fall through to the non-read-ahead case */
 no_cached_page:
        /*
         * We're only likely to ever get here if MADV_RANDOM is in
@@ -1730,7 +1750,11 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
 
-               while (bytes) {
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments.
+                */
+               while (bytes || !iov->iov_len) {
                        int copy = min(bytes, iov->iov_len - base);
 
                        bytes -= copy;
@@ -2248,6 +2272,7 @@ again:
 
                cond_resched();
 
+               iov_iter_advance(i, copied);
                if (unlikely(copied == 0)) {
                        /*
                         * If we were unable to copy any data at all, we must
@@ -2261,7 +2286,6 @@ again:
                                                iov_iter_single_seg_count(i));
                        goto again;
                }
-               iov_iter_advance(i, copied);
                pos += copied;
                written += copied;
 
@@ -2511,21 +2535,17 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        }
 
        retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-       if (retval)
-               goto out;
 
        /*
         * Finally, try again to invalidate clean pages which might have been
-        * faulted in by get_user_pages() if the source of the write was an
-        * mmap()ed region of the file we're writing.  That's a pretty crazy
-        * thing to do, so we don't support it 100%.  If this invalidation
-        * fails and we have -EIOCBQUEUED we ignore the failure.
+        * cached by non-direct readahead, or faulted in by get_user_pages()
+        * if the source of the write was an mmap'ed region of the file
+        * we're writing.  Either one is a pretty crazy thing to do,
+        * so we don't support it 100%.  If this invalidation
+        * fails, tough, the write still worked...
         */
        if (rw == WRITE && mapping->nrpages) {
-               int err = invalidate_inode_pages2_range(mapping,
-                                             offset >> PAGE_CACHE_SHIFT, end);
-               if (err && retval >= 0)
-                       retval = err;
+               invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
        }
 out:
        return retval;
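
The __iov_iter_advance_iov hunk above adds a "!iov->iov_len" test so the cursor is stepped past zero-length iovec segments instead of coming to rest on one. Below is a minimal userspace sketch of that skip-empty-segments logic; advance_iov and the test harness are hypothetical illustrations, not part of the patch, and like the kernel loop it assumes the caller never advances past the end of the vector.

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

/*
 * Illustrative userspace analog of the loop in __iov_iter_advance_iov.
 * Walks (iov, base) forward by "bytes", and keeps stepping while the
 * current segment is zero-length so the cursor never stops on an empty
 * segment.  As in the kernel version, the caller must guarantee enough
 * data (and a trailing non-empty segment) exists, or the loop would run
 * off the end of the vector.
 */
static void advance_iov(const struct iovec **iovp, size_t *basep, size_t bytes)
{
        const struct iovec *iov = *iovp;
        size_t base = *basep;

        while (bytes || !iov->iov_len) {
                size_t avail = iov->iov_len - base;
                size_t copy = bytes < avail ? bytes : avail;

                bytes -= copy;
                base += copy;
                if (iov->iov_len == base) {     /* segment exhausted, move on */
                        iov++;
                        base = 0;
                }
        }
        *iovp = iov;
        *basep = base;
}

int main(void)
{
        char a[] = "abcd", b[1], c[] = "efghij";
        struct iovec vec[3] = {
                { .iov_base = a, .iov_len = 4 },
                { .iov_base = b, .iov_len = 0 },        /* zero-length segment */
                { .iov_base = c, .iov_len = 6 },
        };
        const struct iovec *cur = vec;
        size_t base = 0;

        advance_iov(&cur, &base, 4);    /* consume the first segment entirely */
        printf("segment %td, offset %zu\n", cur - vec, base);
        return 0;
}

Running this advances the cursor over the exhausted first segment and the empty second one, leaving it at segment 2, offset 0. That is the behaviour the added check guarantees when iov_iter_advance() is called in the buffered write path shown in the later hunks.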