loop: fix NULL dereference if mount fails
diff --git a/mm/filemap.c b/mm/filemap.c
index 849293c..ef169f3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -58,7 +58,7 @@
 /*
  * Lock ordering:
  *
- *  ->i_mmap_lock              (vmtruncate)
+ *  ->i_mmap_lock              (truncate_pagecache)
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->mapping->tree_lock
  *
  *  ->task->proc_lock
  *    ->dcache_lock            (proc_pid_lookup)
+ *
+ *  (code doesn't rely on that order, so you could switch it around)
+ *  ->tasklist_lock             (memory_failure, collect_procs_ao)
+ *    ->i_mmap_lock
  */
 
 /*
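
For context on the ordering documented just above: a minimal sketch, assuming the memory-failure work this comment refers to, of how a walker would be expected to nest the two locks. The helper name and loop body are hypothetical and illustrative only, not the real memory_failure()/collect_procs_ao() code.

        /* Illustrative nesting only -- hypothetical helper, not kernel code. */
        static void walk_mappers_of_poisoned_page(struct address_space *mapping)
        {
                struct task_struct *tsk;

                read_lock(&tasklist_lock);          /* outer: ->tasklist_lock */
                spin_lock(&mapping->i_mmap_lock);   /* inner: ->i_mmap_lock   */
                for_each_process(tsk) {
                        /* ... match tsk's VMAs against the poisoned page ... */
                }
                spin_unlock(&mapping->i_mmap_lock);
                read_unlock(&tasklist_lock);
        }
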
@@ -119,6 +123,8 @@ void __remove_from_page_cache(struct page *page)
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
+       if (PageSwapBacked(page))
+               __dec_zone_page_state(page, NR_SHMEM);
        BUG_ON(page_mapped(page));
 
        /*
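
A note on why the non-atomic __dec_zone_page_state() above is enough: __remove_from_page_cache() runs under mapping->tree_lock with interrupts disabled, so the __-prefixed vmstat helpers need no extra protection. The usual calling pattern looks roughly like this (a sketch, not a verbatim copy of remove_from_page_cache()):

        spin_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);     /* drops NR_FILE_PAGES and, for shmem pages, NR_SHMEM */
        spin_unlock_irq(&mapping->tree_lock);
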
@@ -326,70 +332,6 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start,
 EXPORT_SYMBOL(filemap_fdatawait_range);
 
 /**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode:     target inode
- * @mapping:   target address_space
- * @pos:       beginning offset in pages to write
- * @count:     number of bytes to write
- *
- * Write and wait upon all the pages in the passed range.  This is a "data
- * integrity" operation.  It waits upon in-flight writeout before starting and
- * waiting upon new writeout.  If there was an IO error, return it.
- *
- * We need to re-take i_mutex during the generic_osync_inode list walk because
- * it is otherwise livelockable.
- */
-int sync_page_range(struct inode *inode, struct address_space *mapping,
-                       loff_t pos, loff_t count)
-{
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-       int ret;
-
-       if (!mapping_cap_writeback_dirty(mapping) || !count)
-               return 0;
-       ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
-       if (ret == 0) {
-               mutex_lock(&inode->i_mutex);
-               ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-               mutex_unlock(&inode->i_mutex);
-       }
-       if (ret == 0)
-               ret = wait_on_page_writeback_range(mapping, start, end);
-       return ret;
-}
-EXPORT_SYMBOL(sync_page_range);
-
-/**
- * sync_page_range_nolock - write & wait on all pages in the passed range without locking
- * @inode:     target inode
- * @mapping:   target address_space
- * @pos:       beginning offset in pages to write
- * @count:     number of bytes to write
- *
- * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
- * as it forces O_SYNC writers to different parts of the same file
- * to be serialised right until io completion.
- */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-                          loff_t pos, loff_t count)
-{
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-       int ret;
-
-       if (!mapping_cap_writeback_dirty(mapping) || !count)
-               return 0;
-       ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
-       if (ret == 0)
-               ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-       if (ret == 0)
-               ret = wait_on_page_writeback_range(mapping, start, end);
-       return ret;
-}
-EXPORT_SYMBOL(sync_page_range_nolock);
-
-/**
  * filemap_fdatawait - wait for all under-writeback pages to complete
  * @mapping: address space structure to wait for
  *
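
The two helpers deleted above were the old O_SYNC machinery built on generic_osync_inode(). A sketch of the pattern that takes over in this series, assuming the generic_write_sync() and __generic_file_aio_write() helpers introduced around the same time (a fragment, not a verbatim copy of generic_file_aio_write()):

        ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                /* writeback + wait + metadata sync for O_SYNC / IS_SYNC writes */
                err = generic_write_sync(file, pos, ret);
                if (err < 0)
                        ret = err;
        }
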
@@ -495,6 +437,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                if (likely(!error)) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+                       if (PageSwapBacked(page))
+                               __inc_zone_page_state(page, NR_SHMEM);
                        spin_unlock_irq(&mapping->tree_lock);
                } else {
                        page->mapping = NULL;
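
This increment pairs with the decrement added to __remove_from_page_cache() above, keeping the per-zone counter from drifting. A sketch of a consumer, assuming the NR_SHMEM vmstat item added in the same series (hypothetical helper; /proc/meminfo does the equivalent via global_page_state()):

        static unsigned long shmem_resident_pages(void)
        {
                return global_page_state(NR_SHMEM);     /* pages, not bytes */
        }
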
@@ -1667,7 +1611,7 @@ page_not_uptodate:
 }
 EXPORT_SYMBOL(filemap_fault);
 
-struct vm_operations_struct generic_file_vm_ops = {
+const struct vm_operations_struct generic_file_vm_ops = {
        .fault          = filemap_fault,
 };
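
Constifying generic_file_vm_ops is transparent to existing users because, in the same constification series, vma->vm_ops becomes a pointer-to-const (treat that as an assumption here). A hypothetical filesystem ->mmap hook keeps assigning it unchanged:

        /* Hypothetical ->mmap implementation; simplified from generic_file_mmap(). */
        static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        {
                file_accessed(file);
                vma->vm_ops = &generic_file_vm_ops;     /* now points at const data */
                return 0;
        }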