/*
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
-#include <linux/buffer_head.h> /* for generic_osync_inode */
+#include <linux/buffer_head.h> /* for try_to_free_buffers */
#include <asm/mman.h>
-
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 */
/*
* Lock ordering:
*
- * ->i_mmap_lock (vmtruncate)
+ * ->i_mmap_lock (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
* ->task->proc_lock
* ->dcache_lock (proc_pid_lookup)
+ *
+ * (code doesn't rely on that order, so you could switch it around)
+ * ->tasklist_lock (memory_failure, collect_procs_ao)
+ * ->i_mmap_lock
*/
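/*
 * A minimal sketch of the nesting documented above (hypothetical helper,
 * assuming the spinlock-based i_mmap_lock and tree_lock of this era):
 * any path needing both locks must take i_mmap_lock first.
 */
static void example_ordered_locking(struct address_space *mapping)
{
	spin_lock(&mapping->i_mmap_lock);	/* outer lock first */
	spin_lock_irq(&mapping->tree_lock);	/* nested inner lock */

	/* ... operate on the page-cache radix tree under both locks ... */

	spin_unlock_irq(&mapping->tree_lock);
	spin_unlock(&mapping->i_mmap_lock);
}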
/*
page->mapping = NULL;
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
+ if (PageSwapBacked(page))
+ __dec_zone_page_state(page, NR_SHMEM);
BUG_ON(page_mapped(page));
/*
EXPORT_SYMBOL(filemap_fdatawait_range);
/**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode: target inode
- * @mapping: target address_space
- * @pos: beginning offset in pages to write
- * @count: number of bytes to write
- *
- * Write and wait upon all the pages in the passed range. This is a "data
- * integrity" operation. It waits upon in-flight writeout before starting and
- * waiting upon new writeout. If there was an IO error, return it.
- *
- * We need to re-take i_mutex during the generic_osync_inode list walk because
- * it is otherwise livelockable.
- */
-int sync_page_range(struct inode *inode, struct address_space *mapping,
- loff_t pos, loff_t count)
-{
- pgoff_t start = pos >> PAGE_CACHE_SHIFT;
- pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
- int ret;
-
- if (!mapping_cap_writeback_dirty(mapping) || !count)
- return 0;
- ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
- if (ret == 0) {
- mutex_lock(&inode->i_mutex);
- ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- mutex_unlock(&inode->i_mutex);
- }
- if (ret == 0)
- ret = wait_on_page_writeback_range(mapping, start, end);
- return ret;
-}
-EXPORT_SYMBOL(sync_page_range);
-
-/**
- * sync_page_range_nolock - write & wait on all pages in the passed range without locking
- * @inode: target inode
- * @mapping: target address_space
- * @pos: beginning offset in pages to write
- * @count: number of bytes to write
- *
- * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
- * as it forces O_SYNC writers to different parts of the same file
- * to be serialised right until io completion.
- */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
- loff_t pos, loff_t count)
-{
- pgoff_t start = pos >> PAGE_CACHE_SHIFT;
- pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
- int ret;
-
- if (!mapping_cap_writeback_dirty(mapping) || !count)
- return 0;
- ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
- if (ret == 0)
- ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- if (ret == 0)
- ret = wait_on_page_writeback_range(mapping, start, end);
- return ret;
-}
-EXPORT_SYMBOL(sync_page_range_nolock);
-
-/**
* filemap_fdatawait - wait for all under-writeback pages to complete
* @mapping: address space structure to wait for
*
if (likely(!error)) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
+ if (PageSwapBacked(page))
+ __inc_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
}
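/*
 * A minimal sketch of what the paired NR_SHMEM accounting above and in
 * __remove_from_page_cache() enables (hypothetical helper, assuming the
 * zone-stat API of this era): swap-backed page cache can be reported on
 * its own, the way a "Shmem:" meminfo line would be derived.
 */
static unsigned long shmem_cache_pages(void)
{
	return global_page_state(NR_SHMEM);	/* counted in pages, not bytes */
}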
EXPORT_SYMBOL(filemap_fault);
-struct vm_operations_struct generic_file_vm_ops = {
+const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
};
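/*
 * A minimal sketch of what the const qualifier permits (hypothetical
 * driver names, assuming the .fault signature of this era): ops tables
 * move into .rodata, and assigning them to vma->vm_ops is unchanged
 * because vm_ops is a pointer to const.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;		/* placeholder fault handler */
}

static const struct vm_operations_struct example_vm_ops = {
	.fault = example_fault,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &example_vm_ops;	/* read-only ops table */
	return 0;
}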
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = file->f_mapping->host;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
- if ((ret > 0 || ret == -EIOCBQUEUED) &&
- ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+ if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
- err = sync_page_range(inode, mapping, pos, ret);
+ err = generic_write_sync(file, pos, ret);
if (err < 0 && ret > 0)
ret = err;
}
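/*
 * A minimal sketch of the replacement helper (assuming the fs/sync.c
 * implementation that pairs with this change): generic_write_sync()
 * folds the old O_SYNC / IS_SYNC() test into one place and syncs only
 * the byte range actually written, rather than the page-range walk of
 * the removed sync_page_range().
 */
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
	if (!(file->f_flags & O_SYNC) && !IS_SYNC(file->f_mapping->host))
		return 0;
	return vfs_fsync_range(file, file->f_path.dentry, pos,
			       pos + count - 1, 1);
}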