* async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bit_spinlock.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static void invalidate_bh_lrus(void);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
void fastcall unlock_buffer(struct buffer_head *bh)
{
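+ /*
+  * Clear BH_Lock with a barrier on each side: stores made while the
+  * buffer was locked must be visible before the lock bit clears, and
+  * the clear must be visible before wake_up_bit() looks for waiters.
+  */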
+ smp_mb__before_clear_bit();
clear_buffer_locked(bh);
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
/*
* Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
- sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
- lock_super(sb);
- if (sb->s_dirt && sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
- sync_blockdev(sb->s_bdev);
- sync_inodes_sb(sb, 1);
-
- return sync_blockdev(sb->s_bdev);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
* device. Filesystem data as well as the underlying block
* device. Takes the superblock lock.
*/
* freeze_bdev -- lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
*
- * This takes the block device bd_mount_mutex to make sure no new mounts
+ * This takes the block device bd_mount_sem to make sure no new mounts
* happen on bdev until thaw_bdev() is called.
* If a superblock is found on this device, we take the s_umount semaphore
* on it to make sure nobody unmounts until the snapshot creation is done.
{
struct super_block *sb;
- mutex_lock(&bdev->bd_mount_mutex);
+ down(&bdev->bd_mount_sem);
sb = get_super(bdev);
if (sb && !(sb->s_flags & MS_RDONLY)) {
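/*
 * Two-step freeze: SB_FREEZE_WRITE holds off new writes, the sync
 * below flushes out everything already dirty, and SB_FREEZE_TRANS
 * then blocks new filesystem transactions.  Each smp_wmb() makes
 * the state change visible before the work that depends on it.
 */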
sb->s_frozen = SB_FREEZE_WRITE;
smp_wmb();
- sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
-
- lock_super(sb);
- if (sb->s_dirt && sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
-
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
-
- sync_blockdev(sb->s_bdev);
- sync_inodes_sb(sb, 1);
+ __fsync_super(sb);
sb->s_frozen = SB_FREEZE_TRANS;
smp_wmb();
drop_super(sb);
}
- mutex_unlock(&bdev->bd_mount_mutex);
+ up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);
/*
- * sync everything. Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
- wakeup_pdflush(0);
- sync_inodes(0); /* All mappings, inodes and their blockdevs */
- DQUOT_SYNC(NULL);
- sync_supers(); /* Write the superblocks */
- sync_filesystems(0); /* Start syncing the filesystems */
- sync_filesystems(wait); /* Waitingly sync the filesystems */
- sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
- if (!wait)
- printk("Emergency Sync complete\n");
- if (unlikely(laptop_mode))
- laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
- do_sync(1);
- return 0;
-}
-
-void emergency_sync(void)
-{
- pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
- struct inode * inode = dentry->d_inode;
- struct super_block * sb;
- int ret, err;
-
- /* sync the inode to buffers */
- ret = write_inode_now(inode, 0);
-
- /* sync the superblock to buffers */
- sb = inode->i_sb;
- lock_super(sb);
- if (sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
-
- /* .. finally sync the buffers to disk */
- err = sync_blockdev(sb->s_bdev);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static long do_fsync(unsigned int fd, int datasync)
-{
- struct file * file;
- struct address_space *mapping;
- int ret, err;
-
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
-
- ret = -EINVAL;
- if (!file->f_op || !file->f_op->fsync) {
- /* Why? We can still call filemap_fdatawrite */
- goto out_putf;
- }
-
- mapping = file->f_mapping;
-
- current->flags |= PF_SYNCWRITE;
- ret = filemap_fdatawrite(mapping);
-
- /*
- * We need to protect against concurrent writers,
- * which could cause livelocks in fsync_buffers_list
- */
- mutex_lock(&mapping->host->i_mutex);
- err = file->f_op->fsync(file, file->f_dentry, datasync);
- if (!ret)
- ret = err;
- mutex_unlock(&mapping->host->i_mutex);
- err = filemap_fdatawait(mapping);
- if (!ret)
- ret = err;
- current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
- fput(file);
-out:
- return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
- return do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
- return do_fsync(fd, 1);
-}
-
-/*
* Various filesystems appear to want __find_get_block to be non-blocking.
* But it's the page lock which protects the buffers. To get around this,
* we get exclusion from try_to_free_buffers with the blockdev mapping's
if (all_mapped) {
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block, (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%zu\n",
+ bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
}
out_unlock:
we think the disk contains more recent information than the buffercache.
The update == 1 pass marks the buffers we need to update, the update == 2
pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
+void invalidate_bdev(struct block_device *bdev)
{
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+
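+ /*
+  * Nothing of this device is cached, so there are no pages to drop
+  * and no point in the (all-CPU) flush of the buffer_head LRUs below.
+  */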
+ if (mapping->nrpages == 0)
+ return;
+
invalidate_bh_lrus();
- /*
- * FIXME: what about destroy_dirty_buffers?
- * We really want to use invalidate_inode_pages2() for
- * that, but not until that's cleaned up.
- */
- invalidate_inode_pages(bdev->bd_inode->i_mapping);
+ invalidate_mapping_pages(mapping, 0, -1);
}
/*
wakeup_pdflush(1024);
yield();
- for_each_pgdat(pgdat) {
+ for_each_online_pgdat(pgdat) {
zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
try_to_free_pages(zones, GFP_NOFS);
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
unsigned long flags;
bdevname(bh->b_bdev, b));
}
set_bit(AS_EIO, &page->mapping->flags);
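+ /*
+  * Record the failure on the buffer as well, so it can be propagated
+  * to the associated mapping when the buffer comes off the assoc list
+  * (see __remove_assoc_queue()).
+  */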
+ set_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
SetPageError(page);
}
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
list_del_init(&bh->b_assoc_buffers);
+ WARN_ON(!bh->b_assoc_map);
+ if (buffer_write_io_error(bh))
+ set_bit(AS_EIO, &bh->b_assoc_map->flags);
+ bh->b_assoc_map = NULL;
}
int inode_has_buffers(struct inode *inode)
if (!mapping->assoc_mapping) {
mapping->assoc_mapping = buffer_mapping;
} else {
- if (mapping->assoc_mapping != buffer_mapping)
- BUG();
+ BUG_ON(mapping->assoc_mapping != buffer_mapping);
}
if (list_empty(&bh->b_assoc_buffers)) {
spin_lock(&buffer_mapping->private_lock);
list_move_tail(&bh->b_assoc_buffers,
&mapping->private_list);
+ bh->b_assoc_map = mapping;
spin_unlock(&buffer_mapping->private_lock);
}
}
*/
int __set_page_dirty_buffers(struct page *page)
{
- struct address_space * const mapping = page->mapping;
+ struct address_space * const mapping = page_mapping(page);
+
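+ /*
+  * If the page has no mapping (it may have been truncated away under
+  * us) there are no buffers to walk and no radix tree tag to set;
+  * just set the dirty bit and report whether we were the ones to set it.
+  */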
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
spin_lock(&mapping->private_lock);
if (page_has_buffers(page)) {
}
spin_unlock(&mapping->private_lock);
- if (!TestSetPageDirty(page)) {
- write_lock_irq(&mapping->tree_lock);
- if (page->mapping) { /* Race with truncate? */
- if (mapping_cap_account_dirty(mapping))
- inc_page_state(nr_dirty);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
+ if (TestSetPageDirty(page))
+ return 0;
+
+ write_lock_irq(&mapping->tree_lock);
+ if (page->mapping) { /* Race with truncate? */
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ task_io_account_write(PAGE_CACHE_SIZE);
}
- write_unlock_irq(&mapping->tree_lock);
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
}
-
- return 0;
+ write_unlock_irq(&mapping->tree_lock);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ return 1;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
spin_lock(lock);
while (!list_empty(list)) {
bh = BH_ENTRY(list->next);
- list_del_init(&bh->b_assoc_buffers);
+ __remove_assoc_queue(bh);
if (buffer_dirty(bh) || buffer_locked(bh)) {
list_add(&bh->b_assoc_buffers, &tmp);
if (buffer_dirty(bh)) {
while (!list_empty(&tmp)) {
bh = BH_ENTRY(tmp.prev);
- __remove_assoc_queue(bh);
+ list_del_init(&bh->b_assoc_buffers);
get_bh(bh);
spin_unlock(lock);
wait_on_buffer(bh);
if (!page)
return NULL;
- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) {
bh = page_buffers(page);
} while ((size << sizebits) < PAGE_SIZE);
index = block >> sizebits;
- block = index << sizebits;
+ /*
+ * Check for a block which wants to lie outside our maximum possible
+ * pagecache index. (this comparison is done using sector_t types).
+ */
+ if (unlikely(index != block >> sizebits)) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_ERR "%s: requested out-of-range block %llu for "
+ "device %s\n",
+ __FUNCTION__, (unsigned long long)block,
+ bdevname(bdev, b));
+ return -EIO;
+ }
+ block = index << sizebits;
/* Create a page with the proper size buffers.. */
page = grow_dev_page(bdev, block, index, size);
if (!page)
for (;;) {
struct buffer_head * bh;
+ int ret;
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
- if (!grow_buffers(bdev, block, size))
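+ /*
+  * grow_buffers() now tells a permanent failure (block beyond the
+  * largest possible pagecache index, negative return) apart from a
+  * transient allocation failure (zero return); only the latter is
+  * worth retrying after freeing some memory.
+  */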
+ ret = grow_buffers(bdev, block, size);
+ if (ret < 0)
+ return NULL;
+ if (ret == 0)
free_more_memory();
}
}
spin_lock(&buffer_mapping->private_lock);
list_del_init(&bh->b_assoc_buffers);
+ bh->b_assoc_map = NULL;
spin_unlock(&buffer_mapping->private_lock);
}
__brelse(bh);
* Look up the bh in this cpu's LRU. If it's there, move it to the head.
*/
static struct buffer_head *
-lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
+lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *ret = NULL;
struct bh_lru *lru;
- int i;
+ unsigned int i;
check_irqs_on();
bh_lru_lock();
* NULL
*/
struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, int size)
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
* attempt is failing. FIXME, perhaps?
*/
struct buffer_head *
-__getblk(struct block_device *bdev, sector_t block, int size)
+__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __find_get_block(bdev, block, size);
/*
* Do async read-ahead on a buffer..
*/
-void __breadahead(struct block_device *bdev, sector_t block, int size)
+void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh)) {
* It returns NULL if the block was unreadable.
*/
struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, int size)
+__bread(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
put_cpu_var(bh_lrus);
}
-static void invalidate_bh_lrus(void)
+void invalidate_bh_lrus(void)
{
on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
struct page *page, unsigned long offset)
{
bh->b_page = page;
- if (offset >= PAGE_SIZE)
- BUG();
+ BUG_ON(offset >= PAGE_SIZE);
if (PageHighMem(page))
/*
* This catches illegal uses and preserves the offset:
clear_buffer_req(bh);
clear_buffer_new(bh);
clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
unlock_buffer(bh);
}
/**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, gfp_t gfp_mask)
-{
- struct address_space * const mapping = page->mapping;
-
- BUG_ON(!PageLocked(page));
- if (PageWriteback(page))
- return 0;
-
- if (mapping && mapping->a_ops->releasepage)
- return mapping->a_ops->releasepage(page, gfp_mask);
- return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
-/**
* block_invalidatepage - invalidate part or all of a buffer-backed page
*
* @page: the page which is affected
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
- int ret = 1;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
* so real IO is not possible anymore.
*/
if (offset == 0)
- ret = try_to_release_page(page, 0);
+ try_to_release_page(page, 0);
out:
- return ret;
+ return;
}
EXPORT_SYMBOL(block_invalidatepage);
-int do_invalidatepage(struct page *page, unsigned long offset)
-{
- int (*invalidatepage)(struct page *, unsigned long);
- invalidatepage = page->mapping->a_ops->invalidatepage;
- if (invalidatepage == NULL)
- invalidatepage = block_invalidatepage;
- return (*invalidatepage)(page, offset);
-}
-
/*
* We attach and possibly dirty the buffers atomically wrt
* __set_page_dirty_buffers() via private_lock. try_to_free_buffers
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
+ const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
BUG_ON(!PageLocked(page));
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
- create_empty_buffers(page, 1 << inode->i_blkbits,
+ create_empty_buffers(page, blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
* clean. Someone wrote them back by hand with
* ll_rw_block/submit_bh. A rare case.
*/
- int uptodate = 1;
- do {
- if (!buffer_uptodate(bh)) {
- uptodate = 0;
- break;
- }
- bh = bh->b_this_page;
- } while (bh != head);
- if (uptodate)
- SetPageUptodate(page);
end_page_writeback(page);
+
/*
* The page and buffer_heads can be released at any time from
* here on.
} while ((bh = bh->b_this_page) != head);
SetPageError(page);
BUG_ON(PageWriteback(page));
+ mapping_set_error(page->mapping, err);
set_page_writeback(page);
- unlock_page(page);
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
}
bh = next;
} while (bh != head);
+ unlock_page(page);
goto done;
}
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+ !buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++=bh;
if (block_start >= to)
break;
if (buffer_new(bh)) {
- void *kaddr;
-
clear_buffer_new(bh);
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr+block_start, 0, bh->b_size);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, block_start, bh->b_size, KM_USER0);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
fully_mapped = 0;
if (iblock < lblock) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
}
if (!buffer_mapped(bh)) {
- void *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + i * blocksize, 0, blocksize);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, i * blocksize, blocksize,
+ KM_USER0);
if (!err)
set_buffer_uptodate(bh);
continue;
long status;
unsigned zerofrom;
unsigned blocksize = 1 << inode->i_blkbits;
- void *kaddr;
while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
status = -ENOMEM;
PAGE_CACHE_SIZE, get_block);
if (status)
goto out_unmap;
- kaddr = kmap_atomic(new_page, KM_USER0);
- memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
- flush_dcache_page(new_page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(new_page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
+ KM_USER0);
generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
unlock_page(new_page);
page_cache_release(new_page);
if (status)
goto out1;
if (zerofrom < offset) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr+zerofrom, 0, offset-zerofrom);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
__block_commit_write(inode, page, zerofrom, offset);
}
return 0;
int i;
int ret = 0;
int is_mapped_to_disk = 1;
- int dirtied_it = 0;
if (PageMappedToDisk(page))
return 0;
create = 1;
if (block_start >= to)
create = 0;
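+ /* get_block() reads the size of the requested mapping from b_size */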
+ map_bh.b_size = blocksize;
ret = get_block(inode, block_in_file + block_in_page,
&map_bh, create);
if (ret)
continue;
if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
kaddr = kmap_atomic(page, KM_USER0);
- if (block_start < from) {
+ if (block_start < from)
memset(kaddr+block_start, 0, from-block_start);
- dirtied_it = 1;
- }
- if (block_end > to) {
+ if (block_end > to)
memset(kaddr + to, 0, block_end - to);
- dirtied_it = 1;
- }
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
continue;
if (is_mapped_to_disk)
SetPageMappedToDisk(page);
- SetPageUptodate(page);
-
- /*
- * Setting the page dirty here isn't necessary for the prepare_write
- * function - commit_write will do that. But if/when this function is
- * used within the pagefault handler to ensure that all mmapped pages
- * have backing space in the filesystem, we will need to dirty the page
- * if its contents were altered.
- */
- if (dirtied_it)
- set_page_dirty(page);
return 0;
* Error recovery is pretty slack. Clear the page and mark it dirty
* so we'll later zero out any blocks which _were_ allocated.
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, PAGE_CACHE_SIZE);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
SetPageUptodate(page);
set_page_dirty(page);
return ret;
}
EXPORT_SYMBOL(nobh_prepare_write);
+/*
+ * Make sure any changes to nobh_commit_write() are reflected in
+ * nobh_truncate_page(), since it doesn't call commit_write().
+ */
int nobh_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
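+ /*
+  * nobh_prepare_write() no longer marks the page uptodate; only now,
+  * with the caller's data copied in, is the whole page valid.
+  */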
+ SetPageUptodate(page);
set_page_dirty(page);
if (pos > inode->i_size) {
i_size_write(inode, pos);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
- void *kaddr;
int ret;
/* Is the page fully inside i_size? */
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned to;
struct page *page;
- struct address_space_operations *a_ops = mapping->a_ops;
- char *kaddr;
+ const struct address_space_operations *a_ops = mapping->a_ops;
int ret = 0;
if ((offset & (blocksize - 1)) == 0)
to = (offset + blocksize) & ~(blocksize - 1);
ret = a_ops->prepare_write(NULL, page, offset, to);
if (ret == 0) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+ KM_USER0);
+ /*
+ * It would be more correct to call aops->commit_write()
+ * here, but this is more efficient.
+ */
+ SetPageUptodate(page);
set_page_dirty(page);
}
unlock_page(page);
struct inode *inode = mapping->host;
struct page *page;
struct buffer_head *bh;
- void *kaddr;
int err;
blocksize = 1 << inode->i_blkbits;
err = 0;
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
goto unlock;
if (PageUptodate(page))
set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
+ if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
goto unlock;
}
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
-
+ zero_user_page(page, offset, length, KM_USER0);
mark_buffer_dirty(bh);
err = 0;
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
- void *kaddr;
/* Is the page fully inside i_size? */
if (page->index < end_index)
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
return __block_write_full_page(inode, page, get_block, wbc);
}
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
+ tmp.b_size = 1 << inode->i_blkbits;
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
spin_lock(&mapping->private_lock);
ret = drop_buffers(page, &buffers_to_free);
- if (ret) {
- /*
- * If the filesystem writes its buffers by hand (eg ext3)
- * then we can have clean buffers against a dirty page. We
- * clean the page here; otherwise later reattachment of buffers
- * could encounter a non-uptodate page, which is unresolvable.
- * This only applies in the rare case where try_to_free_buffers
- * succeeds but the page is not freed.
- */
- clear_page_dirty(page);
- }
+
+ /*
+ * If the filesystem writes its buffers by hand (eg ext3)
+ * then we can have clean buffers against a dirty page. We
+ * clean the page here; otherwise the VM will never notice
+ * that the filesystem did any IO at all.
+ *
+ * Also, during truncate, discard_buffer will have marked all
+ * the page's buffers clean. We discover that here and clean
+ * the page also.
+ *
+ * private_lock must be held over this entire operation in order
+ * to synchronise against __set_page_dirty_buffers and prevent the
+ * dirty bit from being lost.
+ */
+ if (ret)
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {
}
EXPORT_SYMBOL(try_to_free_buffers);
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
{
struct address_space *mapping;
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
- return 0;
}
/*
/*
* Buffer-head allocation
*/
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
/*
* Once the number of bh's in the machine exceeds this level, we start
if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
return;
__get_cpu_var(bh_accounting).ratelimit = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
tot += per_cpu(bh_accounting, i).nr;
buffer_heads_over_limit = (tot > max_buffer_heads);
}
EXPORT_SYMBOL(free_buffer_head);
static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
{
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
struct buffer_head * bh = (struct buffer_head *)data;
memset(bh, 0, sizeof(*bh));
}
}
-#ifdef CONFIG_HOTPLUG_CPU
static void buffer_exit_cpu(int cpu)
{
int i;
brelse(b->bhs[i]);
b->bhs[i] = NULL;
}
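+ /*
+  * Fold the dead CPU's buffer_head count into this CPU's counter so
+  * the global total stays accurate.
+  */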
+ get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+ per_cpu(bh_accounting, cpu).nr = 0;
+ put_cpu_var(bh_accounting);
}
static int buffer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- if (action == CPU_DEAD)
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
buffer_exit_cpu((unsigned long)hcpu);
return NOTIFY_OK;
}
-#endif /* CONFIG_HOTPLUG_CPU */
void __init buffer_init(void)
{
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_prepare_write);
-EXPORT_SYMBOL(end_buffer_async_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);