* async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static void invalidate_bh_lrus(void);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
void fastcall unlock_buffer(struct buffer_head *bh)
{
+ smp_mb__before_clear_bit();
clear_buffer_locked(bh);
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
__clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
- page->private = 0;
+ set_page_private(page, 0);
page_cache_release(page);
}
}
/*
- * Default synchronous end-of-IO handler.. Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
+ * End-of-IO handler helper function which does not touch the bh after
+ * unlocking it.
+ * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
+ * a race there is benign: unlock_buffer() only uses the bh's address for
+ * hashing after unlocking the buffer, so it doesn't actually touch the bh
+ * itself.
*/
-void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
if (uptodate) {
set_buffer_uptodate(bh);
} else {
/* This happens, due to failed READA attempts. */
clear_buffer_uptodate(bh);
}
unlock_buffer(bh);
+}
+
+/*
+ * Default synchronous end-of-IO handler. Just mark it up-to-date and
+ * unlock the buffer. This is what ll_rw_block uses too.
+ */
+void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+{
+ __end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
}
{
int ret = 0;
- if (bdev) {
- int err;
-
- ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
- err = filemap_fdatawait(bdev->bd_inode->i_mapping);
- if (!ret)
- ret = err;
- }
+ if (bdev)
+ ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
return ret;
}
EXPORT_SYMBOL(sync_blockdev);
/*
* Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
- sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
- lock_super(sb);
- if (sb->s_dirt && sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
- sync_blockdev(sb->s_bdev);
- sync_inodes_sb(sb, 1);
-
- return sync_blockdev(sb->s_bdev);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
* device. Filesystem data as well as the underlying block
* device. Takes the superblock lock.
*/
sb->s_frozen = SB_FREEZE_WRITE;
smp_wmb();
- sync_inodes_sb(sb, 0);
- DQUOT_SYNC(sb);
-
- lock_super(sb);
- if (sb->s_dirt && sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
-
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
-
- sync_blockdev(sb->s_bdev);
- sync_inodes_sb(sb, 1);
+ __fsync_super(sb);
sb->s_frozen = SB_FREEZE_TRANS;
smp_wmb();
EXPORT_SYMBOL(thaw_bdev);
/*
- * sync everything. Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
- wakeup_pdflush(0);
- sync_inodes(0); /* All mappings, inodes and their blockdevs */
- DQUOT_SYNC(NULL);
- sync_supers(); /* Write the superblocks */
- sync_filesystems(0); /* Start syncing the filesystems */
- sync_filesystems(wait); /* Waitingly sync the filesystems */
- sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */
- if (!wait)
- printk("Emergency Sync complete\n");
- if (unlikely(laptop_mode))
- laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
- do_sync(1);
- return 0;
-}
-
-void emergency_sync(void)
-{
- pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
- struct inode * inode = dentry->d_inode;
- struct super_block * sb;
- int ret, err;
-
- /* sync the inode to buffers */
- ret = write_inode_now(inode, 0);
-
- /* sync the superblock to buffers */
- sb = inode->i_sb;
- lock_super(sb);
- if (sb->s_op->write_super)
- sb->s_op->write_super(sb);
- unlock_super(sb);
-
- /* .. finally sync the buffers to disk */
- err = sync_blockdev(sb->s_bdev);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static long do_fsync(unsigned int fd, int datasync)
-{
- struct file * file;
- struct address_space *mapping;
- int ret, err;
-
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
-
- ret = -EINVAL;
- if (!file->f_op || !file->f_op->fsync) {
- /* Why? We can still call filemap_fdatawrite */
- goto out_putf;
- }
-
- mapping = file->f_mapping;
-
- current->flags |= PF_SYNCWRITE;
- ret = filemap_fdatawrite(mapping);
-
- /*
- * We need to protect against concurrent writers,
- * which could cause livelocks in fsync_buffers_list
- */
- down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, datasync);
- if (!ret)
- ret = err;
- up(&mapping->host->i_sem);
- err = filemap_fdatawait(mapping);
- if (!ret)
- ret = err;
- current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
- fput(file);
-out:
- return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
- return do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
- return do_fsync(fd, 1);
-}
-
-/*
* Various filesystems appear to want __find_get_block to be non-blocking.
* But it's the page lock which protects the buffers. To get around this,
* we get exclusion from try_to_free_buffers with the blockdev mapping's
* private_lock is contended then so is mapping->tree_lock).
*/
static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+__find_get_block_slow(struct block_device *bdev, sector_t block)
{
struct inode *bd_inode = bdev->bd_inode;
struct address_space *bd_mapping = bd_inode->i_mapping;
if (all_mapped) {
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block, (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr);
+ printk("b_state=0x%08lx, b_size=%zu\n",
+ bh->b_state, bh->b_size);
printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
}
out_unlock:
we think the disk contains more recent information than the buffercache.
The update == 1 pass marks the buffers we need to update, the update == 2
pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
+void invalidate_bdev(struct block_device *bdev)
{
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+ if (mapping->nrpages == 0)
+ return;
+
invalidate_bh_lrus();
- /*
- * FIXME: what about destroy_dirty_buffers?
- * We really want to use invalidate_inode_pages2() for
- * that, but not until that's cleaned up.
- */
- invalidate_inode_pages(bdev->bd_inode->i_mapping);
+ invalidate_mapping_pages(mapping, 0, -1);
}
/*
wakeup_pdflush(1024);
yield();
- for_each_pgdat(pgdat) {
- zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
+ for_each_online_pgdat(pgdat) {
+ zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS);
+ try_to_free_pages(zones, 0, GFP_NOFS);
}
}
*/
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
int page_uptodate = 1;
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*/
- spin_lock_irqsave(&page_uptodate_lock, flags);
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
/*
* If none of the buffers had errors and they are all
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
bdevname(bh->b_bdev, b));
}
set_bit(AS_EIO, &page->mapping->flags);
+ set_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
SetPageError(page);
}
- spin_lock_irqsave(&page_uptodate_lock, flags);
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
clear_buffer_async_write(bh);
unlock_buffer(bh);
tmp = bh->b_this_page;
}
tmp = tmp->b_this_page;
}
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
end_page_writeback(page);
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
list_del_init(&bh->b_assoc_buffers);
+ WARN_ON(!bh->b_assoc_map);
+ if (buffer_write_io_error(bh))
+ set_bit(AS_EIO, &bh->b_assoc_map->flags);
+ bh->b_assoc_map = NULL;
}
int inode_has_buffers(struct inode *inode)
if (!mapping->assoc_mapping) {
mapping->assoc_mapping = buffer_mapping;
} else {
- if (mapping->assoc_mapping != buffer_mapping)
- BUG();
+ BUG_ON(mapping->assoc_mapping != buffer_mapping);
}
if (list_empty(&bh->b_assoc_buffers)) {
spin_lock(&buffer_mapping->private_lock);
list_move_tail(&bh->b_assoc_buffers,
&mapping->private_list);
+ bh->b_assoc_map = mapping;
spin_unlock(&buffer_mapping->private_lock);
}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
+ * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static int __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+{
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ if (TestSetPageDirty(page))
+ return 0;
+
+ write_lock_irq(&mapping->tree_lock);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(warn && !PageUptodate(page));
+
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ __inc_bdi_stat(mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ task_io_account_write(PAGE_CACHE_SIZE);
+ }
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+ write_unlock_irq(&mapping->tree_lock);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+ return 1;
+}
+
+/*
* Add a page to the dirty page list.
*
* It is a sad fact of life that this function is called from several places
*/
int __set_page_dirty_buffers(struct page *page)
{
- struct address_space * const mapping = page->mapping;
+ struct address_space *mapping = page_mapping(page);
+
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
spin_lock(&mapping->private_lock);
if (page_has_buffers(page)) {
}
spin_unlock(&mapping->private_lock);
- if (!TestSetPageDirty(page)) {
- write_lock_irq(&mapping->tree_lock);
- if (page->mapping) { /* Race with truncate? */
- if (mapping_cap_account_dirty(mapping))
- inc_page_state(nr_dirty);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page),
- PAGECACHE_TAG_DIRTY);
- }
- write_unlock_irq(&mapping->tree_lock);
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- }
-
- return 0;
+ return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
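For orientation (not part of the patch): buffer-backed filesystems reach __set_page_dirty_buffers through their address_space_operations, either implicitly as the default when ->set_page_dirty is NULL or by naming it explicitly. A minimal sketch, with the myfs_* entries being hypothetical placeholders:

#include <linux/fs.h>
#include <linux/buffer_head.h>

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* hypothetical */
	.writepage	= myfs_writepage,	/* hypothetical */
	.set_page_dirty	= __set_page_dirty_buffers,
};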
spin_lock(lock);
while (!list_empty(list)) {
bh = BH_ENTRY(list->next);
- list_del_init(&bh->b_assoc_buffers);
+ __remove_assoc_queue(bh);
if (buffer_dirty(bh) || buffer_locked(bh)) {
list_add(&bh->b_assoc_buffers, &tmp);
if (buffer_dirty(bh)) {
* contents - it is a noop if I/O is still in
* flight on potentially older contents.
*/
- wait_on_buffer(bh);
- ll_rw_block(WRITE, 1, &bh);
+ ll_rw_block(SWRITE, 1, &bh);
brelse(bh);
spin_lock(lock);
}
while (!list_empty(&tmp)) {
bh = BH_ENTRY(tmp.prev);
- __remove_assoc_queue(bh);
+ list_del_init(&bh->b_assoc_buffers);
get_bh(bh);
spin_unlock(lock);
wait_on_buffer(bh);
bh->b_state = 0;
atomic_set(&bh->b_count, 0);
+ bh->b_private = NULL;
bh->b_size = size;
/* Link the buffer to its page */
set_bh_page(bh, page, offset);
- bh->b_end_io = NULL;
+ init_buffer(bh, NULL, NULL);
}
return head;
/*
struct page *page;
struct buffer_head *bh;
- page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+ page = find_or_create_page(inode->i_mapping, index,
+ (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
if (!page)
return NULL;
- if (!PageLocked(page))
- BUG();
+ BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) {
bh = page_buffers(page);
/*
* Create buffers for the specified block device block's page. If
* that page was dirty, the buffers are set dirty also.
- *
- * Except that's a bug. Attaching dirty buffers to a dirty
- * blockdev's page can result in filesystem corruption, because
- * some of those buffers may be aliases of filesystem data.
- * grow_dev_page() will go BUG() if this happens.
*/
-static inline int
+static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
struct page *page;
} while ((size << sizebits) < PAGE_SIZE);
index = block >> sizebits;
- block = index << sizebits;
+ /*
+ * Check for a block which wants to lie outside our maximum possible
+ * pagecache index. (this comparison is done using sector_t types).
+ */
+ if (unlikely(index != block >> sizebits)) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_ERR "%s: requested out-of-range block %llu for "
+ "device %s\n",
+ __FUNCTION__, (unsigned long long)block,
+ bdevname(bdev, b));
+ return -EIO;
+ }
+ block = index << sizebits;
/* Create a page with the proper size buffers.. */
page = grow_dev_page(bdev, block, index, size);
if (!page)
for (;;) {
struct buffer_head * bh;
+ int ret;
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
- if (!grow_buffers(bdev, block, size))
+ ret = grow_buffers(bdev, block, size);
+ if (ret < 0)
+ return NULL;
+ if (ret == 0)
free_more_memory();
}
}
*/
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
+ WARN_ON_ONCE(!buffer_uptodate(bh));
if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
- __set_page_dirty_nobuffers(bh->b_page);
+ __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
/*
spin_lock(&buffer_mapping->private_lock);
list_del_init(&bh->b_assoc_buffers);
+ bh->b_assoc_map = NULL;
spin_unlock(&buffer_mapping->private_lock);
}
__brelse(bh);
/*
* Look up the bh in this cpu's LRU. If it's there, move it to the head.
*/
-static inline struct buffer_head *
-lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
+static struct buffer_head *
+lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *ret = NULL;
struct bh_lru *lru;
- int i;
+ unsigned int i;
check_irqs_on();
bh_lru_lock();
* NULL
*/
struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, int size)
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
if (bh == NULL) {
- bh = __find_get_block_slow(bdev, block, size);
+ bh = __find_get_block_slow(bdev, block);
if (bh)
bh_lru_install(bh);
}
* attempt is failing. FIXME, perhaps?
*/
struct buffer_head *
-__getblk(struct block_device *bdev, sector_t block, int size)
+__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __find_get_block(bdev, block, size);
/*
* Do async read-ahead on a buffer..
*/
-void __breadahead(struct block_device *bdev, sector_t block, int size)
+void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
- ll_rw_block(READA, 1, &bh);
- brelse(bh);
+ if (likely(bh)) {
+ ll_rw_block(READA, 1, &bh);
+ brelse(bh);
+ }
}
EXPORT_SYMBOL(__breadahead);
* It returns NULL if the block was unreadable.
*/
struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, int size)
+__bread(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
- if (!buffer_uptodate(bh))
+ if (likely(bh) && !buffer_uptodate(bh))
bh = __bread_slow(bh);
return bh;
}
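Usage sketch (not from the patch): a metadata read through __bread, which after this change must tolerate a NULL return, since grow_buffers() can now fail with -EIO for an out-of-range block instead of looping forever. sb and block_nr are assumed to be in scope:

	struct buffer_head *bh;

	bh = __bread(sb->s_bdev, block_nr, sb->s_blocksize);
	if (!bh)
		return -EIO;	/* unreadable or out-of-range block */
	/* ... examine bh->b_data ... */
	brelse(bh);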
put_cpu_var(bh_lrus);
}
-static void invalidate_bh_lrus(void)
+void invalidate_bh_lrus(void)
{
on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}
struct page *page, unsigned long offset)
{
bh->b_page = page;
- if (offset >= PAGE_SIZE)
- BUG();
+ BUG_ON(offset >= PAGE_SIZE);
if (PageHighMem(page))
/*
* This catches illegal uses and preserves the offset:
/*
* Called when truncating a buffer on a page completely.
*/
-static inline void discard_buffer(struct buffer_head * bh)
+static void discard_buffer(struct buffer_head * bh)
{
lock_buffer(bh);
clear_buffer_dirty(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
unlock_buffer(bh);
}
/**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, int gfp_mask)
-{
- struct address_space * const mapping = page->mapping;
-
- BUG_ON(!PageLocked(page));
- if (PageWriteback(page))
- return 0;
-
- if (mapping && mapping->a_ops->releasepage)
- return mapping->a_ops->releasepage(page, gfp_mask);
- return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
-/**
* block_invalidatepage - invalidate part or all of a buffer-backed page
*
* @page: the page which is affected
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
- int ret = 1;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
* so real IO is not possible anymore.
*/
if (offset == 0)
- ret = try_to_release_page(page, 0);
+ try_to_release_page(page, 0);
out:
- return ret;
+ return;
}
EXPORT_SYMBOL(block_invalidatepage);
might_sleep();
- old_bh = __find_get_block_slow(bdev, block, 0);
+ old_bh = __find_get_block_slow(bdev, block);
if (old_bh) {
clear_buffer_dirty(old_bh);
wait_on_buffer(old_bh);
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
+ const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0;
BUG_ON(!PageLocked(page));
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
if (!page_has_buffers(page)) {
- create_empty_buffers(page, 1 << inode->i_blkbits,
+ create_empty_buffers(page, blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate));
}
* handle that here by just cleaning them.
*/
- block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
head = page_buffers(page);
bh = head;
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
* clean. Someone wrote them back by hand with
* ll_rw_block/submit_bh. A rare case.
*/
- int uptodate = 1;
- do {
- if (!buffer_uptodate(bh)) {
- uptodate = 0;
- break;
- }
- bh = bh->b_this_page;
- } while (bh != head);
- if (uptodate)
- SetPageUptodate(page);
end_page_writeback(page);
+
/*
* The page and buffer_heads can be released at any time from
* here on.
*/
- wbc->pages_skipped++; /* We didn't write this page */
}
return err;
} while ((bh = bh->b_this_page) != head);
SetPageError(page);
BUG_ON(PageWriteback(page));
+ mapping_set_error(page->mapping, err);
set_page_writeback(page);
- unlock_page(page);
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
}
bh = next;
} while (bh != head);
+ unlock_page(page);
goto done;
}
+/*
+ * If a page has any new buffers, zero them out here, and mark them uptodate
+ * and dirty so they'll be written out (in order to prevent uninitialised
+ * block data from leaking). And clear the new bit.
+ */
+void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+{
+ unsigned int block_start, block_end;
+ struct buffer_head *head, *bh;
+
+ BUG_ON(!PageLocked(page));
+ if (!page_has_buffers(page))
+ return;
+
+ bh = head = page_buffers(page);
+ block_start = 0;
+ do {
+ block_end = block_start + bh->b_size;
+
+ if (buffer_new(bh)) {
+ if (block_end > from && block_start < to) {
+ if (!PageUptodate(page)) {
+ unsigned start, size;
+
+ start = max(from, block_start);
+ size = min(to, block_end) - start;
+
+ zero_user_page(page, start, size, KM_USER0);
+ set_buffer_uptodate(bh);
+ }
+
+ clear_buffer_new(bh);
+ mark_buffer_dirty(bh);
+ }
+ }
+
+ block_start = block_end;
+ bh = bh->b_this_page;
+ } while (bh != head);
+}
+EXPORT_SYMBOL(page_zero_new_buffers);
+
static int __block_prepare_write(struct inode *inode, struct page *page,
unsigned from, unsigned to, get_block_t *get_block)
{
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
+ clear_buffer_new(bh);
set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from) {
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+ !buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++=bh;
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
- if (!err) {
- bh = head;
- do {
- if (buffer_new(bh))
- clear_buffer_new(bh);
- } while ((bh = bh->b_this_page) != head);
- return 0;
- }
- /* Error case: */
- /*
- * Zero out any newly allocated blocks to avoid exposing stale
- * data. If BH_New is set, we know that the block was newly
- * allocated in the above loop.
- */
- bh = head;
- block_start = 0;
- do {
- block_end = block_start+blocksize;
- if (block_end <= from)
- goto next_bh;
- if (block_start >= to)
- break;
- if (buffer_new(bh)) {
- void *kaddr;
-
- clear_buffer_new(bh);
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr+block_start, 0, bh->b_size);
- kunmap_atomic(kaddr, KM_USER0);
- set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
- }
-next_bh:
- block_start = block_end;
- bh = bh->b_this_page;
- } while (bh != head);
+ if (unlikely(err))
+ page_zero_new_buffers(page, from, to);
return err;
}
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
+ clear_buffer_new(bh);
}
/*
}
/*
+ * block_write_begin takes care of the basic task of block allocation and
+ * bringing partial write blocks uptodate first.
+ *
+ * If *pagep is not NULL, then block_write_begin uses the locked page
+ * at *pagep rather than allocating its own. In this case, the page will
+ * not be unlocked or deallocated on failure.
+ */
+int block_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata,
+ get_block_t *get_block)
+{
+ struct inode *inode = mapping->host;
+ int status = 0;
+ struct page *page;
+ pgoff_t index;
+ unsigned start, end;
+ int ownpage = 0;
+
+ index = pos >> PAGE_CACHE_SHIFT;
+ start = pos & (PAGE_CACHE_SIZE - 1);
+ end = start + len;
+
+ page = *pagep;
+ if (page == NULL) {
+ ownpage = 1;
+ page = __grab_cache_page(mapping, index);
+ if (!page) {
+ status = -ENOMEM;
+ goto out;
+ }
+ *pagep = page;
+ } else
+ BUG_ON(!PageLocked(page));
+
+ status = __block_prepare_write(inode, page, start, end, get_block);
+ if (unlikely(status)) {
+ ClearPageUptodate(page);
+
+ if (ownpage) {
+ unlock_page(page);
+ page_cache_release(page);
+ *pagep = NULL;
+
+ /*
+ * prepare_write() may have instantiated a few blocks
+ * outside i_size. Trim these off again. Don't need
+ * i_size_read because we hold i_mutex.
+ */
+ if (pos + len > inode->i_size)
+ vmtruncate(inode, inode->i_size);
+ }
+ goto out;
+ }
+
+out:
+ return status;
+}
+EXPORT_SYMBOL(block_write_begin);
+
+int block_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ unsigned start;
+
+ start = pos & (PAGE_CACHE_SIZE - 1);
+
+ if (unlikely(copied < len)) {
+ /*
+ * The buffers that were written will now be uptodate, so we
+ * don't have to worry about a readpage reading them and
+ * overwriting a partial write. However if we have encountered
+ * a short write and only partially written into a buffer, it
+ * will not be marked uptodate, so a readpage might come in and
+ * destroy our partial write.
+ *
+ * Do the simplest thing, and just treat any short write to a
+ * non uptodate page as a zero-length write, and force the
+ * caller to redo the whole thing.
+ */
+ if (!PageUptodate(page))
+ copied = 0;
+
+ page_zero_new_buffers(page, start+copied, start+len);
+ }
+ flush_dcache_page(page);
+
+ /* This could be a short (even 0-length) commit */
+ __block_commit_write(inode, page, start, start+copied);
+
+ return copied;
+}
+EXPORT_SYMBOL(block_write_end);
+
+int generic_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+
+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+ /*
+ * No need to use i_size_read() here, the i_size
+ * cannot change under us because we hold i_mutex.
+ *
+ * But it's important to update i_size while still holding page lock:
+ * page writeout could otherwise come in and zero beyond i_size.
+ */
+ if (pos+copied > inode->i_size) {
+ i_size_write(inode, pos+copied);
+ mark_inode_dirty(inode);
+ }
+
+ unlock_page(page);
+ page_cache_release(page);
+
+ return copied;
+}
+EXPORT_SYMBOL(generic_write_end);
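For reference, a filesystem converting from prepare_write/commit_write would wire the new helpers up roughly like this; a sketch only, where myfs_get_block, myfs_readpage and myfs_writepage are hypothetical:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;		/* let block_write_begin allocate the page */
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};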
+
+/*
* Generic "read page" function for block devices that have the normal
* get_block functionality. This is most of the block device filesystems.
* Reads the page asynchronously --- the unlock_buffer() and
fully_mapped = 0;
if (iblock < lblock) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
SetPageError(page);
}
if (!buffer_mapped(bh)) {
- void *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + i * blocksize, 0, blocksize);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, i * blocksize, blocksize,
+ KM_USER0);
if (!err)
set_buffer_uptodate(bh);
continue;
}
/* utility function for filesystems that need to do work on expanding
- * truncates. Uses prepare/commit_write to allow the filesystem to
+ * truncates. Uses filesystem pagecache writes to allow the filesystem to
* deal with the hole.
*/
-int generic_cont_expand(struct inode *inode, loff_t size)
+int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
- unsigned long index, offset, limit;
+ void *fsdata;
+ unsigned long limit;
int err;
err = -EFBIG;
if (size > inode->i_sb->s_maxbytes)
goto out;
- offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-
- /* ugh. in prepare/commit_write, if from==to==start of block, we
- ** skip the prepare. make sure we never send an offset for the start
- ** of a block
- */
- if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
- offset++;
- }
- index = size >> PAGE_CACHE_SHIFT;
- err = -ENOMEM;
- page = grab_cache_page(mapping, index);
- if (!page)
+ err = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
+ &page, &fsdata);
+ if (err)
goto out;
- err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
- if (!err) {
- err = mapping->a_ops->commit_write(NULL, page, offset, offset);
- }
- unlock_page(page);
- page_cache_release(page);
- if (err > 0)
- err = 0;
+
+ err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
+ BUG_ON(err > 0);
+
out:
return err;
}
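A typical caller (sketch, not part of this patch) is a ->setattr path that extends the file before moving i_size, assuming attr and inode are the usual arguments:

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}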
-/*
- * For moronic filesystems that do not allow holes in file.
- * We may have to extend the file.
- */
-
-int cont_prepare_write(struct page *page, unsigned offset,
- unsigned to, get_block_t *get_block, loff_t *bytes)
+int cont_expand_zero(struct file *file, struct address_space *mapping,
+ loff_t pos, loff_t *bytes)
{
- struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
- struct page *new_page;
- pgoff_t pgpos;
- long status;
- unsigned zerofrom;
unsigned blocksize = 1 << inode->i_blkbits;
- void *kaddr;
+ struct page *page;
+ void *fsdata;
+ pgoff_t index, curidx;
+ loff_t curpos;
+ unsigned zerofrom, offset, len;
+ int err = 0;
- while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
- status = -ENOMEM;
- new_page = grab_cache_page(mapping, pgpos);
- if (!new_page)
- goto out;
- /* we might sleep */
- if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
- unlock_page(new_page);
- page_cache_release(new_page);
- continue;
- }
- zerofrom = *bytes & ~PAGE_CACHE_MASK;
+ index = pos >> PAGE_CACHE_SHIFT;
+ offset = pos & ~PAGE_CACHE_MASK;
+
+ while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
+ zerofrom = curpos & ~PAGE_CACHE_MASK;
if (zerofrom & (blocksize-1)) {
*bytes |= (blocksize-1);
(*bytes)++;
}
- status = __block_prepare_write(inode, new_page, zerofrom,
- PAGE_CACHE_SIZE, get_block);
- if (status)
- goto out_unmap;
- kaddr = kmap_atomic(new_page, KM_USER0);
- memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
- flush_dcache_page(new_page);
- kunmap_atomic(kaddr, KM_USER0);
- generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
- unlock_page(new_page);
- page_cache_release(new_page);
- }
-
- if (page->index < pgpos) {
- /* completely inside the area */
- zerofrom = offset;
- } else {
- /* page covers the boundary, find the boundary offset */
- zerofrom = *bytes & ~PAGE_CACHE_MASK;
+ len = PAGE_CACHE_SIZE - zerofrom;
+
+ err = pagecache_write_begin(file, mapping, curpos, len,
+ AOP_FLAG_UNINTERRUPTIBLE,
+ &page, &fsdata);
+ if (err)
+ goto out;
+ zero_user_page(page, zerofrom, len, KM_USER0);
+ err = pagecache_write_end(file, mapping, curpos, len, len,
+ page, fsdata);
+ if (err < 0)
+ goto out;
+ BUG_ON(err != len);
+ err = 0;
+ }
+ /* page covers the boundary, find the boundary offset */
+ if (index == curidx) {
+ zerofrom = curpos & ~PAGE_CACHE_MASK;
/* if we will expand the thing last block will be filled */
- if (to > zerofrom && (zerofrom & (blocksize-1))) {
+ if (offset <= zerofrom) {
+ goto out;
+ }
+ if (zerofrom & (blocksize-1)) {
*bytes |= (blocksize-1);
(*bytes)++;
}
+ len = offset - zerofrom;
- /* starting below the boundary? Nothing to zero out */
- if (offset <= zerofrom)
- zerofrom = offset;
+ err = pagecache_write_begin(file, mapping, curpos, len,
+ AOP_FLAG_UNINTERRUPTIBLE,
+ &page, &fsdata);
+ if (err)
+ goto out;
+ zero_user_page(page, zerofrom, len, KM_USER0);
+ err = pagecache_write_end(file, mapping, curpos, len, len,
+ page, fsdata);
+ if (err < 0)
+ goto out;
+ BUG_ON(err != len);
+ err = 0;
}
- status = __block_prepare_write(inode, page, zerofrom, to, get_block);
- if (status)
- goto out1;
- if (zerofrom < offset) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr+zerofrom, 0, offset-zerofrom);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- __block_commit_write(inode, page, zerofrom, offset);
+out:
+ return err;
+}
+
+/*
+ * For moronic filesystems that do not allow holes in file.
+ * We may have to extend the file.
+ */
+int cont_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata,
+ get_block_t *get_block, loff_t *bytes)
+{
+ struct inode *inode = mapping->host;
+ unsigned blocksize = 1 << inode->i_blkbits;
+ unsigned zerofrom;
+ int err;
+
+ err = cont_expand_zero(file, mapping, pos, bytes);
+ if (err)
+ goto out;
+
+ zerofrom = *bytes & ~PAGE_CACHE_MASK;
+ if (pos+len > *bytes && zerofrom & (blocksize-1)) {
+ *bytes |= (blocksize-1);
+ (*bytes)++;
}
- return 0;
-out1:
- ClearPageUptodate(page);
- return status;
-out_unmap:
- ClearPageUptodate(new_page);
- unlock_page(new_page);
- page_cache_release(new_page);
+ *pagep = NULL;
+ err = block_write_begin(file, mapping, pos, len,
+ flags, pagep, fsdata, get_block);
out:
- return status;
+ return err;
}
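Sketch of how a no-holes filesystem might call this, where MYFS_I()->mmu_private is a hypothetical per-inode record of the highest initialised byte (the loff_t that cont_write_begin advances as it zero-fills):

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block,
				&MYFS_I(mapping->host)->mmu_private);
}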
int block_prepare_write(struct page *page, unsigned from, unsigned to,
__block_commit_write(inode,page,from,to);
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_sem.
+ * cannot change under us because we hold i_mutex.
*/
if (pos > inode->i_size) {
i_size_write(inode, pos);
return 0;
}
+/*
+ * block_page_mkwrite() is not allowed to change the file size as it gets
+ * called from a page fault handler when a page is first dirtied. Hence we must
+ * be careful to check for EOF conditions here. We set the page up correctly
+ * for a written page which means we get ENOSPC checking when writing into
+ * holes and correct delalloc and unwritten extent mapping on filesystems that
+ * support these features.
+ *
+ * We are not allowed to take the i_mutex here so we have to play games to
+ * protect against truncate races as the page could now be beyond EOF. Because
+ * vmtruncate() writes the inode size before removing pages, once we have the
+ * page lock we can determine safely if the page is beyond EOF. If it is not
+ * beyond EOF, then the page is guaranteed safe against truncation until we
+ * unlock the page.
+ */
+int
+block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+ get_block_t get_block)
+{
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ unsigned long end;
+ loff_t size;
+ int ret = -EINVAL;
+
+ lock_page(page);
+ size = i_size_read(inode);
+ if ((page->mapping != inode->i_mapping) ||
+ (page_offset(page) > size)) {
+ /* page got truncated out from underneath us */
+ goto out_unlock;
+ }
+
+ /* page is wholly or partially inside EOF */
+ if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
+ end = size & ~PAGE_CACHE_MASK;
+ else
+ end = PAGE_CACHE_SIZE;
+
+ ret = block_prepare_write(page, 0, end, get_block);
+ if (!ret)
+ ret = block_commit_write(page, 0, end);
+
+out_unlock:
+ unlock_page(page);
+ return ret;
+}
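A filesystem would hook this up from its ->page_mkwrite handler; a sketch under the vm_operations interface of this era, with the myfs_* names hypothetical:

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}

static struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};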
/*
- * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
+ * nobh_write_begin()'s prereads are special: the buffer_heads are freed
* immediately, while under the page lock. So it needs a special end_io
* handler which does not touch the bh after unlocking it.
- *
- * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
- * a race there is benign: unlock_buffer() only use the bh's address for
- * hashing after unlocking the buffer, so it doesn't actually touch the bh
- * itself.
*/
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- /* This happens, due to failed READA attempts. */
- clear_buffer_uptodate(bh);
- }
- unlock_buffer(bh);
+ __end_buffer_read_notouch(bh, uptodate);
+}
+
+/*
+ * Attach the singly-linked list of buffers created by nobh_write_begin, to
+ * the page (converting it to circular linked list and taking care of page
+ * dirty races).
+ */
+static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
+{
+ struct buffer_head *bh;
+
+ BUG_ON(!PageLocked(page));
+
+ spin_lock(&page->mapping->private_lock);
+ bh = head;
+ do {
+ if (PageDirty(page))
+ set_buffer_dirty(bh);
+ if (!bh->b_this_page)
+ bh->b_this_page = head;
+ bh = bh->b_this_page;
+ } while (bh != head);
+ attach_page_buffers(page, head);
+ spin_unlock(&page->mapping->private_lock);
}
/*
* On entry, the page is fully not uptodate.
* On exit the page is fully uptodate in the areas outside (from,to)
*/
-int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
+int nobh_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata,
get_block_t *get_block)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocksize = 1 << blkbits;
- struct buffer_head map_bh;
- struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
+ struct buffer_head *head, *bh;
+ struct page *page;
+ pgoff_t index;
+ unsigned from, to;
unsigned block_in_page;
- unsigned block_start;
+ unsigned block_start, block_end;
sector_t block_in_file;
char *kaddr;
int nr_reads = 0;
- int i;
int ret = 0;
int is_mapped_to_disk = 1;
- int dirtied_it = 0;
+
+ index = pos >> PAGE_CACHE_SHIFT;
+ from = pos & (PAGE_CACHE_SIZE - 1);
+ to = from + len;
+
+ page = __grab_cache_page(mapping, index);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+ *fsdata = NULL;
+
+ if (page_has_buffers(page)) {
+ unlock_page(page);
+ page_cache_release(page);
+ *pagep = NULL;
+ return block_write_begin(file, mapping, pos, len, flags, pagep,
+ fsdata, get_block);
+ }
if (PageMappedToDisk(page))
return 0;
+ /*
+ * Allocate buffers so that we can keep track of state, and potentially
+ * attach them to the page if an error occurs. In the common case of
+ * no error, they will just be freed again without ever being attached
+ * to the page (which is all OK, because we're under the page lock).
+ *
+ * Be careful: the buffer linked list is a NULL terminated one, rather
+ * than the circular one we're used to.
+ */
+ head = alloc_page_buffers(page, blocksize, 0);
+ if (!head) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
- map_bh.b_page = page;
/*
* We loop across all blocks in the page, whether or not they are
* part of the affected region. This is so we can discover if the
* page is fully mapped-to-disk.
*/
- for (block_start = 0, block_in_page = 0;
+ for (block_start = 0, block_in_page = 0, bh = head;
block_start < PAGE_CACHE_SIZE;
- block_in_page++, block_start += blocksize) {
- unsigned block_end = block_start + blocksize;
+ block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
int create;
- map_bh.b_state = 0;
+ block_end = block_start + blocksize;
+ bh->b_state = 0;
create = 1;
if (block_start >= to)
create = 0;
ret = get_block(inode, block_in_file + block_in_page,
- &map_bh, create);
+ bh, create);
if (ret)
goto failed;
- if (!buffer_mapped(&map_bh))
+ if (!buffer_mapped(bh))
is_mapped_to_disk = 0;
- if (buffer_new(&map_bh))
- unmap_underlying_metadata(map_bh.b_bdev,
- map_bh.b_blocknr);
- if (PageUptodate(page))
+ if (buffer_new(bh))
+ unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+ if (PageUptodate(page)) {
+ set_buffer_uptodate(bh);
continue;
- if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
+ }
+ if (buffer_new(bh) || !buffer_mapped(bh)) {
kaddr = kmap_atomic(page, KM_USER0);
- if (block_start < from) {
+ if (block_start < from)
memset(kaddr+block_start, 0, from-block_start);
- dirtied_it = 1;
- }
- if (block_end > to) {
+ if (block_end > to)
memset(kaddr + to, 0, block_end - to);
- dirtied_it = 1;
- }
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
continue;
}
- if (buffer_uptodate(&map_bh))
+ if (buffer_uptodate(bh))
continue; /* reiserfs does this */
if (block_start < from || block_end > to) {
- struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
-
- if (!bh) {
- ret = -ENOMEM;
- goto failed;
- }
- bh->b_state = map_bh.b_state;
- atomic_set(&bh->b_count, 0);
- bh->b_this_page = NULL;
- bh->b_page = page;
- bh->b_blocknr = map_bh.b_blocknr;
- bh->b_size = blocksize;
- bh->b_data = (char *)(long)block_start;
- bh->b_bdev = map_bh.b_bdev;
- bh->b_private = NULL;
- read_bh[nr_reads++] = bh;
+ lock_buffer(bh);
+ bh->b_end_io = end_buffer_read_nobh;
+ submit_bh(READ, bh);
+ nr_reads++;
}
}
if (nr_reads) {
- struct buffer_head *bh;
-
/*
* The page is locked, so these buffers are protected from
* any VM or truncate activity. Hence we don't need to care
* for the buffer_head refcounts.
*/
- for (i = 0; i < nr_reads; i++) {
- bh = read_bh[i];
- lock_buffer(bh);
- bh->b_end_io = end_buffer_read_nobh;
- submit_bh(READ, bh);
- }
- for (i = 0; i < nr_reads; i++) {
- bh = read_bh[i];
+ for (bh = head; bh; bh = bh->b_this_page) {
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
ret = -EIO;
- free_buffer_head(bh);
- read_bh[i] = NULL;
}
if (ret)
goto failed;
if (is_mapped_to_disk)
SetPageMappedToDisk(page);
- SetPageUptodate(page);
- /*
- * Setting the page dirty here isn't necessary for the prepare_write
- * function - commit_write will do that. But if/when this function is
- * used within the pagefault handler to ensure that all mmapped pages
- * have backing space in the filesystem, we will need to dirty the page
- * if its contents were altered.
- */
- if (dirtied_it)
- set_page_dirty(page);
+ *fsdata = head; /* to be released by nobh_write_end */
return 0;
failed:
- for (i = 0; i < nr_reads; i++) {
- if (read_bh[i])
- free_buffer_head(read_bh[i]);
- }
-
+ BUG_ON(!ret);
/*
- * Error recovery is pretty slack. Clear the page and mark it dirty
- * so we'll later zero out any blocks which _were_ allocated.
+ * Error recovery is a bit difficult. We need to zero out blocks that
+ * were newly allocated, and dirty them to ensure they get written out.
+ * Buffers need to be attached to the page at this point, otherwise
+ * the handling of potential IO errors during writeout would be hard
+ * (could try doing synchronous writeout, but what if that fails too?)
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, PAGE_CACHE_SIZE);
- kunmap_atomic(kaddr, KM_USER0);
- SetPageUptodate(page);
- set_page_dirty(page);
+ attach_nobh_buffers(page, head);
+ page_zero_new_buffers(page, from, to);
+
+out_release:
+ unlock_page(page);
+ page_cache_release(page);
+ *pagep = NULL;
+
+ if (pos + len > inode->i_size)
+ vmtruncate(inode, inode->i_size);
+
return ret;
}
-EXPORT_SYMBOL(nobh_prepare_write);
+EXPORT_SYMBOL(nobh_write_begin);
-int nobh_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
+int nobh_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
{
struct inode *inode = page->mapping->host;
- loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+ struct buffer_head *head = NULL;
+ struct buffer_head *bh;
+
+ if (!PageMappedToDisk(page)) {
+ if (unlikely(copied < len) && !page_has_buffers(page))
+ attach_nobh_buffers(page, head);
+ if (page_has_buffers(page))
+ return generic_write_end(file, mapping, pos, len,
+ copied, page, fsdata);
+ }
+ SetPageUptodate(page);
set_page_dirty(page);
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
+ if (pos+copied > inode->i_size) {
+ i_size_write(inode, pos+copied);
mark_inode_dirty(inode);
}
- return 0;
+
+ unlock_page(page);
+ page_cache_release(page);
+
+ head = fsdata;
+ while (head) {
+ bh = head;
+ head = head->b_this_page;
+ free_buffer_head(bh);
+ }
+
+ return copied;
}
-EXPORT_SYMBOL(nobh_commit_write);
+EXPORT_SYMBOL(nobh_write_end);
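Sketch of the nobh flavour of the aops wiring (myfs_* names hypothetical). nobh_write_begin hands its buffer list back through *fsdata, so nobh_write_end must be the matching ->write_end:

static int myfs_nobh_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_nobh_writepage,
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};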
/*
* nobh_writepage() - based on block_write_full_page() except
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
- void *kaddr;
int ret;
/* Is the page fully inside i_size? */
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
}
EXPORT_SYMBOL(nobh_writepage);
-/*
- * This function assumes that ->prepare_write() uses nobh_prepare_write().
- */
-int nobh_truncate_page(struct address_space *mapping, loff_t from)
+int nobh_truncate_page(struct address_space *mapping,
+ loff_t from, get_block_t *get_block)
{
- struct inode *inode = mapping->host;
- unsigned blocksize = 1 << inode->i_blkbits;
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
- unsigned to;
+ unsigned blocksize;
+ sector_t iblock;
+ unsigned length, pos;
+ struct inode *inode = mapping->host;
struct page *page;
- struct address_space_operations *a_ops = mapping->a_ops;
- char *kaddr;
- int ret = 0;
+ struct buffer_head map_bh;
+ int err;
- if ((offset & (blocksize - 1)) == 0)
- goto out;
+ blocksize = 1 << inode->i_blkbits;
+ length = offset & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!length)
+ return 0;
+
+ length = blocksize - length;
+ iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- ret = -ENOMEM;
page = grab_cache_page(mapping, index);
+ err = -ENOMEM;
if (!page)
goto out;
- to = (offset + blocksize) & ~(blocksize - 1);
- ret = a_ops->prepare_write(NULL, page, offset, to);
- if (ret == 0) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- set_page_dirty(page);
+ if (page_has_buffers(page)) {
+has_buffers:
+ unlock_page(page);
+ page_cache_release(page);
+ return block_truncate_page(mapping, from, get_block);
}
+
+ /* Find the buffer that contains "offset" */
+ pos = blocksize;
+ while (offset >= pos) {
+ iblock++;
+ pos += blocksize;
+ }
+
+ err = get_block(inode, iblock, &map_bh, 0);
+ if (err)
+ goto unlock;
+ /* unmapped? It's a hole - nothing to do */
+ if (!buffer_mapped(&map_bh))
+ goto unlock;
+
+ /* Ok, it's mapped. Make sure it's up-to-date */
+ if (!PageUptodate(page)) {
+ err = mapping->a_ops->readpage(NULL, page);
+ if (err) {
+ page_cache_release(page);
+ goto out;
+ }
+ lock_page(page);
+ if (!PageUptodate(page)) {
+ err = -EIO;
+ goto unlock;
+ }
+ if (page_has_buffers(page))
+ goto has_buffers;
+ }
+ zero_user_page(page, offset, length, KM_USER0);
+ set_page_dirty(page);
+ err = 0;
+
+unlock:
unlock_page(page);
page_cache_release(page);
out:
- return ret;
+ return err;
}
EXPORT_SYMBOL(nobh_truncate_page);
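In a truncate path the new signature is used roughly like this (sketch; myfs_get_block and the NOBH mount-option check are hypothetical):

	if (myfs_test_opt(inode->i_sb, NOBH))
		err = nobh_truncate_page(inode->i_mapping, inode->i_size,
					 myfs_get_block);
	else
		err = block_truncate_page(inode->i_mapping, inode->i_size,
					  myfs_get_block);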
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize;
- pgoff_t iblock;
+ sector_t iblock;
unsigned length, pos;
struct inode *inode = mapping->host;
struct page *page;
struct buffer_head *bh;
- void *kaddr;
int err;
blocksize = 1 << inode->i_blkbits;
return 0;
length = blocksize - length;
- iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
err = 0;
if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
err = get_block(inode, iblock, bh, 0);
if (err)
goto unlock;
if (PageUptodate(page))
set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
+ if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
goto unlock;
}
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
-
+ zero_user_page(page, offset, length, KM_USER0);
mark_buffer_dirty(bh);
err = 0;
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset;
- void *kaddr;
/* Is the page fully inside i_size? */
if (page->index < end_index)
* they may have been added in ext3_writepage(). Make them
* freeable here, so the page does not leak.
*/
- block_invalidatepage(page, 0);
+ do_invalidatepage(page, 0);
unlock_page(page);
return 0; /* don't care */
}
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
return __block_write_full_page(inode, page, get_block, wbc);
}
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
+ tmp.b_size = 1 << inode->i_blkbits;
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
-static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
+static void end_bio_bh_io_sync(struct bio *bio, int err)
{
struct buffer_head *bh = bio->bi_private;
- if (bio->bi_size)
- return 1;
-
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
set_bit(BH_Eopnotsupp, &bh->b_state);
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
- return 0;
}
int submit_bh(int rw, struct buffer_head * bh)
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE. The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
*
* This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing a read request. Further it marks as clean buffers that
+ * are processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
*
* ll_rw_block sets b_end_io to a simple completion handler that marks
* the buffer up-to-date (if appropriate), unlocks the buffer and wakes
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
- if (test_set_buffer_locked(bh))
+ if (rw == SWRITE)
+ lock_buffer(bh);
+ else if (test_set_buffer_locked(bh))
continue;
- get_bh(bh);
- if (rw == WRITE) {
+ if (rw == WRITE || rw == SWRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
+ get_bh(bh);
submit_bh(WRITE, bh);
continue;
}
} else {
if (!buffer_uptodate(bh)) {
bh->b_end_io = end_buffer_read_sync;
+ get_bh(bh);
submit_bh(rw, bh);
continue;
}
}
unlock_buffer(bh);
- put_bh(bh);
}
}
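Usage sketch for the new SWRITE mode (not from the patch): force the *current* contents of a dirty, possibly already-locked buffer to disk and wait for it, assuming bh is in scope:

	ll_rw_block(SWRITE, 1, &bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		err = -EIO;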
spin_lock(&mapping->private_lock);
ret = drop_buffers(page, &buffers_to_free);
- if (ret) {
- /*
- * If the filesystem writes its buffers by hand (eg ext3)
- * then we can have clean buffers against a dirty page. We
- * clean the page here; otherwise later reattachment of buffers
- * could encounter a non-uptodate page, which is unresolvable.
- * This only applies in the rare case where try_to_free_buffers
- * succeeds but the page is not freed.
- */
- clear_page_dirty(page);
- }
+
+ /*
+ * If the filesystem writes its buffers by hand (eg ext3)
+ * then we can have clean buffers against a dirty page. We
+ * clean the page here; otherwise the VM will never notice
+ * that the filesystem did any IO at all.
+ *
+ * Also, during truncate, discard_buffer will have marked all
+ * the page's buffers clean. We discover that here and clean
+ * the page also.
+ *
+ * private_lock must be held over this entire operation in order
+ * to synchronise against __set_page_dirty_buffers and prevent the
+ * dirty bit from being lost.
+ */
+ if (ret)
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
spin_unlock(&mapping->private_lock);
out:
if (buffers_to_free) {
}
EXPORT_SYMBOL(try_to_free_buffers);
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
{
struct address_space *mapping;
mapping = page_mapping(page);
if (mapping)
blk_run_backing_dev(mapping->backing_dev_info, page);
- return 0;
}
/*
/*
* Buffer-head allocation
*/
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
/*
* Once the number of bh's in the machine exceeds this level, we start
if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
return;
__get_cpu_var(bh_accounting).ratelimit = 0;
- for_each_cpu(i)
+ for_each_online_cpu(i)
tot += per_cpu(bh_accounting, i).nr;
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+ set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
if (ret) {
- preempt_disable();
- __get_cpu_var(bh_accounting).nr++;
+ INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ get_cpu_var(bh_accounting).nr++;
recalc_bh_state();
- preempt_enable();
+ put_cpu_var(bh_accounting);
}
return ret;
}
{
BUG_ON(!list_empty(&bh->b_assoc_buffers));
kmem_cache_free(bh_cachep, bh);
- preempt_disable();
- __get_cpu_var(bh_accounting).nr--;
+ get_cpu_var(bh_accounting).nr--;
recalc_bh_state();
- preempt_enable();
+ put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);
-static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
-{
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
- struct buffer_head * bh = (struct buffer_head *)data;
-
- memset(bh, 0, sizeof(*bh));
- INIT_LIST_HEAD(&bh->b_assoc_buffers);
- }
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
static void buffer_exit_cpu(int cpu)
{
int i;
brelse(b->bhs[i]);
b->bhs[i] = NULL;
}
+ get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+ per_cpu(bh_accounting, cpu).nr = 0;
+ put_cpu_var(bh_accounting);
}
static int buffer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- if (action == CPU_DEAD)
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
buffer_exit_cpu((unsigned long)hcpu);
return NOTIFY_OK;
}
-#endif /* CONFIG_HOTPLUG_CPU */
void __init buffer_init(void)
{
int nrpages;
- bh_cachep = kmem_cache_create("buffer_head",
- sizeof(struct buffer_head), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
+ bh_cachep = KMEM_CACHE(buffer_head,
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
+EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(cont_prepare_write);
-EXPORT_SYMBOL(end_buffer_async_write);
+EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
-EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);