diff --git a/fs/buffer.c b/fs/buffer.c
index f961605..6fa5302 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/syscalls.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/capability.h>
 #include <linux/blkdev.h>
 #include <linux/file.h>
 #include <linux/quotaops.h>
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/bio.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static void invalidate_bh_lrus(void);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
@@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
        bh->b_end_io = handler;
        bh->b_private = private;
 }
+EXPORT_SYMBOL(init_buffer);
 
 static int sync_buffer(void *word)
 {
@@ -67,19 +68,20 @@ static int sync_buffer(void *word)
        return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
-       clear_buffer_locked(bh);
+       clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
 }
+EXPORT_SYMBOL(unlock_buffer);
 
 /*
  * Block until a buffer comes unlocked.  This doesn't stop it
@@ -90,29 +92,42 @@ void __wait_on_buffer(struct buffer_head * bh)
 {
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL(__wait_on_buffer);
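These primitives back the lock_buffer() and wait_on_buffer() wrappers in
<linux/buffer_head.h>. As a minimal sketch (modelled on sync_dirty_buffer()
in this file; the helper name is illustrative), the synchronous-write
pattern built on them looks like:

        static void write_bh_sync(struct buffer_head *bh)
        {
                lock_buffer(bh);        /* may enter __lock_buffer() */
                get_bh(bh);             /* ref is dropped by the end_io handler */
                bh->b_end_io = end_buffer_write_sync;
                submit_bh(WRITE, bh);
                wait_on_buffer(bh);     /* may enter __wait_on_buffer() */
        }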
 
 static void
 __clear_page_buffers(struct page *page)
 {
        ClearPagePrivate(page);
-       page->private = 0;
+       set_page_private(page, 0);
        page_cache_release(page);
 }
 
+
+static int quiet_error(struct buffer_head *bh)
+{
+       if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
+               return 0;
+       return 1;
+}
+
+
 static void buffer_io_error(struct buffer_head *bh)
 {
        char b[BDEVNAME_SIZE];
-
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
 }
 
 /*
- * Default synchronous end-of-IO handler..  Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
+ * End-of-IO handler helper function which does not touch the bh after
+ * unlocking it.
+ * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
+ * a race there is benign: unlock_buffer() only uses the bh's address for
+ * hashing after unlocking the buffer, so it doesn't actually touch the bh
+ * itself.
  */
-void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 {
        if (uptodate) {
                set_buffer_uptodate(bh);
@@ -121,8 +136,18 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
+}
+
+/*
+ * Default synchronous end-of-IO handler..  Just mark it up-to-date and
+ * unlock the buffer. This is what ll_rw_block uses too.
+ */
+void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
+{
+       __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_read_sync);
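The read-side counterpart, a sketch mirroring __bread_slow() later in this
file, drives a synchronous read through this handler:

        lock_buffer(bh);
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                brelse(bh);             /* I/O error */
                bh = NULL;
        }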
 
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
@@ -131,7 +156,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+               if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -143,270 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        unlock_buffer(bh);
        put_bh(bh);
 }
-
-/*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping.  Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-       int ret = 0;
-
-       if (bdev) {
-               int err;
-
-               ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
-               err = filemap_fdatawait(bdev->bd_inode->i_mapping);
-               if (!ret)
-                       ret = err;
-       }
-       return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock.  Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-       sync_inodes_sb(sb, 0);
-       DQUOT_SYNC(sb);
-       lock_super(sb);
-       if (sb->s_dirt && sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
-       if (sb->s_op->sync_fs)
-               sb->s_op->sync_fs(sb, 1);
-       sync_blockdev(sb->s_bdev);
-       sync_inodes_sb(sb, 1);
-
-       return sync_blockdev(sb->s_bdev);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device.   Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-       struct super_block *sb = get_super(bdev);
-       if (sb) {
-               int res = fsync_super(sb);
-               drop_super(sb);
-               return res;
-       }
-       return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev  --  lock a filesystem and force it into a consistent state
- * @bdev:      blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-       struct super_block *sb;
-
-       down(&bdev->bd_mount_sem);
-       sb = get_super(bdev);
-       if (sb && !(sb->s_flags & MS_RDONLY)) {
-               sb->s_frozen = SB_FREEZE_WRITE;
-               wmb();
-
-               sync_inodes_sb(sb, 0);
-               DQUOT_SYNC(sb);
-
-               lock_super(sb);
-               if (sb->s_dirt && sb->s_op->write_super)
-                       sb->s_op->write_super(sb);
-               unlock_super(sb);
-
-               if (sb->s_op->sync_fs)
-                       sb->s_op->sync_fs(sb, 1);
-
-               sync_blockdev(sb->s_bdev);
-               sync_inodes_sb(sb, 1);
-
-               sb->s_frozen = SB_FREEZE_TRANS;
-               wmb();
-
-               sync_blockdev(sb->s_bdev);
-
-               if (sb->s_op->write_super_lockfs)
-                       sb->s_op->write_super_lockfs(sb);
-       }
-
-       sync_blockdev(bdev);
-       return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev  -- unlock filesystem
- * @bdev:      blockdevice to unlock
- * @sb:                associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-void thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-       if (sb) {
-               BUG_ON(sb->s_bdev != bdev);
-
-               if (sb->s_op->unlockfs)
-                       sb->s_op->unlockfs(sb);
-               sb->s_frozen = SB_UNFROZEN;
-               wmb();
-               wake_up(&sb->s_wait_unfrozen);
-               drop_super(sb);
-       }
-
-       up(&bdev->bd_mount_sem);
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
- * sync everything.  Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
-       wakeup_bdflush(0);
-       sync_inodes(0);         /* All mappings, inodes and their blockdevs */
-       DQUOT_SYNC(NULL);
-       sync_supers();          /* Write the superblocks */
-       sync_filesystems(0);    /* Start syncing the filesystems */
-       sync_filesystems(wait); /* Waitingly sync the filesystems */
-       sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
-       if (!wait)
-               printk("Emergency Sync complete\n");
-       if (unlikely(laptop_mode))
-               laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
-       do_sync(1);
-       return 0;
-}
-
-void emergency_sync(void)
-{
-       pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
-       struct inode * inode = dentry->d_inode;
-       struct super_block * sb;
-       int ret, err;
-
-       /* sync the inode to buffers */
-       ret = write_inode_now(inode, 0);
-
-       /* sync the superblock to buffers */
-       sb = inode->i_sb;
-       lock_super(sb);
-       if (sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
-
-       /* .. finally sync the buffers to disk */
-       err = sync_blockdev(sb->s_bdev);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-
-       mapping = file->f_mapping;
-
-       ret = -EINVAL;
-       if (!file->f_op || !file->f_op->fsync) {
-               /* Why?  We can still call filemap_fdatawrite */
-               goto out_putf;
-       }
-
-       current->flags |= PF_SYNCWRITE;
-       ret = filemap_fdatawrite(mapping);
-
-       /*
-        * We need to protect against concurrent writers,
-        * which could cause livelocks in fsync_buffers_list
-        */
-       down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, 0);
-       if (!ret)
-               ret = err;
-       up(&mapping->host->i_sem);
-       err = filemap_fdatawait(mapping);
-       if (!ret)
-               ret = err;
-       current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
-       fput(file);
-out:
-       return ret;
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-
-       ret = -EINVAL;
-       if (!file->f_op || !file->f_op->fsync)
-               goto out_putf;
-
-       mapping = file->f_mapping;
-
-       current->flags |= PF_SYNCWRITE;
-       ret = filemap_fdatawrite(mapping);
-       down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, 1);
-       if (!ret)
-               ret = err;
-       up(&mapping->host->i_sem);
-       err = filemap_fdatawait(mapping);
-       if (!ret)
-               ret = err;
-       current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
-       fput(file);
-out:
-       return ret;
-}
+EXPORT_SYMBOL(end_buffer_write_sync);
 
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
@@ -420,7 +182,7 @@ out:
  * private_lock is contended then so is mapping->tree_lock).
  */
 static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+__find_get_block_slow(struct block_device *bdev, sector_t block)
 {
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
@@ -442,13 +204,13 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
        head = page_buffers(page);
        bh = head;
        do {
-               if (bh->b_blocknr == block) {
+               if (!buffer_mapped(bh))
+                       all_mapped = 0;
+               else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
-               if (!buffer_mapped(bh))
-                       all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);
 
@@ -460,8 +222,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
-                       (unsigned long long)block, (unsigned long long)bh->b_blocknr);
-               printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+                       (unsigned long long)block,
+                       (unsigned long long)bh->b_blocknr);
+               printk("b_state=0x%08lx, b_size=%zu\n",
+                       bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
 out_unlock:
@@ -503,32 +267,36 @@ out:
    we think the disk contains more recent information than the buffercache.
    The update == 1 pass marks the buffers we need to update, the update == 2
    pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
+void invalidate_bdev(struct block_device *bdev)
 {
+       struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+       if (mapping->nrpages == 0)
+               return;
+
        invalidate_bh_lrus();
-       /*
-        * FIXME: what about destroy_dirty_buffers?
-        * We really want to use invalidate_inode_pages2() for
-        * that, but not until that's cleaned up.
-        */
-       invalidate_inode_pages(bdev->bd_inode->i_mapping);
+       invalidate_mapping_pages(mapping, 0, -1);
 }
+EXPORT_SYMBOL(invalidate_bdev);
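For reference, the classic caller pairs this with a flush of dirty data
first, as the BLKFLSBUF ioctl path does (sketch):

        fsync_bdev(bdev);       /* write out dirty pages and buffers */
        invalidate_bdev(bdev);  /* then drop the clean cached pages */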
 
 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
-       struct zone **zones;
-       pg_data_t *pgdat;
+       struct zone *zone;
+       int nid;
 
-       wakeup_bdflush(1024);
+       wakeup_flusher_threads(1024);
        yield();
 
-       for_each_pgdat(pgdat) {
-               zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
-               if (*zones)
-                       try_to_free_pages(zones, GFP_NOFS, 0);
+       for_each_online_node(nid) {
+               (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+                                               gfp_zone(GFP_NOFS), NULL,
+                                               &zone);
+               if (zone)
+                       try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+                                               GFP_NOFS, NULL);
        }
 }
 
@@ -538,8 +306,8 @@ static void free_more_memory(void)
  */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-       static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
+       struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;
@@ -551,7 +319,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
-               if (printk_ratelimit())
+               if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }
@@ -561,7 +329,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
-       spin_lock_irqsave(&page_uptodate_lock, flags);
+       first = page_buffers(page);
+       local_irq_save(flags);
+       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
@@ -574,7 +344,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
 
        /*
         * If none of the buffers had errors and they are all
@@ -586,7 +357,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
        return;
 
 still_busy:
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        return;
 }
 
@@ -597,8 +369,8 @@ still_busy:
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
-       static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
+       struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
 
@@ -608,18 +380,22 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (printk_ratelimit()) {
+               if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
+               set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }
 
-       spin_lock_irqsave(&page_uptodate_lock, flags);
+       first = page_buffers(page);
+       local_irq_save(flags);
+       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
@@ -630,14 +406,17 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        }
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        end_page_writeback(page);
        return;
 
 still_busy:
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        return;
 }
+EXPORT_SYMBOL(end_buffer_async_write);
 
 /*
  * If a page's buffers are under async readin (end_buffer_async_read
@@ -666,11 +445,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
        set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+                                         bh_end_io_t *handler)
 {
-       bh->b_end_io = end_buffer_async_write;
+       bh->b_end_io = handler;
        set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+       mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
 
 
@@ -726,9 +511,13 @@ EXPORT_SYMBOL(mark_buffer_async_write);
 /*
  * The buffer's backing address_space's private_lock must be held
  */
-static inline void __remove_assoc_queue(struct buffer_head *bh)
+static void __remove_assoc_queue(struct buffer_head *bh)
 {
        list_del_init(&bh->b_assoc_buffers);
+       WARN_ON(!bh->b_assoc_map);
+       if (buffer_write_io_error(bh))
+               set_bit(AS_EIO, &bh->b_assoc_map->flags);
+       bh->b_assoc_map = NULL;
 }
 
 int inode_has_buffers(struct inode *inode)
@@ -771,18 +560,56 @@ repeat:
        return err;
 }
 
+static void do_thaw_all(struct work_struct *work)
+{
+       struct super_block *sb;
+       char b[BDEVNAME_SIZE];
+
+       spin_lock(&sb_lock);
+restart:
+       list_for_each_entry(sb, &super_blocks, s_list) {
+               sb->s_count++;
+               spin_unlock(&sb_lock);
+               down_read(&sb->s_umount);
+               while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+                       printk(KERN_WARNING "Emergency Thaw on %s\n",
+                              bdevname(sb->s_bdev, b));
+               up_read(&sb->s_umount);
+               spin_lock(&sb_lock);
+               if (__put_super_and_need_restart(sb))
+                       goto restart;
+       }
+       spin_unlock(&sb_lock);
+       kfree(work);
+       printk(KERN_WARNING "Emergency Thaw complete\n");
+}
+
+/**
+ * emergency_thaw_all -- forcibly thaw every frozen filesystem
+ *
+ * Used for emergency unfreeze of all filesystems via SysRq
+ */
+void emergency_thaw_all(void)
+{
+       struct work_struct *work;
+
+       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       if (work) {
+               INIT_WORK(work, do_thaw_all);
+               schedule_work(work);
+       }
+}
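
This is reached from the SysRq emergency-thaw key; a sketch of the handler
on the drivers/char/sysrq.c side, assuming the sysrq handler signature of
this era:

        static void sysrq_handle_thaw(int key, struct tty_struct *tty)
        {
                emergency_thaw_all();
        }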
+
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- *                        buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
+ * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
  * that I/O.
  *
- * Basically, this is a convenience function for fsync().  @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
@@ -822,19 +649,40 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
-               if (mapping->assoc_mapping != buffer_mapping)
-                       BUG();
+               BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
-       if (list_empty(&bh->b_assoc_buffers)) {
+       if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
+               bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
 }
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
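A sketch of the intended use: a filesystem ties a metadata buffer to a
file's mapping so that a later fsync() on the file (via
sync_mapping_buffers()) also writes the buffer. The metadata update itself
is hypothetical:

        bh = sb_getblk(inode->i_sb, blocknr);
        /* ... fill/modify bh->b_data (hypothetical metadata update) ... */
        set_buffer_uptodate(bh);
        mark_buffer_dirty_inode(bh, inode);
        brelse(bh);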
 
 /*
+ * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static void __set_page_dirty(struct page *page,
+               struct address_space *mapping, int warn)
+{
+       spin_lock_irq(&mapping->tree_lock);
+       if (page->mapping) {    /* Race with truncate? */
+               WARN_ON_ONCE(warn && !PageUptodate(page));
+               account_page_dirtied(page, mapping);
+               radix_tree_tag_set(&mapping->page_tree,
+                               page_index(page), PAGECACHE_TAG_DIRTY);
+       }
+       spin_unlock_irq(&mapping->tree_lock);
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+}
+
+/*
  * Add a page to the dirty page list.
  *
  * It is a sad fact of life that this function is called from several places
@@ -861,7 +709,11 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-       struct address_space * const mapping = page->mapping;
+       int newly_dirty;
+       struct address_space *mapping = page_mapping(page);
+
+       if (unlikely(!mapping))
+               return !TestSetPageDirty(page);
 
        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
@@ -873,22 +725,12 @@ int __set_page_dirty_buffers(struct page *page)
                        bh = bh->b_this_page;
                } while (bh != head);
        }
+       newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);
 
-       if (!TestSetPageDirty(page)) {
-               write_lock_irq(&mapping->tree_lock);
-               if (page->mapping) {    /* Race with truncate? */
-                       if (mapping_cap_account_dirty(mapping))
-                               inc_page_state(nr_dirty);
-                       radix_tree_tag_set(&mapping->page_tree,
-                                               page_index(page),
-                                               PAGECACHE_TAG_DIRTY);
-               }
-               write_unlock_irq(&mapping->tree_lock);
-               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-       }
-       
-       return 0;
+       if (newly_dirty)
+               __set_page_dirty(page, mapping, 1);
+       return newly_dirty;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -915,6 +757,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
+       struct address_space *mapping, *prev_mapping = NULL;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -922,9 +765,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
-               list_del_init(&bh->b_assoc_buffers);
+               mapping = bh->b_assoc_map;
+               __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
+                       bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
@@ -934,8 +782,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
-                               wait_on_buffer(bh);
-                               ll_rw_block(WRITE, 1, &bh);
+                               ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+                               /*
+                                * Kick off IO for the previous mapping. Note
+                                * that we will not run the very last mapping,
+                                * wait_on_buffer() will do that for us
+                                * through sync_buffer().
+                                */
+                               if (prev_mapping && prev_mapping != mapping)
+                                       blk_run_address_space(prev_mapping);
+                               prev_mapping = mapping;
+
                                brelse(bh);
                                spin_lock(lock);
                        }
@@ -944,8 +802,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               __remove_assoc_queue(bh);
                get_bh(bh);
+               mapping = bh->b_assoc_map;
+               __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
+               if (buffer_dirty(bh)) {
+                       list_add(&bh->b_assoc_buffers,
+                                &mapping->private_list);
+                       bh->b_assoc_map = mapping;
+               }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
@@ -984,6 +851,7 @@ void invalidate_inode_buffers(struct inode *inode)
                spin_unlock(&buffer_mapping->private_lock);
        }
 }
+EXPORT_SYMBOL(invalidate_inode_buffers);
 
 /*
  * Remove any clean buffers from the inode's buffer list.  This is called
@@ -1044,12 +912,13 @@ try_again:
 
                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
+               bh->b_private = NULL;
                bh->b_size = size;
 
                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
 
-               bh->b_end_io = NULL;
+               init_buffer(bh, NULL, NULL);
        }
        return head;
 /*
@@ -1136,12 +1005,12 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        struct page *page;
        struct buffer_head *bh;
 
-       page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+       page = find_or_create_page(inode->i_mapping, index,
+               (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
        if (!page)
                return NULL;
 
-       if (!PageLocked(page))
-               BUG();
+       BUG_ON(!PageLocked(page));
 
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
@@ -1181,13 +1050,8 @@ failed:
 /*
  * Create buffers for the specified block device block's page.  If
  * that page was dirty, the buffers are set dirty also.
- *
- * Except that's a bug.  Attaching dirty buffers to a dirty
- * blockdev's page can result in filesystem corruption, because
- * some of those buffers may be aliases of filesystem data.
- * grow_dev_page() will go BUG() if this happens.
  */
-static inline int
+static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
 {
        struct page *page;
@@ -1200,8 +1064,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        } while ((size << sizebits) < PAGE_SIZE);
 
        index = block >> sizebits;
-       block = index << sizebits;
 
+       /*
+        * Check for a block which wants to lie outside our maximum possible
+        * pagecache index.  (this comparison is done using sector_t types).
+        */
+       if (unlikely(index != block >> sizebits)) {
+               char b[BDEVNAME_SIZE];
+
+               printk(KERN_ERR "%s: requested out-of-range block %llu for "
+                       "device %s\n",
+                       __func__, (unsigned long long)block,
+                       bdevname(bdev, b));
+               return -EIO;
+       }
+       block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
@@ -1211,16 +1088,16 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
        /* Size must be multiple of hard sectorsize */
-       if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+       if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
-               printk(KERN_ERR "hardsect size: %d\n",
-                                       bdev_hardsect_size(bdev));
+               printk(KERN_ERR "logical block size: %d\n",
+                                       bdev_logical_block_size(bdev));
 
                dump_stack();
                return NULL;
@@ -1228,12 +1105,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
        for (;;) {
                struct buffer_head * bh;
+               int ret;
 
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
 
-               if (!grow_buffers(bdev, block, size))
+               ret = grow_buffers(bdev, block, size);
+               if (ret < 0)
+                       return NULL;
+               if (ret == 0)
                        free_more_memory();
        }
 }
@@ -1263,6 +1144,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
 /**
  * mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
  *
  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
  * backing page dirty, then tag the page as dirty in its address_space's radix
@@ -1272,11 +1154,32 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
-       if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-               __set_page_dirty_nobuffers(bh->b_page);
+       WARN_ON_ONCE(!buffer_uptodate(bh));
+
+       /*
+        * Very *carefully* optimize the it-is-already-dirty case.
+        *
+        * Don't let the final "is it dirty" escape to before we
+        * perhaps modified the buffer.
+        */
+       if (buffer_dirty(bh)) {
+               smp_mb();
+               if (buffer_dirty(bh))
+                       return;
+       }
+
+       if (!test_set_buffer_dirty(bh)) {
+               struct page *page = bh->b_page;
+               if (!TestSetPageDirty(page)) {
+                       struct address_space *mapping = page_mapping(page);
+                       if (mapping)
+                               __set_page_dirty(page, mapping, 0);
+               }
+       }
 }
+EXPORT_SYMBOL(mark_buffer_dirty);
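The canonical read-modify-write sequence around this primitive (a sketch;
the on-disk layout cast is illustrative):

        struct buffer_head *bh = sb_bread(sb, blocknr);
        if (bh) {
                ((__le32 *)bh->b_data)[slot] = cpu_to_le32(val);
                mark_buffer_dirty(bh);  /* written back later; sync_dirty_buffer() forces it */
                brelse(bh);
        }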
 
 /*
  * Decrement a buffer_head's reference count.  If all buffers against a page
@@ -1291,9 +1194,9 @@ void __brelse(struct buffer_head * buf)
                put_bh(buf);
                return;
        }
-       printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
-       WARN_ON(1);
+       WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
 }
+EXPORT_SYMBOL(__brelse);
 
 /*
  * bforget() is like brelse(), except it discards any
@@ -1302,15 +1205,17 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
        clear_buffer_dirty(bh);
-       if (!list_empty(&bh->b_assoc_buffers)) {
+       if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;
 
                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
+               bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
 }
+EXPORT_SYMBOL(__bforget);
 
 static struct buffer_head *__bread_slow(struct buffer_head *bh)
 {
@@ -1412,12 +1317,12 @@ static void bh_lru_install(struct buffer_head *bh)
 /*
  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
  */
-static inline struct buffer_head *
-lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
+static struct buffer_head *
+lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
-       int i;
+       unsigned int i;
 
        check_irqs_on();
        bh_lru_lock();
@@ -1449,12 +1354,12 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
  * NULL
  */
 struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, int size)
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
 
        if (bh == NULL) {
-               bh = __find_get_block_slow(bdev, block, size);
+               bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
@@ -1477,7 +1382,7 @@ EXPORT_SYMBOL(__find_get_block);
  * attempt is failing.  FIXME, perhaps?
  */
 struct buffer_head *
-__getblk(struct block_device *bdev, sector_t block, int size)
+__getblk(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __find_get_block(bdev, block, size);
 
@@ -1491,16 +1396,19 @@ EXPORT_SYMBOL(__getblk);
 /*
  * Do async read-ahead on a buffer..
  */
-void __breadahead(struct block_device *bdev, sector_t block, int size)
+void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
-       ll_rw_block(READA, 1, &bh);
-       brelse(bh);
+       if (likely(bh)) {
+               ll_rw_block(READA, 1, &bh);
+               brelse(bh);
+       }
 }
 EXPORT_SYMBOL(__breadahead);
 
 /**
  *  __bread() - reads a specified block and returns the bh
+ *  @bdev: the block_device to read from
  *  @block: number of block
  *  @size: size (in bytes) to read
  * 
@@ -1508,11 +1416,11 @@ EXPORT_SYMBOL(__breadahead);
  *  It returns NULL if the block was unreadable.
  */
 struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, int size)
+__bread(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
 
-       if (!buffer_uptodate(bh))
+       if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
 }
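
Most filesystems reach __bread() through the sb_bread() wrapper in
<linux/buffer_head.h>, which supplies the superblock's device and block
size:

        static inline struct buffer_head *
        sb_bread(struct super_block *sb, sector_t block)
        {
                return __bread(sb->s_bdev, block, sb->s_blocksize);
        }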
@@ -1535,17 +1443,17 @@ static void invalidate_bh_lru(void *arg)
        put_cpu_var(bh_lrus);
 }
        
-static void invalidate_bh_lrus(void)
+void invalidate_bh_lrus(void)
 {
-       on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
+       on_each_cpu(invalidate_bh_lru, NULL, 1);
 }
+EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
 void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
 {
        bh->b_page = page;
-       if (offset >= PAGE_SIZE)
-               BUG();
+       BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
@@ -1559,7 +1467,7 @@ EXPORT_SYMBOL(set_bh_page);
 /*
  * Called when truncating a buffer on a page completely.
  */
-static inline void discard_buffer(struct buffer_head * bh)
+static void discard_buffer(struct buffer_head * bh)
 {
        lock_buffer(bh);
        clear_buffer_dirty(bh);
@@ -1568,39 +1476,11 @@ static inline void discard_buffer(struct buffer_head * bh)
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
+       clear_buffer_unwritten(bh);
        unlock_buffer(bh);
 }
 
 /**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private).  If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, int gfp_mask)
-{
-       struct address_space * const mapping = page->mapping;
-
-       BUG_ON(!PageLocked(page));
-       if (PageWriteback(page))
-               return 0;
-       
-       if (mapping && mapping->a_ops->releasepage)
-               return mapping->a_ops->releasepage(page, gfp_mask);
-       return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
-/**
  * block_invalidatepage - invalidate part or all of a buffer-backed page
  *
  * @page: the page which is affected
@@ -1615,11 +1495,10 @@ EXPORT_SYMBOL(try_to_release_page);
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
 {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
-       int ret = 1;
 
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
@@ -1646,9 +1525,9 @@ int block_invalidatepage(struct page *page, unsigned long offset)
         * so real IO is not possible anymore.
         */
        if (offset == 0)
-               ret = try_to_release_page(page, 0);
+               try_to_release_page(page, 0);
 out:
-       return ret;
+       return;
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
@@ -1709,7 +1588,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 
        might_sleep();
 
-       old_bh = __find_get_block_slow(bdev, block, 0);
+       old_bh = __find_get_block_slow(bdev, block);
        if (old_bh) {
                clear_buffer_dirty(old_bh);
                wait_on_buffer(old_bh);
@@ -1743,22 +1622,36 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * locked buffer.   This only can happen if someone has written the buffer
  * directly, with submit_bh().  At the address_space level PageWriteback
  * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed out before the higher-level caller actually
+ * waits for the writes to be completed.  The various wait functions,
+ * such as wait_on_writeback_range() will ultimately call sync_page()
+ * which will ultimately call blk_run_backing_dev(), which will end up
+ * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-                       get_block_t *get_block, struct writeback_control *wbc)
+                       get_block_t *get_block, struct writeback_control *wbc,
+                       bh_end_io_t *handler)
 {
        int err;
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
+       const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
+       int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+                       WRITE_SYNC_PLUG : WRITE);
 
        BUG_ON(!PageLocked(page));
 
        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 
        if (!page_has_buffers(page)) {
-               create_empty_buffers(page, 1 << inode->i_blkbits,
+               create_empty_buffers(page, blocksize,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
 
@@ -1772,7 +1665,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         * handle that here by just cleaning them.
         */
 
-       block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        head = page_buffers(page);
        bh = head;
 
@@ -1792,10 +1685,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
-               } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+               } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+                          buffer_dirty(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
+                       clear_buffer_delay(bh);
                        if (buffer_new(bh)) {
                                /* blockdev mappings never come here */
                                clear_buffer_new(bh);
@@ -1808,24 +1704,23 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        } while (bh != head);
 
        do {
-               get_bh(bh);
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
-                * potentially cause a busy-wait loop from pdflush and kswapd
-                * activity, but those code paths have their own higher-level
-                * throttling.
+                * potentially cause a busy-wait loop from writeback threads
+                * and kswapd activity, but those code paths have their own
+                * higher-level throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
                        lock_buffer(bh);
-               } else if (test_set_buffer_locked(bh)) {
+               } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        unlock_buffer(bh);
                }
@@ -1837,17 +1732,16 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
-       unlock_page(page);
 
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(WRITE, bh);
+                       submit_bh(write_op, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
+       unlock_page(page);
 
        err = 0;
 done:
@@ -1857,22 +1751,12 @@ done:
                 * clean.  Someone wrote them back by hand with
                 * ll_rw_block/submit_bh.  A rare case.
                 */
-               int uptodate = 1;
-               do {
-                       if (!buffer_uptodate(bh)) {
-                               uptodate = 0;
-                               break;
-                       }
-                       bh = bh->b_this_page;
-               } while (bh != head);
-               if (uptodate)
-                       SetPageUptodate(page);
                end_page_writeback(page);
+
                /*
                 * The page and buffer_heads can be released at any time from
                 * here on.
                 */
-               wbc->pages_skipped++;   /* We didn't write this page */
        }
        return err;
 
@@ -1886,10 +1770,10 @@ recover:
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
-               get_bh(bh);
-               if (buffer_mapped(bh) && buffer_dirty(bh)) {
+               if (buffer_mapped(bh) && buffer_dirty(bh) &&
+                   !buffer_delay(bh)) {
                        lock_buffer(bh);
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        /*
                         * The buffer may have been set dirty during
@@ -1900,25 +1784,67 @@ recover:
        } while ((bh = bh->b_this_page) != head);
        SetPageError(page);
        BUG_ON(PageWriteback(page));
+       mapping_set_error(page->mapping, err);
        set_page_writeback(page);
-       unlock_page(page);
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh(WRITE, bh);
+                       submit_bh(write_op, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
+       unlock_page(page);
        goto done;
 }
 
-static int __block_prepare_write(struct inode *inode, struct page *page,
-               unsigned from, unsigned to, get_block_t *get_block)
+/*
+ * If a page has any new buffers, zero them out here, and mark them uptodate
+ * and dirty so they'll be written out (in order to prevent uninitialised
+ * block data from leaking). And clear the new bit.
+ */
+void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
 {
-       unsigned block_start, block_end;
+       unsigned int block_start, block_end;
+       struct buffer_head *head, *bh;
+
+       BUG_ON(!PageLocked(page));
+       if (!page_has_buffers(page))
+               return;
+
+       bh = head = page_buffers(page);
+       block_start = 0;
+       do {
+               block_end = block_start + bh->b_size;
+
+               if (buffer_new(bh)) {
+                       if (block_end > from && block_start < to) {
+                               if (!PageUptodate(page)) {
+                                       unsigned start, size;
+
+                                       start = max(from, block_start);
+                                       size = min(to, block_end) - start;
+
+                                       zero_user(page, start, size);
+                                       set_buffer_uptodate(bh);
+                               }
+
+                               clear_buffer_new(bh);
+                               mark_buffer_dirty(bh);
+                       }
+               }
+
+               block_start = block_end;
+               bh = bh->b_this_page;
+       } while (bh != head);
+}
+EXPORT_SYMBOL(page_zero_new_buffers);
+
+static int __block_prepare_write(struct inode *inode, struct page *page,
+               unsigned from, unsigned to, get_block_t *get_block)
+{
+       unsigned block_start, block_end;
        sector_t block;
        int err = 0;
        unsigned blocksize, bbits;
@@ -1950,30 +1876,23 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                if (!buffer_mapped(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
-                               goto out;
+                               break;
                        if (buffer_new(bh)) {
-                               clear_buffer_new(bh);
                                unmap_underlying_metadata(bh->b_bdev,
                                                        bh->b_blocknr);
                                if (PageUptodate(page)) {
+                                       clear_buffer_new(bh);
                                        set_buffer_uptodate(bh);
+                                       mark_buffer_dirty(bh);
                                        continue;
                                }
-                               if (block_end > to || block_start < from) {
-                                       void *kaddr;
-
-                                       kaddr = kmap_atomic(page, KM_USER0);
-                                       if (block_end > to)
-                                               memset(kaddr+to, 0,
-                                                       block_end-to);
-                                       if (block_start < from)
-                                               memset(kaddr+block_start,
-                                                       0, from-block_start);
-                                       flush_dcache_page(page);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                               }
+                               if (block_end > to || block_start < from)
+                                       zero_user_segments(page,
+                                               to, block_end,
+                                               block_start, from);
                                continue;
                        }
                }
@@ -1983,6 +1902,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                        continue; 
                }
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+                   !buffer_unwritten(bh) &&
                     (block_start < from || block_end > to)) {
                        ll_rw_block(READ, 1, &bh);
                        *wait_bh++=bh;
@@ -1994,37 +1914,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
-                       return -EIO;
+                       err = -EIO;
        }
-       return 0;
-out:
-       /*
-        * Zero out any newly allocated blocks to avoid exposing stale
-        * data.  If BH_New is set, we know that the block was newly
-        * allocated in the above loop.
-        */
-       bh = head;
-       block_start = 0;
-       do {
-               block_end = block_start+blocksize;
-               if (block_end <= from)
-                       goto next_bh;
-               if (block_start >= to)
-                       break;
-               if (buffer_new(bh)) {
-                       void *kaddr;
-
-                       clear_buffer_new(bh);
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr+block_start, 0, bh->b_size);
-                       kunmap_atomic(kaddr, KM_USER0);
-                       set_buffer_uptodate(bh);
-                       mark_buffer_dirty(bh);
-               }
-next_bh:
-               block_start = block_end;
-               bh = bh->b_this_page;
-       } while (bh != head);
+       if (unlikely(err))
+               page_zero_new_buffers(page, from, to);
        return err;
 }
 
@@ -2049,6 +1942,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                }
+               clear_buffer_new(bh);
        }
 
        /*
@@ -2063,6 +1957,185 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 }
 
 /*
+ * block_write_begin takes care of the basic task of block allocation and
+ * bringing partial write blocks uptodate first.
+ *
+ * If *pagep is not NULL, then block_write_begin uses the locked page
+ * at *pagep rather than allocating its own. In this case, the page will
+ * not be unlocked or deallocated on failure.
+ */
+int block_write_begin(struct file *file, struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned flags,
+                       struct page **pagep, void **fsdata,
+                       get_block_t *get_block)
+{
+       struct inode *inode = mapping->host;
+       int status = 0;
+       struct page *page;
+       pgoff_t index;
+       unsigned start, end;
+       int ownpage = 0;
+
+       index = pos >> PAGE_CACHE_SHIFT;
+       start = pos & (PAGE_CACHE_SIZE - 1);
+       end = start + len;
+
+       page = *pagep;
+       if (page == NULL) {
+               ownpage = 1;
+               page = grab_cache_page_write_begin(mapping, index, flags);
+               if (!page) {
+                       status = -ENOMEM;
+                       goto out;
+               }
+               *pagep = page;
+       } else
+               BUG_ON(!PageLocked(page));
+
+       status = __block_prepare_write(inode, page, start, end, get_block);
+       if (unlikely(status)) {
+               ClearPageUptodate(page);
+
+               if (ownpage) {
+                       unlock_page(page);
+                       page_cache_release(page);
+                       *pagep = NULL;
+
+                       /*
+                        * prepare_write() may have instantiated a few blocks
+                        * outside i_size.  Trim these off again. Don't need
+                        * i_size_read because we hold i_mutex.
+                        */
+                       if (pos + len > inode->i_size)
+                               vmtruncate(inode, inode->i_size);
+               }
+       }
+
+out:
+       return status;
+}
+EXPORT_SYMBOL(block_write_begin);
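
For illustration, a filesystem's ->write_begin can usually forward straight to block_write_begin. A minimal sketch, not part of this patch; myfs_write_begin and myfs_get_block are hypothetical names:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	/* NULL *pagep asks block_write_begin to grab and lock the page */
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}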
+
+int block_write_end(struct file *file, struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned copied,
+                       struct page *page, void *fsdata)
+{
+       struct inode *inode = mapping->host;
+       unsigned start;
+
+       start = pos & (PAGE_CACHE_SIZE - 1);
+
+       if (unlikely(copied < len)) {
+               /*
+                * The buffers that were written will now be uptodate, so we
+                * don't have to worry about a readpage reading them and
+                * overwriting a partial write. However if we have encountered
+                * a short write and only partially written into a buffer, it
+                * will not be marked uptodate, so a readpage might come in and
+                * destroy our partial write.
+                *
+                * Do the simplest thing, and just treat any short write to a
+                * non-uptodate page as a zero-length write, and force the
+                * caller to redo the whole thing.
+                */
+               if (!PageUptodate(page))
+                       copied = 0;
+
+               page_zero_new_buffers(page, start+copied, start+len);
+       }
+       flush_dcache_page(page);
+
+       /* This could be a short (even 0-length) commit */
+       __block_commit_write(inode, page, start, start+copied);
+
+       return copied;
+}
+EXPORT_SYMBOL(block_write_end);
+
+int generic_write_end(struct file *file, struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned copied,
+                       struct page *page, void *fsdata)
+{
+       struct inode *inode = mapping->host;
+       int i_size_changed = 0;
+
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+       /*
+        * No need to use i_size_read() here, the i_size
+        * cannot change under us because we hold i_mutex.
+        *
+        * But it's important to update i_size while still holding page lock:
+        * page writeout could otherwise come in and zero beyond i_size.
+        */
+       if (pos+copied > inode->i_size) {
+               i_size_write(inode, pos+copied);
+               i_size_changed = 1;
+       }
+
+       unlock_page(page);
+       page_cache_release(page);
+
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * lengthens the time the page lock is held. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (i_size_changed)
+               mark_inode_dirty(inode);
+
+       return copied;
+}
+EXPORT_SYMBOL(generic_write_end);
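
Taken together, the usual wiring is to point ->write_begin at a helper like the one sketched above and let generic_write_end handle the i_size update and page release. A sketch only; all myfs_* names are hypothetical:

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};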
+
+/*
+ * block_is_partially_uptodate checks whether buffers within a page are
+ * uptodate or not.
+ *
+ * Returns true if all buffers which correspond to a file portion
+ * we want to read are uptodate.
+ */
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+                                       unsigned long from)
+{
+       struct inode *inode = page->mapping->host;
+       unsigned block_start, block_end, blocksize;
+       unsigned to;
+       struct buffer_head *bh, *head;
+       int ret = 1;
+
+       if (!page_has_buffers(page))
+               return 0;
+
+       blocksize = 1 << inode->i_blkbits;
+       to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
+       to = from + to;
+       if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+               return 0;
+
+       head = page_buffers(page);
+       bh = head;
+       block_start = 0;
+       do {
+               block_end = block_start + blocksize;
+               if (block_end > from && block_start < to) {
+                       if (!buffer_uptodate(bh)) {
+                               ret = 0;
+                               break;
+                       }
+                       if (block_end >= to)
+                               break;
+               }
+               block_start = block_end;
+               bh = bh->b_this_page;
+       } while (bh != head);
+
+       return ret;
+}
+EXPORT_SYMBOL(block_is_partially_uptodate);
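
To illustrate the semantics: with 1k buffers in a 4k page, a read confined to the second block can proceed as soon as that one buffer is uptodate, even though the page itself is not PageUptodate. A hypothetical sketch (normally this is reached via a_ops->is_partially_uptodate from the generic read path):

/* Can a 1k read at offset 1k in @page be satisfied already?
 * (Assumes a 1k blocksize.) */
static int myfs_second_block_uptodate(struct page *page)
{
	read_descriptor_t desc = { .count = 1024 };

	return block_is_partially_uptodate(page, &desc, 1024);
}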
+
+/*
  * Generic "read page" function for block devices that have the normal
  * get_block functionality. This is most of the block device filesystems.
  * Reads the page asynchronously --- the unlock_buffer() and
@@ -2078,8 +2151,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        int nr, i;
        int fully_mapped = 1;
 
-       if (!PageLocked(page))
-               PAGE_BUG(page);
+       BUG_ON(!PageLocked(page));
        blocksize = 1 << inode->i_blkbits;
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
@@ -2096,17 +2168,19 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                        continue;
 
                if (!buffer_mapped(bh)) {
+                       int err = 0;
+
                        fully_mapped = 0;
                        if (iblock < lblock) {
-                               if (get_block(inode, iblock, bh, 0))
+                               WARN_ON(bh->b_size != blocksize);
+                               err = get_block(inode, iblock, bh, 0);
+                               if (err)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               void *kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + i * blocksize, 0, blocksize);
-                               flush_dcache_page(page);
-                               kunmap_atomic(kaddr, KM_USER0);
-                               set_buffer_uptodate(bh);
+                               zero_user(page, i * blocksize, blocksize);
+                               if (!err)
+                                       set_buffer_uptodate(bh);
                                continue;
                        }
                        /*
@@ -2154,138 +2228,137 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        }
        return 0;
 }
+EXPORT_SYMBOL(block_read_full_page);
 
 /* utility function for filesystems that need to do work on expanding
- * truncates.  Uses prepare/commit_write to allow the filesystem to
+ * truncates.  Uses filesystem pagecache writes to allow the filesystem to
  * deal with the hole.  
  */
-int generic_cont_expand(struct inode *inode, loff_t size)
+int generic_cont_expand_simple(struct inode *inode, loff_t size)
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index, offset, limit;
+       void *fsdata;
        int err;
 
-       err = -EFBIG;
-        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-       if (limit != RLIM_INFINITY && size > (loff_t)limit) {
-               send_sig(SIGXFSZ, current, 0);
+       err = inode_newsize_ok(inode, size);
+       if (err)
                goto out;
-       }
-       if (size > inode->i_sb->s_maxbytes)
+
+       err = pagecache_write_begin(NULL, mapping, size, 0,
+                               AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
+                               &page, &fsdata);
+       if (err)
                goto out;
 
-       offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
+       err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
+       BUG_ON(err > 0);
 
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we 
-       ** skip the prepare.  make sure we never send an offset for the start
-       ** of a block
-       */
-       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-               offset++;
-       }
-       index = size >> PAGE_CACHE_SHIFT;
-       err = -ENOMEM;
-       page = grab_cache_page(mapping, index);
-       if (!page)
-               goto out;
-       err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
-       if (!err) {
-               err = mapping->a_ops->commit_write(NULL, page, offset, offset);
-       }
-       unlock_page(page);
-       page_cache_release(page);
-       if (err > 0)
-               err = 0;
 out:
        return err;
 }
+EXPORT_SYMBOL(generic_cont_expand_simple);
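
A typical caller would be a filesystem's setattr path when a truncate grows the file. A sketch with hypothetical myfs naming, assuming the inode_setattr() helper still available in this tree:

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
		/* expanding truncate: instantiate and zero the new tail */
		err = generic_cont_expand_simple(inode, attr->ia_size);
		if (err)
			return err;
	}
	return inode_setattr(inode, attr);
}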
 
-/*
- * For moronic filesystems that do not allow holes in file.
- * We may have to extend the file.
- */
-
-int cont_prepare_write(struct page *page, unsigned offset,
-               unsigned to, get_block_t *get_block, loff_t *bytes)
+static int cont_expand_zero(struct file *file, struct address_space *mapping,
+                           loff_t pos, loff_t *bytes)
 {
-       struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
-       struct page *new_page;
-       pgoff_t pgpos;
-       long status;
-       unsigned zerofrom;
        unsigned blocksize = 1 << inode->i_blkbits;
-       void *kaddr;
+       struct page *page;
+       void *fsdata;
+       pgoff_t index, curidx;
+       loff_t curpos;
+       unsigned zerofrom, offset, len;
+       int err = 0;
 
-       while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
-               status = -ENOMEM;
-               new_page = grab_cache_page(mapping, pgpos);
-               if (!new_page)
-                       goto out;
-               /* we might sleep */
-               if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
-                       unlock_page(new_page);
-                       page_cache_release(new_page);
-                       continue;
-               }
-               zerofrom = *bytes & ~PAGE_CACHE_MASK;
+       index = pos >> PAGE_CACHE_SHIFT;
+       offset = pos & ~PAGE_CACHE_MASK;
+
+       while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
+               zerofrom = curpos & ~PAGE_CACHE_MASK;
                if (zerofrom & (blocksize-1)) {
                        *bytes |= (blocksize-1);
                        (*bytes)++;
                }
-               status = __block_prepare_write(inode, new_page, zerofrom,
-                                               PAGE_CACHE_SIZE, get_block);
-               if (status)
-                       goto out_unmap;
-               kaddr = kmap_atomic(new_page, KM_USER0);
-               memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-               flush_dcache_page(new_page);
-               kunmap_atomic(kaddr, KM_USER0);
-               generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
-               unlock_page(new_page);
-               page_cache_release(new_page);
-       }
-
-       if (page->index < pgpos) {
-               /* completely inside the area */
-               zerofrom = offset;
-       } else {
-               /* page covers the boundary, find the boundary offset */
-               zerofrom = *bytes & ~PAGE_CACHE_MASK;
+               len = PAGE_CACHE_SIZE - zerofrom;
+
+               err = pagecache_write_begin(file, mapping, curpos, len,
+                                               AOP_FLAG_UNINTERRUPTIBLE,
+                                               &page, &fsdata);
+               if (err)
+                       goto out;
+               zero_user(page, zerofrom, len);
+               err = pagecache_write_end(file, mapping, curpos, len, len,
+                                               page, fsdata);
+               if (err < 0)
+                       goto out;
+               BUG_ON(err != len);
+               err = 0;
+
+               balance_dirty_pages_ratelimited(mapping);
+       }
 
+       /* page covers the boundary, find the boundary offset */
+       if (index == curidx) {
+               zerofrom = curpos & ~PAGE_CACHE_MASK;
                /* if we will expand the thing, the last block will be filled */
-               if (to > zerofrom && (zerofrom & (blocksize-1))) {
+               if (offset <= zerofrom) {
+                       goto out;
+               }
+               if (zerofrom & (blocksize-1)) {
                        *bytes |= (blocksize-1);
                        (*bytes)++;
                }
+               len = offset - zerofrom;
 
-               /* starting below the boundary? Nothing to zero out */
-               if (offset <= zerofrom)
-                       zerofrom = offset;
+               err = pagecache_write_begin(file, mapping, curpos, len,
+                                               AOP_FLAG_UNINTERRUPTIBLE,
+                                               &page, &fsdata);
+               if (err)
+                       goto out;
+               zero_user(page, zerofrom, len);
+               err = pagecache_write_end(file, mapping, curpos, len, len,
+                                               page, fsdata);
+               if (err < 0)
+                       goto out;
+               BUG_ON(err != len);
+               err = 0;
        }
-       status = __block_prepare_write(inode, page, zerofrom, to, get_block);
-       if (status)
-               goto out1;
-       if (zerofrom < offset) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr+zerofrom, 0, offset-zerofrom);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
-               __block_commit_write(inode, page, zerofrom, offset);
+out:
+       return err;
+}
+
+/*
+ * For moronic filesystems that do not allow holes in files.
+ * We may have to extend the file.
+ */
+int cont_write_begin(struct file *file, struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned flags,
+                       struct page **pagep, void **fsdata,
+                       get_block_t *get_block, loff_t *bytes)
+{
+       struct inode *inode = mapping->host;
+       unsigned blocksize = 1 << inode->i_blkbits;
+       unsigned zerofrom;
+       int err;
+
+       err = cont_expand_zero(file, mapping, pos, bytes);
+       if (err)
+               goto out;
+
+       zerofrom = *bytes & ~PAGE_CACHE_MASK;
+       if (pos+len > *bytes && zerofrom & (blocksize-1)) {
+               *bytes |= (blocksize-1);
+               (*bytes)++;
        }
-       return 0;
-out1:
-       ClearPageUptodate(page);
-       return status;
 
-out_unmap:
-       ClearPageUptodate(new_page);
-       unlock_page(new_page);
-       page_cache_release(new_page);
+       *pagep = NULL;
+       err = block_write_begin(file, mapping, pos, len,
+                               flags, pagep, fsdata, get_block);
 out:
-       return status;
+       return err;
 }
+EXPORT_SYMBOL(cont_write_begin);
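
A no-holes filesystem would use this as its ->write_begin, passing a pointer to its per-inode "zeroed out this far" counter. A sketch; the myfs_* names and the i_allocated field are hypothetical:

static int myfs_cont_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos,
		unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	/* i_allocated (hypothetical) is the high-water mark kept in *bytes */
	return cont_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block,
				&MYFS_I(mapping->host)->i_allocated);
}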
 
 int block_prepare_write(struct page *page, unsigned from, unsigned to,
                        get_block_t *get_block)
@@ -2296,6 +2369,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
                ClearPageUptodate(page);
        return err;
 }
+EXPORT_SYMBOL(block_prepare_write);
 
 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
@@ -2303,157 +2377,213 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
        __block_commit_write(inode,page,from,to);
        return 0;
 }
+EXPORT_SYMBOL(block_commit_write);
 
-int generic_commit_write(struct file *file, struct page *page,
-               unsigned from, unsigned to)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-       __block_commit_write(inode,page,from,to);
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_sem.
-        */
-       if (pos > inode->i_size) {
-               i_size_write(inode, pos);
-               mark_inode_dirty(inode);
+/*
+ * block_page_mkwrite() is not allowed to change the file size as it gets
+ * called from a page fault handler when a page is first dirtied. Hence we must
+ * be careful to check for EOF conditions here. We set the page up correctly
+ * for a written page which means we get ENOSPC checking when writing into
+ * holes and correct delalloc and unwritten extent mapping on filesystems that
+ * support these features.
+ *
+ * We are not allowed to take the i_mutex here so we have to play games to
+ * protect against truncate races as the page could now be beyond EOF.  Because
+ * vmtruncate() writes the inode size before removing pages, once we have the
+ * page lock we can determine safely if the page is beyond EOF. If it is not
+ * beyond EOF, then the page is guaranteed safe against truncation until we
+ * unlock the page.
+ */
+int
+block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                  get_block_t get_block)
+{
+       struct page *page = vmf->page;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       unsigned long end;
+       loff_t size;
+       int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+
+       lock_page(page);
+       size = i_size_read(inode);
+       if ((page->mapping != inode->i_mapping) ||
+           (page_offset(page) > size)) {
+               /* page got truncated out from underneath us */
+               unlock_page(page);
+               goto out;
        }
-       return 0;
-}
 
+       /* page is wholly or partially inside EOF */
+       if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
+               end = size & ~PAGE_CACHE_MASK;
+       else
+               end = PAGE_CACHE_SIZE;
+
+       ret = block_prepare_write(page, 0, end, get_block);
+       if (!ret)
+               ret = block_commit_write(page, 0, end);
+
+       if (unlikely(ret)) {
+               unlock_page(page);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else /* -ENOSPC, -EIO, etc */
+                       ret = VM_FAULT_SIGBUS;
+       } else
+               ret = VM_FAULT_LOCKED;
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(block_page_mkwrite);
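
The expected use is as the ->page_mkwrite handler in a file's vm_operations_struct. A sketch with hypothetical myfs names:

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, myfs_get_block);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};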
 
 /*
- * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
+ * nobh_write_begin()'s prereads are special: the buffer_heads are freed
  * immediately, while under the page lock.  So it needs a special end_io
  * handler which does not touch the bh after unlocking it.
- *
- * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
- * a race there is benign: unlock_buffer() only use the bh's address for
- * hashing after unlocking the buffer, so it doesn't actually touch the bh
- * itself.
  */
 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
 {
-       if (uptodate) {
-               set_buffer_uptodate(bh);
-       } else {
-               /* This happens, due to failed READA attempts. */
-               clear_buffer_uptodate(bh);
-       }
-       unlock_buffer(bh);
+       __end_buffer_read_notouch(bh, uptodate);
+}
+
+/*
+ * Attach the singly-linked list of buffers created by nobh_write_begin to
+ * the page (converting it to a circular linked list and taking care of
+ * page dirty races).
+ */
+static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
+{
+       struct buffer_head *bh;
+
+       BUG_ON(!PageLocked(page));
+
+       spin_lock(&page->mapping->private_lock);
+       bh = head;
+       do {
+               if (PageDirty(page))
+                       set_buffer_dirty(bh);
+               if (!bh->b_this_page)
+                       bh->b_this_page = head;
+               bh = bh->b_this_page;
+       } while (bh != head);
+       attach_page_buffers(page, head);
+       spin_unlock(&page->mapping->private_lock);
 }
 
 /*
  * On entry, the page is fully not uptodate.
  * On exit the page is fully uptodate in the areas outside (from,to)
  */
-int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
+int nobh_write_begin(struct file *file, struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned flags,
+                       struct page **pagep, void **fsdata,
                        get_block_t *get_block)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocksize = 1 << blkbits;
-       struct buffer_head map_bh;
-       struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
+       struct buffer_head *head, *bh;
+       struct page *page;
+       pgoff_t index;
+       unsigned from, to;
        unsigned block_in_page;
-       unsigned block_start;
+       unsigned block_start, block_end;
        sector_t block_in_file;
-       char *kaddr;
        int nr_reads = 0;
-       int i;
        int ret = 0;
        int is_mapped_to_disk = 1;
-       int dirtied_it = 0;
+
+       index = pos >> PAGE_CACHE_SHIFT;
+       from = pos & (PAGE_CACHE_SIZE - 1);
+       to = from + len;
+
+       page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+       *fsdata = NULL;
+
+       if (page_has_buffers(page)) {
+               unlock_page(page);
+               page_cache_release(page);
+               *pagep = NULL;
+               return block_write_begin(file, mapping, pos, len, flags, pagep,
+                                       fsdata, get_block);
+       }
 
        if (PageMappedToDisk(page))
                return 0;
 
+       /*
+        * Allocate buffers so that we can keep track of state, and potentially
+        * attach them to the page if an error occurs. In the common case of
+        * no error, they will just be freed again without ever being attached
+        * to the page (which is all OK, because we're under the page lock).
+        *
+        * Be careful: the buffer linked list is a NULL-terminated one, rather
+        * than the circular one we're used to.
+        */
+       head = alloc_page_buffers(page, blocksize, 0);
+       if (!head) {
+               ret = -ENOMEM;
+               goto out_release;
+       }
+
        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
-       map_bh.b_page = page;
 
        /*
         * We loop across all blocks in the page, whether or not they are
         * part of the affected region.  This is so we can discover if the
         * page is fully mapped-to-disk.
         */
-       for (block_start = 0, block_in_page = 0;
+       for (block_start = 0, block_in_page = 0, bh = head;
                  block_start < PAGE_CACHE_SIZE;
-                 block_in_page++, block_start += blocksize) {
-               unsigned block_end = block_start + blocksize;
+                 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
                int create;
 
-               map_bh.b_state = 0;
+               block_end = block_start + blocksize;
+               bh->b_state = 0;
                create = 1;
                if (block_start >= to)
                        create = 0;
                ret = get_block(inode, block_in_file + block_in_page,
-                                       &map_bh, create);
+                                       bh, create);
                if (ret)
                        goto failed;
-               if (!buffer_mapped(&map_bh))
+               if (!buffer_mapped(bh))
                        is_mapped_to_disk = 0;
-               if (buffer_new(&map_bh))
-                       unmap_underlying_metadata(map_bh.b_bdev,
-                                                       map_bh.b_blocknr);
-               if (PageUptodate(page))
+               if (buffer_new(bh))
+                       unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+               if (PageUptodate(page)) {
+                       set_buffer_uptodate(bh);
                        continue;
-               if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       if (block_start < from) {
-                               memset(kaddr+block_start, 0, from-block_start);
-                               dirtied_it = 1;
-                       }
-                       if (block_end > to) {
-                               memset(kaddr + to, 0, block_end - to);
-                               dirtied_it = 1;
-                       }
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
+               }
+               if (buffer_new(bh) || !buffer_mapped(bh)) {
+                       zero_user_segments(page, block_start, from,
+                                                       to, block_end);
                        continue;
                }
-               if (buffer_uptodate(&map_bh))
+               if (buffer_uptodate(bh))
                        continue;       /* reiserfs does this */
                if (block_start < from || block_end > to) {
-                       struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
-
-                       if (!bh) {
-                               ret = -ENOMEM;
-                               goto failed;
-                       }
-                       bh->b_state = map_bh.b_state;
-                       atomic_set(&bh->b_count, 0);
-                       bh->b_this_page = NULL;
-                       bh->b_page = page;
-                       bh->b_blocknr = map_bh.b_blocknr;
-                       bh->b_size = blocksize;
-                       bh->b_data = (char *)(long)block_start;
-                       bh->b_bdev = map_bh.b_bdev;
-                       bh->b_private = NULL;
-                       read_bh[nr_reads++] = bh;
+                       lock_buffer(bh);
+                       bh->b_end_io = end_buffer_read_nobh;
+                       submit_bh(READ, bh);
+                       nr_reads++;
                }
        }
 
        if (nr_reads) {
-               struct buffer_head *bh;
-
                /*
                 * The page is locked, so these buffers are protected from
                 * any VM or truncate activity.  Hence we don't need to care
                 * for the buffer_head refcounts.
                 */
-               for (i = 0; i < nr_reads; i++) {
-                       bh = read_bh[i];
-                       lock_buffer(bh);
-                       bh->b_end_io = end_buffer_read_nobh;
-                       submit_bh(READ, bh);
-               }
-               for (i = 0; i < nr_reads; i++) {
-                       bh = read_bh[i];
+               for (bh = head; bh; bh = bh->b_this_page) {
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                ret = -EIO;
-                       free_buffer_head(bh);
-                       read_bh[i] = NULL;
                }
                if (ret)
                        goto failed;
@@ -2461,53 +2591,69 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 
        if (is_mapped_to_disk)
                SetPageMappedToDisk(page);
-       SetPageUptodate(page);
 
-       /*
-        * Setting the page dirty here isn't necessary for the prepare_write
-        * function - commit_write will do that.  But if/when this function is
-        * used within the pagefault handler to ensure that all mmapped pages
-        * have backing space in the filesystem, we will need to dirty the page
-        * if its contents were altered.
-        */
-       if (dirtied_it)
-               set_page_dirty(page);
+       *fsdata = head; /* to be released by nobh_write_end */
 
        return 0;
 
 failed:
-       for (i = 0; i < nr_reads; i++) {
-               if (read_bh[i])
-                       free_buffer_head(read_bh[i]);
-       }
-
+       BUG_ON(!ret);
        /*
-        * Error recovery is pretty slack.  Clear the page and mark it dirty
-        * so we'll later zero out any blocks which _were_ allocated.
+        * Error recovery is a bit difficult. We need to zero out blocks that
+        * were newly allocated, and dirty them to ensure they get written out.
+        * Buffers need to be attached to the page at this point, otherwise
+        * the handling of potential IO errors during writeout would be hard
+        * (could try doing synchronous writeout, but what if that fails too?)
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
-       kunmap_atomic(kaddr, KM_USER0);
-       SetPageUptodate(page);
-       set_page_dirty(page);
+       attach_nobh_buffers(page, head);
+       page_zero_new_buffers(page, from, to);
+
+out_release:
+       unlock_page(page);
+       page_cache_release(page);
+       *pagep = NULL;
+
+       if (pos + len > inode->i_size)
+               vmtruncate(inode, inode->i_size);
+
        return ret;
 }
-EXPORT_SYMBOL(nobh_prepare_write);
+EXPORT_SYMBOL(nobh_write_begin);
 
-int nobh_commit_write(struct file *file, struct page *page,
-               unsigned from, unsigned to)
+int nobh_write_end(struct file *file, struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned copied,
+                       struct page *page, void *fsdata)
 {
        struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+       struct buffer_head *head = fsdata;
+       struct buffer_head *bh;
+       BUG_ON(fsdata != NULL && page_has_buffers(page));
 
+       if (unlikely(copied < len) && head)
+               attach_nobh_buffers(page, head);
+       if (page_has_buffers(page))
+               return generic_write_end(file, mapping, pos, len,
+                                       copied, page, fsdata);
+
+       SetPageUptodate(page);
        set_page_dirty(page);
-       if (pos > inode->i_size) {
-               i_size_write(inode, pos);
+       if (pos+copied > inode->i_size) {
+               i_size_write(inode, pos+copied);
                mark_inode_dirty(inode);
        }
-       return 0;
+
+       unlock_page(page);
+       page_cache_release(page);
+
+       while (head) {
+               bh = head;
+               head = head->b_this_page;
+               free_buffer_head(bh);
+       }
+
+       return copied;
 }
-EXPORT_SYMBOL(nobh_commit_write);
+EXPORT_SYMBOL(nobh_write_end);
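
For the nobh variants the wiring mirrors the buffered case; nobh_write_end can be used directly since it already frees the buffer list handed over via fsdata. A sketch, myfs_* hypothetical:

static int myfs_nobh_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos,
		unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags,
				pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_nobh_aops = {
	.write_begin	= myfs_nobh_write_begin,
	.write_end	= nobh_write_end,
};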
 
 /*
  * nobh_writepage() - based on block_write_full_page() except
@@ -2521,7 +2667,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
        int ret;
 
        /* Is the page fully inside i_size? */
@@ -2552,54 +2697,91 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
-               ret = __block_write_full_page(inode, page, get_block, wbc);
+               ret = __block_write_full_page(inode, page, get_block, wbc,
+                                             end_buffer_async_write);
        return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
 
-/*
- * This function assumes that ->prepare_write() uses nobh_prepare_write().
- */
-int nobh_truncate_page(struct address_space *mapping, loff_t from)
+int nobh_truncate_page(struct address_space *mapping,
+                       loff_t from, get_block_t *get_block)
 {
-       struct inode *inode = mapping->host;
-       unsigned blocksize = 1 << inode->i_blkbits;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
-       unsigned to;
+       unsigned blocksize;
+       sector_t iblock;
+       unsigned length, pos;
+       struct inode *inode = mapping->host;
        struct page *page;
-       struct address_space_operations *a_ops = mapping->a_ops;
-       char *kaddr;
-       int ret = 0;
+       struct buffer_head map_bh;
+       int err;
 
-       if ((offset & (blocksize - 1)) == 0)
-               goto out;
+       blocksize = 1 << inode->i_blkbits;
+       length = offset & (blocksize - 1);
+
+       /* Block boundary? Nothing to do */
+       if (!length)
+               return 0;
+
+       length = blocksize - length;
+       iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
-       ret = -ENOMEM;
        page = grab_cache_page(mapping, index);
+       err = -ENOMEM;
        if (!page)
                goto out;
 
-       to = (offset + blocksize) & ~(blocksize - 1);
-       ret = a_ops->prepare_write(NULL, page, offset, to);
-       if (ret == 0) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
-               set_page_dirty(page);
+       if (page_has_buffers(page)) {
+has_buffers:
+               unlock_page(page);
+               page_cache_release(page);
+               return block_truncate_page(mapping, from, get_block);
        }
+
+       /* Find the buffer that contains "offset" */
+       pos = blocksize;
+       while (offset >= pos) {
+               iblock++;
+               pos += blocksize;
+       }
+
+       map_bh.b_size = blocksize;
+       map_bh.b_state = 0;
+       err = get_block(inode, iblock, &map_bh, 0);
+       if (err)
+               goto unlock;
+       /* unmapped? It's a hole - nothing to do */
+       if (!buffer_mapped(&map_bh))
+               goto unlock;
+
+       /* Ok, it's mapped. Make sure it's up-to-date */
+       if (!PageUptodate(page)) {
+               err = mapping->a_ops->readpage(NULL, page);
+               if (err) {
+                       page_cache_release(page);
+                       goto out;
+               }
+               lock_page(page);
+               if (!PageUptodate(page)) {
+                       err = -EIO;
+                       goto unlock;
+               }
+               if (page_has_buffers(page))
+                       goto has_buffers;
+       }
+       zero_user(page, offset, length);
+       set_page_dirty(page);
+       err = 0;
+
+unlock:
        unlock_page(page);
        page_cache_release(page);
 out:
-       return ret;
+       return err;
 }
 EXPORT_SYMBOL(nobh_truncate_page);
 
@@ -2609,12 +2791,11 @@ int block_truncate_page(struct address_space *mapping,
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
-       pgoff_t iblock;
+       sector_t iblock;
        unsigned length, pos;
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head *bh;
-       void *kaddr;
        int err;
 
        blocksize = 1 << inode->i_blkbits;
@@ -2625,7 +2806,7 @@ int block_truncate_page(struct address_space *mapping,
                return 0;
 
        length = blocksize - length;
-       iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        
        page = grab_cache_page(mapping, index);
        err = -ENOMEM;
@@ -2646,6 +2827,7 @@ int block_truncate_page(struct address_space *mapping,
 
        err = 0;
        if (!buffer_mapped(bh)) {
+               WARN_ON(bh->b_size != blocksize);
                err = get_block(inode, iblock, bh, 0);
                if (err)
                        goto unlock;
@@ -2658,7 +2840,7 @@ int block_truncate_page(struct address_space *mapping,
        if (PageUptodate(page))
                set_buffer_uptodate(bh);
 
-       if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
+       if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
                err = -EIO;
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
@@ -2667,11 +2849,7 @@ int block_truncate_page(struct address_space *mapping,
                        goto unlock;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, length);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
-
+       zero_user(page, offset, length);
        mark_buffer_dirty(bh);
        err = 0;
 
@@ -2681,22 +2859,24 @@ unlock:
 out:
        return err;
 }
+EXPORT_SYMBOL(block_truncate_page);
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces.
+ * This form passes in the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-                       struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc, bh_end_io_t *handler)
 {
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
 
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
-               return __block_write_full_page(inode, page, get_block, wbc);
+               return __block_write_full_page(inode, page, get_block, wbc,
+                                              handler);
 
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2706,7 +2886,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
                 * they may have been added in ext3_writepage().  Make them
                 * freeable here, so the page does not leak.
                 */
-               block_invalidatepage(page, 0);
+               do_invalidatepage(page, 0);
                unlock_page(page);
                return 0; /* don't care */
        }
@@ -2718,12 +2898,21 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
-       return __block_write_full_page(inode, page, get_block, wbc);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+EXPORT_SYMBOL(block_write_full_page_endio);
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc)
+{
+       return block_write_full_page_endio(page, get_block, wbc,
+                                          end_buffer_async_write);
 }
+EXPORT_SYMBOL(block_write_full_page);
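
So a conventional ->writepage reduces to a one-line wrapper (sketch, hypothetical names):

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}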
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
                            get_block_t *get_block)
@@ -2732,25 +2921,26 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
        struct inode *inode = mapping->host;
        tmp.b_state = 0;
        tmp.b_blocknr = 0;
+       tmp.b_size = 1 << inode->i_blkbits;
        get_block(inode, block, &tmp, 0);
        return tmp.b_blocknr;
 }
+EXPORT_SYMBOL(generic_block_bmap);
 
-static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
+static void end_bio_bh_io_sync(struct bio *bio, int err)
 {
        struct buffer_head *bh = bio->bi_private;
 
-       if (bio->bi_size)
-               return 1;
-
        if (err == -EOPNOTSUPP) {
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                set_bit(BH_Eopnotsupp, &bh->b_state);
        }
 
+       if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
+               set_bit(BH_Quiet, &bh->b_state);
+
        bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
        bio_put(bio);
-       return 0;
 }
 
 int submit_bh(int rw, struct buffer_head * bh)
@@ -2761,15 +2951,20 @@ int submit_bh(int rw, struct buffer_head * bh)
        BUG_ON(!buffer_locked(bh));
        BUG_ON(!buffer_mapped(bh));
        BUG_ON(!bh->b_end_io);
+       BUG_ON(buffer_delay(bh));
+       BUG_ON(buffer_unwritten(bh));
 
-       if (buffer_ordered(bh) && (rw == WRITE))
-               rw = WRITE_BARRIER;
+       /*
+        * Mask in the barrier bit for a write (which could be either a
+        * WRITE or a WRITE_SYNC).
+        */
+       if (buffer_ordered(bh) && (rw & WRITE))
+               rw |= WRITE_BARRIER;
 
        /*
-        * Only clear out a write error when rewriting, should this
-        * include WRITE_SYNC as well?
+        * Only clear out a write error when rewriting
         */
-       if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
+       if (test_set_buffer_req(bh) && (rw & WRITE))
                clear_buffer_write_io_error(bh);
 
        /*
@@ -2800,24 +2995,26 @@ int submit_bh(int rw, struct buffer_head * bh)
        bio_put(bio);
        return ret;
 }
+EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE.  The third
+ * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
+ * are sent to disk. The fourth %READA option is described in the documentation
+ * for generic_make_request() which ll_rw_block() calls.
  *
  * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request.  Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
+ * clean when doing a write request, and any buffer that appears to be
+ * up-to-date when doing a read request.  Further, it marks as clean the
+ * buffers that are processed for writing (the buffer cache won't assume
+ * that they are actually clean until the buffer gets unlocked).
  *
  * ll_rw_block sets b_end_io to a simple completion handler that marks
  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
@@ -2833,27 +3030,34 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (test_set_buffer_locked(bh))
+               if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
+                       lock_buffer(bh);
+               else if (!trylock_buffer(bh))
                        continue;
 
-               get_bh(bh);
-               if (rw == WRITE) {
-                       bh->b_end_io = end_buffer_write_sync;
+               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+                   rw == SWRITE_SYNC_PLUG) {
                        if (test_clear_buffer_dirty(bh)) {
-                               submit_bh(WRITE, bh);
+                               bh->b_end_io = end_buffer_write_sync;
+                               get_bh(bh);
+                               if (rw == SWRITE_SYNC)
+                                       submit_bh(WRITE_SYNC, bh);
+                               else
+                                       submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
-                       bh->b_end_io = end_buffer_read_sync;
                        if (!buffer_uptodate(bh)) {
+                               bh->b_end_io = end_buffer_read_sync;
+                               get_bh(bh);
                                submit_bh(rw, bh);
                                continue;
                        }
                }
                unlock_buffer(bh);
-               put_bh(bh);
        }
 }
+EXPORT_SYMBOL(ll_rw_block);
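
A typical (if deprecated) caller pattern, sketched with sb_getblk() and a hypothetical helper name: start the read, then wait and check the buffer yourself, since ll_rw_block() returns no completion status:

static int myfs_read_block(struct super_block *sb, sector_t blocknr)
{
	struct buffer_head *bh = sb_getblk(sb, blocknr);

	if (!bh)
		return -ENOMEM;
	ll_rw_block(READ, 1, &bh);	/* dropped if already uptodate */
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}
	brelse(bh);
	return 0;
}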
 
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
@@ -2869,7 +3073,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(WRITE, bh);
+               ret = submit_bh(WRITE_SYNC, bh);
                wait_on_buffer(bh);
                if (buffer_eopnotsupp(bh)) {
                        clear_buffer_eopnotsupp(bh);
@@ -2882,6 +3086,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        }
        return ret;
 }
+EXPORT_SYMBOL(sync_dirty_buffer);
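
The canonical data-integrity sequence is then: dirty the buffer, write it synchronously, and propagate any error. A sketch with a hypothetical myfs helper:

static int myfs_sync_super(struct buffer_head *bh)
{
	/* ...update the on-disk structure in bh->b_data here... */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* submits the write and waits */
}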
 
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
@@ -2917,7 +3122,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 
        bh = head;
        do {
-               if (buffer_write_io_error(bh))
+               if (buffer_write_io_error(bh) && page->mapping)
                        set_bit(AS_EIO, &page->mapping->flags);
                if (buffer_busy(bh))
                        goto failed;
@@ -2927,7 +3132,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
        do {
                struct buffer_head *next = bh->b_this_page;
 
-               if (!list_empty(&bh->b_assoc_buffers))
+               if (bh->b_assoc_map)
                        __remove_assoc_queue(bh);
                bh = next;
        } while (bh != head);
@@ -2955,17 +3160,23 @@ int try_to_free_buffers(struct page *page)
 
        spin_lock(&mapping->private_lock);
        ret = drop_buffers(page, &buffers_to_free);
-       if (ret) {
-               /*
-                * If the filesystem writes its buffers by hand (eg ext3)
-                * then we can have clean buffers against a dirty page.  We
-                * clean the page here; otherwise later reattachment of buffers
-                * could encounter a non-uptodate page, which is unresolvable.
-                * This only applies in the rare case where try_to_free_buffers
-                * succeeds but the page is not freed.
-                */
-               clear_page_dirty(page);
-       }
+
+       /*
+        * If the filesystem writes its buffers by hand (eg ext3)
+        * then we can have clean buffers against a dirty page.  We
+        * clean the page here; otherwise the VM will never notice
+        * that the filesystem did any IO at all.
+        *
+        * Also, during truncate, discard_buffer will have marked all
+        * the page's buffers clean.  We discover that here and clean
+        * the page also.
+        *
+        * private_lock must be held over this entire operation in order
+        * to synchronise against __set_page_dirty_buffers and prevent the
+        * dirty bit from being lost.
+        */
+       if (ret)
+               cancel_dirty_page(page, PAGE_CACHE_SIZE);
        spin_unlock(&mapping->private_lock);
 out:
        if (buffers_to_free) {
@@ -2981,7 +3192,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
 {
        struct address_space *mapping;
 
@@ -2989,17 +3200,17 @@ int block_sync_page(struct page *page)
        mapping = page_mapping(page);
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, page);
-       return 0;
 }
+EXPORT_SYMBOL(block_sync_page);
 
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
  *
  * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
  */
-asmlinkage long sys_bdflush(int func, long data)
+SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
        static int msg_count;
 
@@ -3022,7 +3233,7 @@ asmlinkage long sys_bdflush(int func, long data)
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -3047,19 +3258,19 @@ static void recalc_bh_state(void)
        if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
                return;
        __get_cpu_var(bh_accounting).ratelimit = 0;
-       for_each_cpu(i)
+       for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
        
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
-               preempt_disable();
-               __get_cpu_var(bh_accounting).nr++;
+               INIT_LIST_HEAD(&ret->b_assoc_buffers);
+               get_cpu_var(bh_accounting).nr++;
                recalc_bh_state();
-               preempt_enable();
+               put_cpu_var(bh_accounting);
        }
        return ret;
 }
@@ -3069,26 +3280,12 @@ void free_buffer_head(struct buffer_head *bh)
 {
        BUG_ON(!list_empty(&bh->b_assoc_buffers));
        kmem_cache_free(bh_cachep, bh);
-       preempt_disable();
-       __get_cpu_var(bh_accounting).nr--;
+       get_cpu_var(bh_accounting).nr--;
        recalc_bh_state();
-       preempt_enable();
+       put_cpu_var(bh_accounting);
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
-{
-       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-                           SLAB_CTOR_CONSTRUCTOR) {
-               struct buffer_head * bh = (struct buffer_head *)data;
-
-               memset(bh, 0, sizeof(*bh));
-               INIT_LIST_HEAD(&bh->b_assoc_buffers);
-       }
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
        int i;
@@ -3098,16 +3295,71 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
+       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       per_cpu(bh_accounting, cpu).nr = 0;
+       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
 {
-       if (action == CPU_DEAD)
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
+ * @bh: struct buffer_head
+ *
+ * Return true if the buffer is up-to-date; otherwise return false with
+ * the buffer locked.
+ */
+int bh_uptodate_or_lock(struct buffer_head *bh)
+{
+       if (!buffer_uptodate(bh)) {
+               lock_buffer(bh);
+               if (!buffer_uptodate(bh))
+                       return 0;
+               unlock_buffer(bh);
+       }
+       return 1;
+}
+EXPORT_SYMBOL(bh_uptodate_or_lock);
+
+/**
+ * bh_submit_read - Submit a locked buffer for reading
+ * @bh: struct buffer_head
+ *
+ * Returns zero on success and -EIO on error.
+ */
+int bh_submit_read(struct buffer_head *bh)
+{
+       BUG_ON(!buffer_locked(bh));
+
+       if (buffer_uptodate(bh)) {
+               unlock_buffer(bh);
+               return 0;
+       }
+
+       get_bh(bh);
+       bh->b_end_io = end_buffer_read_sync;
+       submit_bh(READ, bh);
+       wait_on_buffer(bh);
+       if (buffer_uptodate(bh))
+               return 0;
+       return -EIO;
+}
+EXPORT_SYMBOL(bh_submit_read);
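
These two helpers are designed to pair up; the common read pattern is a sketch like the following (hypothetical wrapper name):

static int myfs_read_bh(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* uptodate, buffer left unlocked */
	/* we now hold the buffer lock; bh_submit_read reads and waits */
	return bh_submit_read(bh);
}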
+
+static void
+init_buffer_head(void *data)
+{
+       struct buffer_head *bh = data;
+
+       memset(bh, 0, sizeof(*bh));
+       INIT_LIST_HEAD(&bh->b_assoc_buffers);
+}
 
 void __init buffer_init(void)
 {
@@ -3115,7 +3367,9 @@ void __init buffer_init(void)
 
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0,
-                       SLAB_PANIC, init_buffer_head, NULL);
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                               SLAB_MEM_SPREAD),
+                               init_buffer_head);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
@@ -3124,29 +3378,3 @@ void __init buffer_init(void)
        max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
        hotcpu_notifier(buffer_cpu_notify, 0);
 }
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(cont_prepare_write);
-EXPORT_SYMBOL(end_buffer_async_write);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(fsync_bdev);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_commit_write);
-EXPORT_SYMBOL(generic_cont_expand);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);