writeback: switch to per-bdi threads for flushing data
diff --git a/fs/buffer.c b/fs/buffer.c
index ac84cd1..90a9886 100644
@@ -76,8 +76,7 @@ EXPORT_SYMBOL(__lock_buffer);
 
 void unlock_buffer(struct buffer_head *bh)
 {
-       smp_mb__before_clear_bit();
-       clear_buffer_locked(bh);
+       clear_bit_unlock(BH_Lock, &bh->b_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
 }
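
clear_bit_unlock() folds the hand-rolled release barrier into the bitop
itself: the old smp_mb__before_clear_bit()/clear_buffer_locked() pair is
replaced by one primitive with release semantics, pairing with the acquire
in test_and_set_bit_lock().  A minimal sketch of the resulting pairing (not
taken verbatim from the patch):

	/* trylock_buffer() wraps the acquire-flavoured bitop */
	if (trylock_buffer(bh)) {	/* !test_and_set_bit_lock(BH_Lock, ...) */
		/* ... operate on the buffer ... */
		unlock_buffer(bh);	/* clear_bit_unlock(BH_Lock, ...) + wakeup */
	}
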
@@ -100,10 +99,18 @@ __clear_page_buffers(struct page *page)
        page_cache_release(page);
 }
 
+
+static int quiet_error(struct buffer_head *bh)
+{
+       if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
+               return 0;
+       return 1;
+}
+
+
 static void buffer_io_error(struct buffer_head *bh)
 {
        char b[BDEVNAME_SIZE];
-
        printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
                        bdevname(bh->b_bdev, b),
                        (unsigned long long)bh->b_blocknr);
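
quiet_error() centralises the "should this I/O error be logged?" decision:
it returns 1 (suppress) when the block layer flagged the request BH_Quiet,
or when printk_ratelimit() has used up the log budget.  A hypothetical
completion handler inside fs/buffer.c would gate its logging like this
(my_end_io is illustrative, not from the patch):

	static void my_end_io(struct buffer_head *bh, int uptodate)
	{
		if (uptodate) {
			set_buffer_uptodate(bh);
		} else {
			/* quiet_error() == 1 means stay silent */
			if (!quiet_error(bh))
				buffer_io_error(bh);
			clear_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
	}
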
@@ -145,7 +152,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+               if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -159,95 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping.  Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-       int ret = 0;
-
-       if (bdev)
-               ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-       return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device.   Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-       struct super_block *sb = get_super(bdev);
-       if (sb) {
-               int res = fsync_super(sb);
-               drop_super(sb);
-               return res;
-       }
-       return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev  --  lock a filesystem and force it into a consistent state
- * @bdev:      blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-       struct super_block *sb;
-
-       down(&bdev->bd_mount_sem);
-       sb = get_super(bdev);
-       if (sb && !(sb->s_flags & MS_RDONLY)) {
-               sb->s_frozen = SB_FREEZE_WRITE;
-               smp_wmb();
-
-               __fsync_super(sb);
-
-               sb->s_frozen = SB_FREEZE_TRANS;
-               smp_wmb();
-
-               sync_blockdev(sb->s_bdev);
-
-               if (sb->s_op->write_super_lockfs)
-                       sb->s_op->write_super_lockfs(sb);
-       }
-
-       sync_blockdev(bdev);
-       return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev  -- unlock filesystem
- * @bdev:      blockdevice to unlock
- * @sb:                associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-void thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-       if (sb) {
-               BUG_ON(sb->s_bdev != bdev);
-
-               if (sb->s_op->unlockfs)
-                       sb->s_op->unlockfs(sb);
-               sb->s_frozen = SB_UNFROZEN;
-               smp_wmb();
-               wake_up(&sb->s_wait_unfrozen);
-               drop_super(sb);
-       }
-
-       up(&bdev->bd_mount_sem);
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers.  To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
@@ -281,13 +199,13 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
        head = page_buffers(page);
        bh = head;
        do {
-               if (bh->b_blocknr == block) {
+               if (!buffer_mapped(bh))
+                       all_mapped = 0;
+               else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
-               if (!buffer_mapped(bh))
-                       all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);
 
@@ -360,18 +278,19 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-       struct zoneref *zrefs;
+       struct zone *zone;
        int nid;
 
-       wakeup_pdflush(1024);
+       wakeup_flusher_threads(1024);
        yield();
 
        for_each_online_node(nid) {
-               zrefs = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
-                                               gfp_zone(GFP_NOFS));
-               if (zrefs->zone)
+               (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+                                               gfp_zone(GFP_NOFS), NULL,
+                                               &zone);
+               if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
-                                               GFP_NOFS);
+                                               GFP_NOFS, NULL);
        }
 }
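
This hunk carries the commit's headline change into buffer.c:
wakeup_pdflush() woke the global pdflush pool, while
wakeup_flusher_threads() pokes the per-bdi flusher threads, each dedicated
to one backing device.  The call remains a hint rather than a guarantee (a
sketch of the intent; the distribution policy lives in fs/fs-writeback.c):

	/* Ask the per-bdi flushers to start writeback of roughly this
	 * many dirty pages, spread across all backing devices. */
	wakeup_flusher_threads(1024);
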
 
@@ -394,7 +313,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                set_buffer_uptodate(bh);
        } else {
                clear_buffer_uptodate(bh);
-               if (printk_ratelimit())
+               if (!quiet_error(bh))
                        buffer_io_error(bh);
                SetPageError(page);
        }
@@ -441,7 +360,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
        unsigned long flags;
@@ -455,7 +374,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (printk_ratelimit()) {
+               if (!quiet_error(bh)) {
                        buffer_io_error(bh);
                        printk(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
@@ -519,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
        set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+void mark_buffer_async_write_endio(struct buffer_head *bh,
+                                  bh_end_io_t *handler)
 {
-       bh->b_end_io = end_buffer_async_write;
+       bh->b_end_io = handler;
        set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+       mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
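
Splitting mark_buffer_async_write() this way lets a filesystem keep the
generic async-write state machine while substituting its own completion
hook.  A hypothetical handler chains back to the now-exported generic one
(myfs_end_io and myfs_note_error are invented for illustration):

	static void myfs_end_io(struct buffer_head *bh, int uptodate)
	{
		if (!uptodate)
			myfs_note_error(bh);		/* hypothetical bookkeeping */
		end_buffer_async_write(bh, uptodate);	/* generic unlock/cleanup */
	}

	/* ... later, when queueing the buffer for writeout: */
	mark_buffer_async_write_endio(bh, myfs_end_io);
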
 
 
@@ -579,7 +504,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
 /*
  * The buffer's backing address_space's private_lock must be held
  */
-static inline void __remove_assoc_queue(struct buffer_head *bh)
+static void __remove_assoc_queue(struct buffer_head *bh)
 {
        list_del_init(&bh->b_assoc_buffers);
        WARN_ON(!bh->b_assoc_map);
@@ -628,6 +553,46 @@ repeat:
        return err;
 }
 
+void do_thaw_all(struct work_struct *work)
+{
+       struct super_block *sb;
+       char b[BDEVNAME_SIZE];
+
+       spin_lock(&sb_lock);
+restart:
+       list_for_each_entry(sb, &super_blocks, s_list) {
+               sb->s_count++;
+               spin_unlock(&sb_lock);
+               down_read(&sb->s_umount);
+               while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+                       printk(KERN_WARNING "Emergency Thaw on %s\n",
+                              bdevname(sb->s_bdev, b));
+               up_read(&sb->s_umount);
+               spin_lock(&sb_lock);
+               if (__put_super_and_need_restart(sb))
+                       goto restart;
+       }
+       spin_unlock(&sb_lock);
+       kfree(work);
+       printk(KERN_WARNING "Emergency Thaw complete\n");
+}
+
+/**
+ * emergency_thaw_all -- forcibly thaw every frozen filesystem
+ *
+ * Used for emergency unfreeze of all filesystems via SysRq
+ */
+void emergency_thaw_all(void)
+{
+       struct work_struct *work;
+
+       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       if (work) {
+               INIT_WORK(work, do_thaw_all);
+               schedule_work(work);
+       }
+}
+
 /**
  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
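
The kmalloc(GFP_ATOMIC) plus schedule_work() indirection exists because the
trigger runs in atomic context (a SysRq handler), while thaw_bdev() must
sleep on s_umount; the work item frees itself via the kfree(work) at the
end of do_thaw_all().  The trigger side is roughly (a sketch shaped like
the SysRq 'j' handler of this era, not a verbatim quote):

	static void sysrq_handle_thaw(int key, struct tty_struct *tty)
	{
		emergency_thaw_all();	/* only queues work; never sleeps */
	}
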
@@ -696,32 +661,18 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  */
-static int __set_page_dirty(struct page *page,
+static void __set_page_dirty(struct page *page,
                struct address_space *mapping, int warn)
 {
-       if (unlikely(!mapping))
-               return !TestSetPageDirty(page);
-
-       if (TestSetPageDirty(page))
-               return 0;
-
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
-
-               if (mapping_cap_account_dirty(mapping)) {
-                       __inc_zone_page_state(page, NR_FILE_DIRTY);
-                       __inc_bdi_stat(mapping->backing_dev_info,
-                                       BDI_RECLAIMABLE);
-                       task_io_account_write(PAGE_CACHE_SIZE);
-               }
+               account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-
-       return 1;
 }
 
 /*
@@ -751,6 +702,7 @@ static int __set_page_dirty(struct page *page,
  */
 int __set_page_dirty_buffers(struct page *page)
 {
+       int newly_dirty;
        struct address_space *mapping = page_mapping(page);
 
        if (unlikely(!mapping))
@@ -766,9 +718,12 @@ int __set_page_dirty_buffers(struct page *page)
                        bh = bh->b_this_page;
                } while (bh != head);
        }
+       newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);
 
-       return __set_page_dirty(page, mapping, 1);
+       if (newly_dirty)
+               __set_page_dirty(page, mapping, 1);
+       return newly_dirty;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
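
__set_page_dirty() is reduced to a pure "account and tag" helper; each
caller now flips the page dirty bit itself, and crucially
__set_page_dirty_buffers() does so while still holding
mapping->private_lock, closing the window in which the page could be
cleaned between dirtying the buffers and dirtying the page.  The resulting
protocol, schematically:

	spin_lock(&mapping->private_lock);
	/* ... set_buffer_dirty() on every buffer ... */
	newly_dirty = !TestSetPageDirty(page);	/* now under private_lock */
	spin_unlock(&mapping->private_lock);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);	/* tree_lock + accounting */
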
 
@@ -795,7 +750,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
-       struct address_space *mapping;
+       struct address_space *mapping, *prev_mapping = NULL;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -820,7 +775,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
-                               ll_rw_block(SWRITE, 1, &bh);
+                               ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+                               /*
+                                * Kick off IO for the previous mapping. Note
+                                * that we will not run the very last mapping;
+                                * wait_on_buffer() will do that for us
+                                * through sync_buffer().
+                                */
+                               if (prev_mapping && prev_mapping != mapping)
+                                       blk_run_address_space(prev_mapping);
+                               prev_mapping = mapping;
+
                                brelse(bh);
                                spin_lock(lock);
                        }
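
SWRITE_SYNC_PLUG gives each buffer sync priority without an unplug per
submission, so the loop can batch many buffers per device: a queue is only
kicked once the loop moves on to a different mapping, and the final device
is unplugged implicitly by the waiter.  Schematically (following
sync_buffer() in this file, not the literal loop body):

	ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);	/* sync priority, stay plugged */
	/* ... submit more buffers, kicking prev_mapping on each switch ... */
	wait_on_buffer(bh);	/* -> sync_buffer() -> blk_run_address_space() */
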
@@ -878,6 +844,7 @@ void invalidate_inode_buffers(struct inode *inode)
                spin_unlock(&buffer_mapping->private_lock);
        }
 }
+EXPORT_SYMBOL(invalidate_inode_buffers);
 
 /*
  * Remove any clean buffers from the inode's buffer list.  This is called
@@ -1100,7 +1067,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 
                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
-                       __FUNCTION__, (unsigned long long)block,
+                       __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
@@ -1118,12 +1085,12 @@ static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
        /* Size must be multiple of hard sectorsize */
-       if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+       if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
-               printk(KERN_ERR "hardsect size: %d\n",
-                                       bdev_hardsect_size(bdev));
+               printk(KERN_ERR "logical block size: %d\n",
+                                       bdev_logical_block_size(bdev));
 
                dump_stack();
                return NULL;
@@ -1196,8 +1163,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
                        return;
        }
 
-       if (!test_set_buffer_dirty(bh))
-               __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
+       if (!test_set_buffer_dirty(bh)) {
+               struct page *page = bh->b_page;
+               if (!TestSetPageDirty(page)) {
+                       struct address_space *mapping = page_mapping(page);
+                       if (mapping)
+                               __set_page_dirty(page, mapping, 0);
+               }
+       }
 }
 
 /*
@@ -1213,8 +1186,7 @@ void __brelse(struct buffer_head * buf)
                put_bh(buf);
                return;
        }
-       printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
-       WARN_ON(1);
+       WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
 }
 
 /*
@@ -1463,7 +1435,7 @@ static void invalidate_bh_lru(void *arg)
        
 void invalidate_bh_lrus(void)
 {
-       on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
+       on_each_cpu(invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
@@ -1640,9 +1612,20 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * locked buffer.   This only can happen if someone has written the buffer
  * directly, with submit_bh().  At the address_space level PageWriteback
  * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed out before the higher-level caller actually waits
+ * for the writes to be completed.  The various wait functions, such
+ * as wait_on_writeback_range(), will ultimately call sync_page(),
+ * which in turn calls blk_run_backing_dev(), which ends up
+ * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-                       get_block_t *get_block, struct writeback_control *wbc)
+                       get_block_t *get_block, struct writeback_control *wbc,
+                       bh_end_io_t *handler)
 {
        int err;
        sector_t block;
@@ -1650,6 +1633,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        struct buffer_head *bh, *head;
        const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
+       int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+                       WRITE_SYNC_PLUG : WRITE);
 
        BUG_ON(!PageLocked(page));
 
@@ -1690,11 +1675,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                         */
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
-               } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+               } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+                          buffer_dirty(bh)) {
                        WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
+                       clear_buffer_delay(bh);
                        if (buffer_new(bh)) {
                                /* blockdev mappings never come here */
                                clear_buffer_new(bh);
@@ -1718,12 +1705,12 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                 */
                if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
                        lock_buffer(bh);
-               } else if (test_set_buffer_locked(bh)) {
+               } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        unlock_buffer(bh);
                }
@@ -1739,7 +1726,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(WRITE, bh);
+                       submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
@@ -1773,9 +1760,10 @@ recover:
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
-               if (buffer_mapped(bh) && buffer_dirty(bh)) {
+               if (buffer_mapped(bh) && buffer_dirty(bh) &&
+                   !buffer_delay(bh)) {
                        lock_buffer(bh);
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        /*
                         * The buffer may have been set dirty during
@@ -1792,7 +1780,7 @@ recover:
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh(WRITE, bh);
+                       submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
@@ -1985,7 +1973,7 @@ int block_write_begin(struct file *file, struct address_space *mapping,
        page = *pagep;
        if (page == NULL) {
                ownpage = 1;
-               page = __grab_cache_page(mapping, index);
+               page = grab_cache_page_write_begin(mapping, index, flags);
                if (!page) {
                        status = -ENOMEM;
                        goto out;
@@ -2011,7 +1999,6 @@ int block_write_begin(struct file *file, struct address_space *mapping,
                        if (pos + len > inode->i_size)
                                vmtruncate(inode, inode->i_size);
                }
-               goto out;
        }
 
 out:
@@ -2060,6 +2047,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
                        struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
+       int i_size_changed = 0;
 
        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
@@ -2072,17 +2060,72 @@ int generic_write_end(struct file *file, struct address_space *mapping,
         */
        if (pos+copied > inode->i_size) {
                i_size_write(inode, pos+copied);
-               mark_inode_dirty(inode);
+               i_size_changed = 1;
        }
 
        unlock_page(page);
        page_cache_release(page);
 
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (i_size_changed)
+               mark_inode_dirty(inode);
+
        return copied;
 }
 EXPORT_SYMBOL(generic_write_end);
 
 /*
+ * block_is_partially_uptodate checks whether buffers within a page are
+ * uptodate or not.
+ *
+ * Returns true if all buffers which correspond to a file portion
+ * we want to read are uptodate.
+ */
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+                                       unsigned long from)
+{
+       struct inode *inode = page->mapping->host;
+       unsigned block_start, block_end, blocksize;
+       unsigned to;
+       struct buffer_head *bh, *head;
+       int ret = 1;
+
+       if (!page_has_buffers(page))
+               return 0;
+
+       blocksize = 1 << inode->i_blkbits;
+       to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
+       to = from + to;
+       if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+               return 0;
+
+       head = page_buffers(page);
+       bh = head;
+       block_start = 0;
+       do {
+               block_end = block_start + blocksize;
+               if (block_end > from && block_start < to) {
+                       if (!buffer_uptodate(bh)) {
+                               ret = 0;
+                               break;
+                       }
+                       if (block_end >= to)
+                               break;
+               }
+               block_start = block_end;
+               bh = bh->b_this_page;
+       } while (bh != head);
+
+       return ret;
+}
+EXPORT_SYMBOL(block_is_partially_uptodate);
+
+/*
  * Generic "read page" function for block devices that have the normal
  * get_block functionality. This is most of the block device filesystems.
  * Reads the page asynchronously --- the unlock_buffer() and
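
This plugs into the read path through the is_partially_uptodate
address_space operation: a read that falls entirely within uptodate buffers
can be satisfied even though the page as a whole is not PageUptodate, which
matters when blocksize < pagesize.  Wiring it up is one line in the aops
table (myfs_aops and the neighbouring method are illustrative; the field
name is real):

	static const struct address_space_operations myfs_aops = {
		.readpage		= myfs_readpage,
		.is_partially_uptodate	= block_is_partially_uptodate,
		/* ... */
	};
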
@@ -2210,8 +2253,8 @@ out:
        return err;
 }
 
-int cont_expand_zero(struct file *file, struct address_space *mapping,
-                       loff_t pos, loff_t *bytes)
+static int cont_expand_zero(struct file *file, struct address_space *mapping,
+                           loff_t pos, loff_t *bytes)
 {
        struct inode *inode = mapping->host;
        unsigned blocksize = 1 << inode->i_blkbits;
@@ -2245,6 +2288,8 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
                        goto out;
                BUG_ON(err != len);
                err = 0;
+
+               balance_dirty_pages_ratelimited(mapping);
        }
 
        /* page covers the boundary, find the boundary offset */
@@ -2325,23 +2370,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
        return 0;
 }
 
-int generic_commit_write(struct file *file, struct page *page,
-               unsigned from, unsigned to)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-       __block_commit_write(inode,page,from,to);
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_mutex.
-        */
-       if (pos > inode->i_size) {
-               i_size_write(inode, pos);
-               mark_inode_dirty(inode);
-       }
-       return 0;
-}
-
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
  * called from a page fault handler when a page is first dirtied. Hence we must
@@ -2358,20 +2386,22 @@ int generic_commit_write(struct file *file, struct page *page,
  * unlock the page.
  */
 int
-block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                   get_block_t get_block)
 {
+       struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        unsigned long end;
        loff_t size;
-       int ret = -EINVAL;
+       int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
 
        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
                /* page got truncated out from underneath us */
-               goto out_unlock;
+               unlock_page(page);
+               goto out;
        }
 
        /* page is wholly or partially inside EOF */
@@ -2384,8 +2414,16 @@ block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
        if (!ret)
                ret = block_commit_write(page, 0, end);
 
-out_unlock:
-       unlock_page(page);
+       if (unlikely(ret)) {
+               unlock_page(page);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else /* -ENOSPC, -EIO, etc */
+                       ret = VM_FAULT_SIGBUS;
+       } else
+               ret = VM_FAULT_LOCKED;
+
+out:
        return ret;
 }
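
The calling convention moves from -errno to VM_FAULT_* codes, and on
success the page is handed back still locked (VM_FAULT_LOCKED) so the fault
path can rely on it not being truncated underneath.  A filesystem's
->page_mkwrite then reduces to a thin wrapper (myfs_get_block is
illustrative):

	static int myfs_page_mkwrite(struct vm_area_struct *vma,
				     struct vm_fault *vmf)
	{
		return block_page_mkwrite(vma, vmf, myfs_get_block);
	}
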
 
@@ -2450,7 +2488,7 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
-       page = __grab_cache_page(mapping, index);
+       page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;
@@ -2581,7 +2619,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
        struct buffer_head *bh;
        BUG_ON(fsdata != NULL && page_has_buffers(page));
 
-       if (unlikely(copied < len) && !page_has_buffers(page))
+       if (unlikely(copied < len) && head)
                attach_nobh_buffers(page, head);
        if (page_has_buffers(page))
                return generic_write_end(file, mapping, pos, len,
@@ -2653,7 +2691,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
-               ret = __block_write_full_page(inode, page, get_block, wbc);
+               ret = __block_write_full_page(inode, page, get_block, wbc,
+                                             end_buffer_async_write);
        return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2700,6 +2739,8 @@ has_buffers:
                pos += blocksize;
        }
 
+       map_bh.b_size = blocksize;
+       map_bh.b_state = 0;
        err = get_block(inode, iblock, &map_bh, 0);
        if (err)
                goto unlock;
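
The two added assignments fix a stale-state bug in the get_block()
contract: the caller owns the buffer_head, primes b_size with the mapping
length it wants, and must start b_state clean, otherwise leftover
BH_Mapped/BH_New bits from an earlier call can masquerade as results.
On-stack usage, schematically:

	struct buffer_head map_bh;

	map_bh.b_size = blocksize;	/* how much mapping we ask for */
	map_bh.b_state = 0;		/* no stale bits from a previous call */
	err = get_block(inode, iblock, &map_bh, 0);	/* 0 == don't allocate */
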
@@ -2811,9 +2852,10 @@ out:
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * This form passes in the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-                       struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc, bh_end_io_t *handler)
 {
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
@@ -2822,7 +2864,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
-               return __block_write_full_page(inode, page, get_block, wbc);
+               return __block_write_full_page(inode, page, get_block, wbc,
+                                              handler);
 
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2845,9 +2888,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * writes to that region are not written out to the file."
         */
        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-       return __block_write_full_page(inode, page, get_block, wbc);
+       return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc)
+{
+       return block_write_full_page_endio(page, get_block, wbc,
+                                          end_buffer_async_write);
 }
 
+
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
                            get_block_t *get_block)
 {
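
block_write_full_page() survives unchanged for existing callers as a thin
wrapper passing the stock end_buffer_async_write; a filesystem that wants
its own completion work supplies a handler through the new entry point
(myfs_get_block and myfs_end_io as in the earlier sketch, purely
illustrative):

	static int myfs_writepage(struct page *page,
				  struct writeback_control *wbc)
	{
		return block_write_full_page_endio(page, myfs_get_block,
						   wbc, myfs_end_io);
	}
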
@@ -2869,6 +2923,9 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
                set_bit(BH_Eopnotsupp, &bh->b_state);
        }
 
+       if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
+               set_bit(BH_Quiet, &bh->b_state);
+
        bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
        bio_put(bio);
 }
@@ -2881,15 +2938,20 @@ int submit_bh(int rw, struct buffer_head * bh)
        BUG_ON(!buffer_locked(bh));
        BUG_ON(!buffer_mapped(bh));
        BUG_ON(!bh->b_end_io);
+       BUG_ON(buffer_delay(bh));
+       BUG_ON(buffer_unwritten(bh));
 
-       if (buffer_ordered(bh) && (rw == WRITE))
-               rw = WRITE_BARRIER;
+       /*
+        * Mask in barrier bit for a write (could be either a WRITE or a
+        * WRITE_SYNC).
+        */
+       if (buffer_ordered(bh) && (rw & WRITE))
+               rw |= WRITE_BARRIER;
 
        /*
-        * Only clear out a write error when rewriting, should this
-        * include WRITE_SYNC as well?
+        * Only clear out a write error when rewriting
         */
-       if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
+       if (test_set_buffer_req(bh) && (rw & WRITE))
                clear_buffer_write_io_error(bh);
 
        /*
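
The equality tests broke once sync writes appeared: WRITE_SYNC is the plain
WRITE bit plus extra bio flags, so rw == WRITE rejected it and a sync write
on a buffer_ordered() buffer silently lost its barrier.  Masking fixes both
sites; the distinction, as a sketch (exact flag composition varied between
releases):

	static inline int rw_is_any_write(int rw)
	{
		/* true for WRITE, WRITE_SYNC, WRITE_BARRIER, ... */
		return rw & WRITE;
	}
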
@@ -2954,16 +3016,20 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (rw == SWRITE)
+               if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
                        lock_buffer(bh);
-               else if (test_set_buffer_locked(bh))
+               else if (!trylock_buffer(bh))
                        continue;
 
-               if (rw == WRITE || rw == SWRITE) {
+               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+                   rw == SWRITE_SYNC_PLUG) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
-                               submit_bh(WRITE, bh);
+                               if (rw == SWRITE_SYNC)
+                                       submit_bh(WRITE_SYNC, bh);
+                               else
+                                       submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
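
ll_rw_block() now understands three locked-write variants: SWRITE (lock the
buffer unconditionally, plain write), SWRITE_SYNC (lock, sync priority,
unplug on submit) and SWRITE_SYNC_PLUG (lock, sync priority, stay plugged
for batching).  A caller flushing one buffer synchronously would do
(sketch):

	ll_rw_block(SWRITE_SYNC, 1, &bh);	/* locks bh, submits WRITE_SYNC */
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		err = -EIO;	/* err declared by the caller */
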
@@ -2992,7 +3058,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(WRITE, bh);
+               ret = submit_bh(WRITE_SYNC, bh);
                wait_on_buffer(bh);
                if (buffer_eopnotsupp(bh)) {
                        clear_buffer_eopnotsupp(bh);
@@ -3127,7 +3193,7 @@ void block_sync_page(struct page *page)
  * Use of bdflush() is deprecated and will be removed in a future kernel.
  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
  */
-asmlinkage long sys_bdflush(int func, long data)
+SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
        static int msg_count;
 
@@ -3270,7 +3336,7 @@ int bh_submit_read(struct buffer_head *bh)
 EXPORT_SYMBOL(bh_submit_read);
 
 static void
-init_buffer_head(struct kmem_cache *cachep, void *data)
+init_buffer_head(void *data)
 {
        struct buffer_head *bh = data;
 
@@ -3306,13 +3372,13 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(block_write_full_page_endio);
 EXPORT_SYMBOL(cont_write_begin);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
+EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);