Remove SLAB_CTOR_CONSTRUCTOR
diff --git a/fs/buffer.c b/fs/buffer.c
index 75cac9a..3deeb88 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/syscalls.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/capability.h>
 #include <linux/blkdev.h>
 #include <linux/file.h>
 #include <linux/quotaops.h>
@@ -35,6 +34,7 @@
 #include <linux/hash.h>
 #include <linux/suspend.h>
 #include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/bio.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -43,7 +43,6 @@
 #include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static void invalidate_bh_lrus(void);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
@@ -77,6 +76,7 @@ EXPORT_SYMBOL(__lock_buffer);
 
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
+       smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
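A brief aside on the barrier added above. This is a hedged sketch of the ordering contract in unlock_buffer(), not the literal source; the waiter side (__wait_on_buffer/wait_on_bit) sits outside this hunk.

	/* Publish the lock holder's earlier stores before BH_Lock is
	 * cleared, so a waiter that observes the bit clear also observes
	 * the updated buffer state.
	 */
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	/* Order the bit clear before the waitqueue check inside
	 * wake_up_bit(), so a waiter going to sleep concurrently cannot
	 * miss its wakeup.
	 */
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);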
@@ -153,41 +153,14 @@ int sync_blockdev(struct block_device *bdev)
 {
        int ret = 0;
 
-       if (bdev) {
-               int err;
-
-               ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
-               err = filemap_fdatawait(bdev->bd_inode->i_mapping);
-               if (!ret)
-                       ret = err;
-       }
+       if (bdev)
+               ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
        return ret;
 }
 EXPORT_SYMBOL(sync_blockdev);
 
 /*
  * Write out and wait upon all dirty data associated with this
- * superblock.  Filesystem data as well as the underlying block
- * device.  Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-       sync_inodes_sb(sb, 0);
-       DQUOT_SYNC(sb);
-       lock_super(sb);
-       if (sb->s_dirt && sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
-       if (sb->s_op->sync_fs)
-               sb->s_op->sync_fs(sb, 1);
-       sync_blockdev(sb->s_bdev);
-       sync_inodes_sb(sb, 1);
-
-       return sync_blockdev(sb->s_bdev);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
  * device.   Filesystem data as well as the underlying block
  * device.  Takes the superblock lock.
  */
@@ -221,19 +194,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
                sb->s_frozen = SB_FREEZE_WRITE;
                smp_wmb();
 
-               sync_inodes_sb(sb, 0);
-               DQUOT_SYNC(sb);
-
-               lock_super(sb);
-               if (sb->s_dirt && sb->s_op->write_super)
-                       sb->s_op->write_super(sb);
-               unlock_super(sb);
-
-               if (sb->s_op->sync_fs)
-                       sb->s_op->sync_fs(sb, 1);
-
-               sync_blockdev(sb->s_bdev);
-               sync_inodes_sb(sb, 1);
+               __fsync_super(sb);
 
                sb->s_frozen = SB_FREEZE_TRANS;
                smp_wmb();
@@ -274,117 +235,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 EXPORT_SYMBOL(thaw_bdev);
 
 /*
- * sync everything.  Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
-       wakeup_pdflush(0);
-       sync_inodes(0);         /* All mappings, inodes and their blockdevs */
-       DQUOT_SYNC(NULL);
-       sync_supers();          /* Write the superblocks */
-       sync_filesystems(0);    /* Start syncing the filesystems */
-       sync_filesystems(wait); /* Waitingly sync the filesystems */
-       sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
-       if (!wait)
-               printk("Emergency Sync complete\n");
-       if (unlikely(laptop_mode))
-               laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
-       do_sync(1);
-       return 0;
-}
-
-void emergency_sync(void)
-{
-       pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
-       struct inode * inode = dentry->d_inode;
-       struct super_block * sb;
-       int ret, err;
-
-       /* sync the inode to buffers */
-       ret = write_inode_now(inode, 0);
-
-       /* sync the superblock to buffers */
-       sb = inode->i_sb;
-       lock_super(sb);
-       if (sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
-
-       /* .. finally sync the buffers to disk */
-       err = sync_blockdev(sb->s_bdev);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
-static long do_fsync(unsigned int fd, int datasync)
-{
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-
-       ret = -EINVAL;
-       if (!file->f_op || !file->f_op->fsync) {
-               /* Why?  We can still call filemap_fdatawrite */
-               goto out_putf;
-       }
-
-       mapping = file->f_mapping;
-
-       current->flags |= PF_SYNCWRITE;
-       ret = filemap_fdatawrite(mapping);
-
-       /*
-        * We need to protect against concurrent writers,
-        * which could cause livelocks in fsync_buffers_list
-        */
-       down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, datasync);
-       if (!ret)
-               ret = err;
-       up(&mapping->host->i_sem);
-       err = filemap_fdatawait(mapping);
-       if (!ret)
-               ret = err;
-       current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
-       fput(file);
-out:
-       return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
-       return do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
-       return do_fsync(fd, 1);
-}
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers.  To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
@@ -396,7 +246,7 @@ asmlinkage long sys_fdatasync(unsigned int fd)
  * private_lock is contended then so is mapping->tree_lock).
  */
 static struct buffer_head *
-__find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
+__find_get_block_slow(struct block_device *bdev, sector_t block)
 {
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
@@ -436,8 +286,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
-                       (unsigned long long)block, (unsigned long long)bh->b_blocknr);
-               printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+                       (unsigned long long)block,
+                       (unsigned long long)bh->b_blocknr);
+               printk("b_state=0x%08lx, b_size=%zu\n",
+                       bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
 out_unlock:
@@ -479,15 +331,15 @@ out:
    we think the disk contains more recent information than the buffercache.
    The update == 1 pass marks the buffers we need to update, the update == 2
    pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
+void invalidate_bdev(struct block_device *bdev)
 {
+       struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+       if (mapping->nrpages == 0)
+               return;
+
        invalidate_bh_lrus();
-       /*
-        * FIXME: what about destroy_dirty_buffers?
-        * We really want to use invalidate_inode_pages2() for
-        * that, but not until that's cleaned up.
-        */
-       invalidate_inode_pages(bdev->bd_inode->i_mapping);
+       invalidate_mapping_pages(mapping, 0, -1);
 }
 
 /*
@@ -501,7 +353,7 @@ static void free_more_memory(void)
        wakeup_pdflush(1024);
        yield();
 
-       for_each_pgdat(pgdat) {
+       for_each_online_pgdat(pgdat) {
                zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
                if (*zones)
                        try_to_free_pages(zones, GFP_NOFS);
@@ -574,7 +426,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
        unsigned long flags;
@@ -595,6 +447,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
+               set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }
@@ -714,6 +567,10 @@ EXPORT_SYMBOL(mark_buffer_async_write);
 static inline void __remove_assoc_queue(struct buffer_head *bh)
 {
        list_del_init(&bh->b_assoc_buffers);
+       WARN_ON(!bh->b_assoc_map);
+       if (buffer_write_io_error(bh))
+               set_bit(AS_EIO, &bh->b_assoc_map->flags);
+       bh->b_assoc_map = NULL;
 }
 
 int inode_has_buffers(struct inode *inode)
@@ -806,13 +663,13 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
-               if (mapping->assoc_mapping != buffer_mapping)
-                       BUG();
+               BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (list_empty(&bh->b_assoc_buffers)) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
+               bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
 }
@@ -845,7 +702,10 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-       struct address_space * const mapping = page->mapping;
+       struct address_space * const mapping = page_mapping(page);
+
+       if (unlikely(!mapping))
+               return !TestSetPageDirty(page);
 
        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
@@ -859,20 +719,21 @@ int __set_page_dirty_buffers(struct page *page)
        }
        spin_unlock(&mapping->private_lock);
 
-       if (!TestSetPageDirty(page)) {
-               write_lock_irq(&mapping->tree_lock);
-               if (page->mapping) {    /* Race with truncate? */
-                       if (mapping_cap_account_dirty(mapping))
-                               inc_page_state(nr_dirty);
-                       radix_tree_tag_set(&mapping->page_tree,
-                                               page_index(page),
-                                               PAGECACHE_TAG_DIRTY);
+       if (TestSetPageDirty(page))
+               return 0;
+
+       write_lock_irq(&mapping->tree_lock);
+       if (page->mapping) {    /* Race with truncate? */
+               if (mapping_cap_account_dirty(mapping)) {
+                       __inc_zone_page_state(page, NR_FILE_DIRTY);
+                       task_io_account_write(PAGE_CACHE_SIZE);
                }
-               write_unlock_irq(&mapping->tree_lock);
-               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+               radix_tree_tag_set(&mapping->page_tree,
+                               page_index(page), PAGECACHE_TAG_DIRTY);
        }
-       
-       return 0;
+       write_unlock_irq(&mapping->tree_lock);
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       return 1;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -906,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
-               list_del_init(&bh->b_assoc_buffers);
+               __remove_assoc_queue(bh);
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        if (buffer_dirty(bh)) {
@@ -927,7 +788,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               __remove_assoc_queue(bh);
+               list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
                spin_unlock(lock);
                wait_on_buffer(bh);
@@ -1027,12 +888,13 @@ try_again:
 
                bh->b_state = 0;
                atomic_set(&bh->b_count, 0);
+               bh->b_private = NULL;
                bh->b_size = size;
 
                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
 
-               bh->b_end_io = NULL;
+               init_buffer(bh, NULL, NULL);
        }
        return head;
 /*
@@ -1123,8 +985,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        if (!page)
                return NULL;
 
-       if (!PageLocked(page))
-               BUG();
+       BUG_ON(!PageLocked(page));
 
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
@@ -1170,7 +1031,7 @@ failed:
  * some of those buffers may be aliases of filesystem data.
  * grow_dev_page() will go BUG() if this happens.
  */
-static inline int
+static int
 grow_buffers(struct block_device *bdev, sector_t block, int size)
 {
        struct page *page;
@@ -1183,8 +1044,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        } while ((size << sizebits) < PAGE_SIZE);
 
        index = block >> sizebits;
-       block = index << sizebits;
 
+       /*
+        * Check for a block which wants to lie outside our maximum possible
+        * pagecache index.  (this comparison is done using sector_t types).
+        */
+       if (unlikely(index != block >> sizebits)) {
+               char b[BDEVNAME_SIZE];
+
+               printk(KERN_ERR "%s: requested out-of-range block %llu for "
+                       "device %s\n",
+                       __FUNCTION__, (unsigned long long)block,
+                       bdevname(bdev, b));
+               return -EIO;
+       }
+       block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
@@ -1211,12 +1085,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
        for (;;) {
                struct buffer_head * bh;
+               int ret;
 
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
 
-               if (!grow_buffers(bdev, block, size))
+               ret = grow_buffers(bdev, block, size);
+               if (ret < 0)
+                       return NULL;
+               if (ret == 0)
                        free_more_memory();
        }
 }
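Two cooperating changes meet in __getblk_slow() above: grow_buffers() can now fail hard for blocks whose page index does not fit in pgoff_t, and the caller distinguishes that from a transient allocation failure. A hedged sketch of the idiom, assuming pgoff_t can be narrower than sector_t (as on 32-bit configurations):

	/*
	 * grow_buffers() outcome, as consumed by the retry loop above:
	 *   < 0   hard error (out-of-range block) -> __getblk_slow() returns NULL
	 *   == 0  could not allocate page/buffers -> free_more_memory(), retry
	 *   > 0   buffers created                 -> retry __find_get_block()
	 *
	 * The out-of-range test relies on the truncating assignment:
	 */
	pgoff_t index = block >> sizebits;	/* assignment may truncate */

	if (index != block >> sizebits)		/* index promoted back to sector_t;
						   a mismatch means the block lies
						   beyond the largest reachable
						   pagecache index */
		return -EIO;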
@@ -1291,6 +1169,7 @@ void __bforget(struct buffer_head *bh)
 
                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
+               bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
@@ -1396,12 +1275,12 @@ static void bh_lru_install(struct buffer_head *bh)
 /*
  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
  */
-static inline struct buffer_head *
-lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
+static struct buffer_head *
+lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *ret = NULL;
        struct bh_lru *lru;
-       int i;
+       unsigned int i;
 
        check_irqs_on();
        bh_lru_lock();
@@ -1433,12 +1312,12 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
  * NULL
  */
 struct buffer_head *
-__find_get_block(struct block_device *bdev, sector_t block, int size)
+__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
 
        if (bh == NULL) {
-               bh = __find_get_block_slow(bdev, block, size);
+               bh = __find_get_block_slow(bdev, block);
                if (bh)
                        bh_lru_install(bh);
        }
@@ -1461,7 +1340,7 @@ EXPORT_SYMBOL(__find_get_block);
  * attempt is failing.  FIXME, perhaps?
  */
 struct buffer_head *
-__getblk(struct block_device *bdev, sector_t block, int size)
+__getblk(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __find_get_block(bdev, block, size);
 
@@ -1475,11 +1354,13 @@ EXPORT_SYMBOL(__getblk);
 /*
  * Do async read-ahead on a buffer..
  */
-void __breadahead(struct block_device *bdev, sector_t block, int size)
+void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
-       ll_rw_block(READA, 1, &bh);
-       brelse(bh);
+       if (likely(bh)) {
+               ll_rw_block(READA, 1, &bh);
+               brelse(bh);
+       }
 }
 EXPORT_SYMBOL(__breadahead);
 
@@ -1493,11 +1374,11 @@ EXPORT_SYMBOL(__breadahead);
  *  It returns NULL if the block was unreadable.
  */
 struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, int size)
+__bread(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
 
-       if (!buffer_uptodate(bh))
+       if (likely(bh) && !buffer_uptodate(bh))
                bh = __bread_slow(bh);
        return bh;
 }
@@ -1520,7 +1401,7 @@ static void invalidate_bh_lru(void *arg)
        put_cpu_var(bh_lrus);
 }
        
-static void invalidate_bh_lrus(void)
+void invalidate_bh_lrus(void)
 {
        on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
@@ -1529,8 +1410,7 @@ void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
 {
        bh->b_page = page;
-       if (offset >= PAGE_SIZE)
-               BUG();
+       BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
@@ -1544,7 +1424,7 @@ EXPORT_SYMBOL(set_bh_page);
 /*
  * Called when truncating a buffer on a page completely.
  */
-static inline void discard_buffer(struct buffer_head * bh)
+static void discard_buffer(struct buffer_head * bh)
 {
        lock_buffer(bh);
        clear_buffer_dirty(bh);
@@ -1553,39 +1433,11 @@ static inline void discard_buffer(struct buffer_head * bh)
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        clear_buffer_delay(bh);
+       clear_buffer_unwritten(bh);
        unlock_buffer(bh);
 }
 
 /**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private).  If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, gfp_t gfp_mask)
-{
-       struct address_space * const mapping = page->mapping;
-
-       BUG_ON(!PageLocked(page));
-       if (PageWriteback(page))
-               return 0;
-       
-       if (mapping && mapping->a_ops->releasepage)
-               return mapping->a_ops->releasepage(page, gfp_mask);
-       return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
-/**
  * block_invalidatepage - invalidate part of all of a buffer-backed page
  *
  * @page: the page which is affected
@@ -1600,11 +1452,10 @@ EXPORT_SYMBOL(try_to_release_page);
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
 {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
-       int ret = 1;
 
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
@@ -1631,21 +1482,12 @@ int block_invalidatepage(struct page *page, unsigned long offset)
         * so real IO is not possible anymore.
         */
        if (offset == 0)
-               ret = try_to_release_page(page, 0);
+               try_to_release_page(page, 0);
 out:
-       return ret;
+       return;
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-int do_invalidatepage(struct page *page, unsigned long offset)
-{
-       int (*invalidatepage)(struct page *, unsigned long);
-       invalidatepage = page->mapping->a_ops->invalidatepage;
-       if (invalidatepage == NULL)
-               invalidatepage = block_invalidatepage;
-       return (*invalidatepage)(page, offset);
-}
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
@@ -1703,7 +1545,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
 
        might_sleep();
 
-       old_bh = __find_get_block_slow(bdev, block, 0);
+       old_bh = __find_get_block_slow(bdev, block);
        if (old_bh) {
                clear_buffer_dirty(old_bh);
                wait_on_buffer(old_bh);
@@ -1745,6 +1587,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
+       const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
 
        BUG_ON(!PageLocked(page));
@@ -1752,7 +1595,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 
        if (!page_has_buffers(page)) {
-               create_empty_buffers(page, 1 << inode->i_blkbits,
+               create_empty_buffers(page, blocksize,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
 
@@ -1766,7 +1609,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         * handle that here by just cleaning them.
         */
 
-       block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        head = page_buffers(page);
        bh = head;
 
@@ -1787,6 +1630,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
@@ -1849,17 +1693,8 @@ done:
                 * clean.  Someone wrote them back by hand with
                 * ll_rw_block/submit_bh.  A rare case.
                 */
-               int uptodate = 1;
-               do {
-                       if (!buffer_uptodate(bh)) {
-                               uptodate = 0;
-                               break;
-                       }
-                       bh = bh->b_this_page;
-               } while (bh != head);
-               if (uptodate)
-                       SetPageUptodate(page);
                end_page_writeback(page);
+
                /*
                 * The page and buffer_heads can be released at any time from
                 * here on.
@@ -1891,8 +1726,8 @@ recover:
        } while ((bh = bh->b_this_page) != head);
        SetPageError(page);
        BUG_ON(PageWriteback(page));
+       mapping_set_error(page->mapping, err);
        set_page_writeback(page);
-       unlock_page(page);
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
@@ -1902,6 +1737,7 @@ recover:
                }
                bh = next;
        } while (bh != head);
+       unlock_page(page);
        goto done;
 }
 
@@ -1940,6 +1776,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                if (!buffer_mapped(bh)) {
+                       WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                break;
@@ -1972,6 +1809,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                        continue; 
                }
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+                   !buffer_unwritten(bh) &&
                     (block_start < from || block_end > to)) {
                        ll_rw_block(READ, 1, &bh);
                        *wait_bh++=bh;
@@ -2008,12 +1846,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (block_start >= to)
                        break;
                if (buffer_new(bh)) {
-                       void *kaddr;
-
                        clear_buffer_new(bh);
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr+block_start, 0, bh->b_size);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       zero_user_page(page, block_start, bh->b_size, KM_USER0);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                }
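This is one of several hunks that replace an open-coded kmap/memset/kunmap sequence with zero_user_page(). For reference, a sketch of what that helper is assumed to expand to in this kernel generation (include/linux/highmem.h; the flush_dcache_page() call matters on architectures with virtually-indexed caches):

	static inline void zero_user_page(struct page *page, unsigned offset,
					  unsigned size, enum km_type km)
	{
		void *kaddr = kmap_atomic(page, km);

		memset(kaddr + offset, 0, size);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, km);
	}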
@@ -2095,15 +1929,14 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 
                        fully_mapped = 0;
                        if (iblock < lblock) {
+                               WARN_ON(bh->b_size != blocksize);
                                err = get_block(inode, iblock, bh, 0);
                                if (err)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               void *kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + i * blocksize, 0, blocksize);
-                               flush_dcache_page(page);
-                               kunmap_atomic(kaddr, KM_USER0);
+                               zero_user_page(page, i * blocksize, blocksize,
+                                               KM_USER0);
                                if (!err)
                                        set_buffer_uptodate(bh);
                                continue;
@@ -2158,11 +1991,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
  * truncates.  Uses prepare/commit_write to allow the filesystem to
  * deal with the hole.  
  */
-int generic_cont_expand(struct inode *inode, loff_t size)
+static int __generic_cont_expand(struct inode *inode, loff_t size,
+                                pgoff_t index, unsigned int offset)
 {
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
-       unsigned long index, offset, limit;
+       unsigned long limit;
        int err;
 
        err = -EFBIG;
@@ -2174,24 +2008,24 @@ int generic_cont_expand(struct inode *inode, loff_t size)
        if (size > inode->i_sb->s_maxbytes)
                goto out;
 
-       offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-
-       /* ugh.  in prepare/commit_write, if from==to==start of block, we 
-       ** skip the prepare.  make sure we never send an offset for the start
-       ** of a block
-       */
-       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
-               offset++;
-       }
-       index = size >> PAGE_CACHE_SHIFT;
        err = -ENOMEM;
        page = grab_cache_page(mapping, index);
        if (!page)
                goto out;
        err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
-       if (!err) {
-               err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+       if (err) {
+               /*
+                * ->prepare_write() may have instantiated a few blocks
+                * outside i_size.  Trim these off again.
+                */
+               unlock_page(page);
+               page_cache_release(page);
+               vmtruncate(inode, inode->i_size);
+               goto out;
        }
+
+       err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+
        unlock_page(page);
        page_cache_release(page);
        if (err > 0)
@@ -2200,6 +2034,36 @@ out:
        return err;
 }
 
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+       pgoff_t index;
+       unsigned int offset;
+
+       offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
+
+       /* ugh.  in prepare/commit_write, if from==to==start of block, we
+       ** skip the prepare.  make sure we never send an offset for the start
+       ** of a block
+       */
+       if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+               /* caller must handle this extra byte. */
+               offset++;
+       }
+       index = size >> PAGE_CACHE_SHIFT;
+
+       return __generic_cont_expand(inode, size, index, offset);
+}
+
+int generic_cont_expand_simple(struct inode *inode, loff_t size)
+{
+       loff_t pos = size - 1;
+       pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+       unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
+
+       /* prepare/commit_write can handle even if from==to==start of block. */
+       return __generic_cont_expand(inode, size, index, offset);
+}
+
 /*
  * For moronic filesystems that do not allow holes in file.
  * We may have to extend the file.
@@ -2215,7 +2079,6 @@ int cont_prepare_write(struct page *page, unsigned offset,
        long status;
        unsigned zerofrom;
        unsigned blocksize = 1 << inode->i_blkbits;
-       void *kaddr;
 
        while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
                status = -ENOMEM;
@@ -2237,10 +2100,8 @@ int cont_prepare_write(struct page *page, unsigned offset,
                                                PAGE_CACHE_SIZE, get_block);
                if (status)
                        goto out_unmap;
-               kaddr = kmap_atomic(new_page, KM_USER0);
-               memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
-               flush_dcache_page(new_page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, zerofrom, PAGE_CACHE_SIZE - zerofrom,
+                               KM_USER0);
                generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
                unlock_page(new_page);
                page_cache_release(new_page);
@@ -2267,10 +2128,7 @@ int cont_prepare_write(struct page *page, unsigned offset,
        if (status)
                goto out1;
        if (zerofrom < offset) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr+zerofrom, 0, offset-zerofrom);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, zerofrom, offset - zerofrom, KM_USER0);
                __block_commit_write(inode, page, zerofrom, offset);
        }
        return 0;
@@ -2311,7 +2169,7 @@ int generic_commit_write(struct file *file, struct page *page,
        __block_commit_write(inode,page,from,to);
        /*
         * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_sem.
+        * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
@@ -2362,7 +2220,6 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
        int i;
        int ret = 0;
        int is_mapped_to_disk = 1;
-       int dirtied_it = 0;
 
        if (PageMappedToDisk(page))
                return 0;
@@ -2385,6 +2242,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
                create = 1;
                if (block_start >= to)
                        create = 0;
+               map_bh.b_size = blocksize;
                ret = get_block(inode, block_in_file + block_in_page,
                                        &map_bh, create);
                if (ret)
@@ -2398,14 +2256,10 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
                        continue;
                if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
                        kaddr = kmap_atomic(page, KM_USER0);
-                       if (block_start < from) {
+                       if (block_start < from)
                                memset(kaddr+block_start, 0, from-block_start);
-                               dirtied_it = 1;
-                       }
-                       if (block_end > to) {
+                       if (block_end > to)
                                memset(kaddr + to, 0, block_end - to);
-                               dirtied_it = 1;
-                       }
                        flush_dcache_page(page);
                        kunmap_atomic(kaddr, KM_USER0);
                        continue;
@@ -2460,17 +2314,6 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 
        if (is_mapped_to_disk)
                SetPageMappedToDisk(page);
-       SetPageUptodate(page);
-
-       /*
-        * Setting the page dirty here isn't necessary for the prepare_write
-        * function - commit_write will do that.  But if/when this function is
-        * used within the pagefault handler to ensure that all mmapped pages
-        * have backing space in the filesystem, we will need to dirty the page
-        * if its contents were altered.
-        */
-       if (dirtied_it)
-               set_page_dirty(page);
 
        return 0;
 
@@ -2484,21 +2327,24 @@ failed:
         * Error recovery is pretty slack.  Clear the page and mark it dirty
         * so we'll later zero out any blocks which _were_ allocated.
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
        SetPageUptodate(page);
        set_page_dirty(page);
        return ret;
 }
 EXPORT_SYMBOL(nobh_prepare_write);
 
+/*
+ * Make sure any changes to nobh_commit_write() are reflected in
+ * nobh_truncate_page(), since it doesn't call commit_write().
+ */
 int nobh_commit_write(struct file *file, struct page *page,
                unsigned from, unsigned to)
 {
        struct inode *inode = page->mapping->host;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
+       SetPageUptodate(page);
        set_page_dirty(page);
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
@@ -2520,7 +2366,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
        int ret;
 
        /* Is the page fully inside i_size? */
@@ -2551,10 +2396,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
@@ -2574,8 +2416,7 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned to;
        struct page *page;
-       struct address_space_operations *a_ops = mapping->a_ops;
-       char *kaddr;
+       const struct address_space_operations *a_ops = mapping->a_ops;
        int ret = 0;
 
        if ((offset & (blocksize - 1)) == 0)
@@ -2589,10 +2430,13 @@ int nobh_truncate_page(struct address_space *mapping, loff_t from)
        to = (offset + blocksize) & ~(blocksize - 1);
        ret = a_ops->prepare_write(NULL, page, offset, to);
        if (ret == 0) {
-               kaddr = kmap_atomic(page, KM_USER0);
-               memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-               flush_dcache_page(page);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
+                               KM_USER0);
+               /*
+                * It would be more correct to call aops->commit_write()
+                * here, but this is more efficient.
+                */
+               SetPageUptodate(page);
                set_page_dirty(page);
        }
        unlock_page(page);
@@ -2608,12 +2452,11 @@ int block_truncate_page(struct address_space *mapping,
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
-       pgoff_t iblock;
+       sector_t iblock;
        unsigned length, pos;
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head *bh;
-       void *kaddr;
        int err;
 
        blocksize = 1 << inode->i_blkbits;
@@ -2624,7 +2467,7 @@ int block_truncate_page(struct address_space *mapping,
                return 0;
 
        length = blocksize - length;
-       iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        
        page = grab_cache_page(mapping, index);
        err = -ENOMEM;
@@ -2645,6 +2488,7 @@ int block_truncate_page(struct address_space *mapping,
 
        err = 0;
        if (!buffer_mapped(bh)) {
+               WARN_ON(bh->b_size != blocksize);
                err = get_block(inode, iblock, bh, 0);
                if (err)
                        goto unlock;
@@ -2657,7 +2501,7 @@ int block_truncate_page(struct address_space *mapping,
        if (PageUptodate(page))
                set_buffer_uptodate(bh);
 
-       if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
+       if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
                err = -EIO;
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);
@@ -2666,11 +2510,7 @@ int block_truncate_page(struct address_space *mapping,
                        goto unlock;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, length);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
-
+       zero_user_page(page, offset, length, KM_USER0);
        mark_buffer_dirty(bh);
        err = 0;
 
@@ -2691,7 +2531,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;
-       void *kaddr;
 
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
@@ -2717,10 +2556,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
+       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
        return __block_write_full_page(inode, page, get_block, wbc);
 }
 
@@ -2731,6 +2567,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
        struct inode *inode = mapping->host;
        tmp.b_state = 0;
        tmp.b_blocknr = 0;
+       tmp.b_size = 1 << inode->i_blkbits;
        get_block(inode, block, &tmp, 0);
        return tmp.b_blocknr;
 }
@@ -2838,22 +2675,22 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
                else if (test_set_buffer_locked(bh))
                        continue;
 
-               get_bh(bh);
                if (rw == WRITE || rw == SWRITE) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
+                               get_bh(bh);
                                submit_bh(WRITE, bh);
                                continue;
                        }
                } else {
                        if (!buffer_uptodate(bh)) {
                                bh->b_end_io = end_buffer_read_sync;
+                               get_bh(bh);
                                submit_bh(rw, bh);
                                continue;
                        }
                }
                unlock_buffer(bh);
-               put_bh(bh);
        }
 }
 
@@ -2957,17 +2794,23 @@ int try_to_free_buffers(struct page *page)
 
        spin_lock(&mapping->private_lock);
        ret = drop_buffers(page, &buffers_to_free);
-       if (ret) {
-               /*
-                * If the filesystem writes its buffers by hand (eg ext3)
-                * then we can have clean buffers against a dirty page.  We
-                * clean the page here; otherwise later reattachment of buffers
-                * could encounter a non-uptodate page, which is unresolvable.
-                * This only applies in the rare case where try_to_free_buffers
-                * succeeds but the page is not freed.
-                */
-               clear_page_dirty(page);
-       }
+
+       /*
+        * If the filesystem writes its buffers by hand (eg ext3)
+        * then we can have clean buffers against a dirty page.  We
+        * clean the page here; otherwise the VM will never notice
+        * that the filesystem did any IO at all.
+        *
+        * Also, during truncate, discard_buffer will have marked all
+        * the page's buffers clean.  We discover that here and clean
+        * the page also.
+        *
+        * private_lock must be held over this entire operation in order
+        * to synchronise against __set_page_dirty_buffers and prevent the
+        * dirty bit from being lost.
+        */
+       if (ret)
+               cancel_dirty_page(page, PAGE_CACHE_SIZE);
        spin_unlock(&mapping->private_lock);
 out:
        if (buffers_to_free) {
@@ -2983,7 +2826,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
 {
        struct address_space *mapping;
 
@@ -2991,7 +2834,6 @@ int block_sync_page(struct page *page)
        mapping = page_mapping(page);
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, page);
-       return 0;
 }
 
 /*
@@ -3024,7 +2866,7 @@ asmlinkage long sys_bdflush(int func, long data)
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -3049,15 +2891,16 @@ static void recalc_bh_state(void)
        if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
                return;
        __get_cpu_var(bh_accounting).ratelimit = 0;
-       for_each_cpu(i)
+       for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
+               INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
                recalc_bh_state();
                put_cpu_var(bh_accounting);
@@ -3076,19 +2919,6 @@ void free_buffer_head(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(free_buffer_head);
 
-static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
-{
-       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-                           SLAB_CTOR_CONSTRUCTOR) {
-               struct buffer_head * bh = (struct buffer_head *)data;
-
-               memset(bh, 0, sizeof(*bh));
-               INIT_LIST_HEAD(&bh->b_assoc_buffers);
-       }
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
        int i;
@@ -3098,24 +2928,25 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
+       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       per_cpu(bh_accounting, cpu).nr = 0;
+       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
 {
-       if (action == CPU_DEAD)
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init buffer_init(void)
 {
        int nrpages;
 
-       bh_cachep = kmem_cache_create("buffer_head",
-                       sizeof(struct buffer_head), 0,
-                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
+       bh_cachep = KMEM_CACHE(buffer_head,
+                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
@@ -3135,7 +2966,6 @@ EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
 EXPORT_SYMBOL(cont_prepare_write);
-EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
 EXPORT_SYMBOL(file_fsync);
@@ -3143,6 +2973,7 @@ EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
 EXPORT_SYMBOL(ll_rw_block);
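
Tying back to the commit title: with SLAB_CTOR_CONSTRUCTOR gone, the buffer_head cache no longer depends on a slab constructor to zero objects and initialise b_assoc_buffers; alloc_buffer_head() now does both on every allocation. A simplified before/after sketch, condensed from the hunks above (not literal source):

	/* Before: a constructor initialised each slab object once. */
	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
			init_buffer_head, NULL);

	/* After: a plain cache; objects are zeroed and re-initialised
	 * at allocation time instead.
	 */
	bh_cachep = KMEM_CACHE(buffer_head,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	struct buffer_head *bh = kmem_cache_zalloc(bh_cachep, GFP_NOFS);
	if (bh)
		INIT_LIST_HEAD(&bh->b_assoc_buffers);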