X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fbuffer.c;h=826baf4f04bc738fdfea27f6652bc3c0bfe9969d;hb=1e88328111aae3ea408f346763ba9f9bad71f876;hp=7cdf48a9a50105c4a074d53f807077c398f4fdb5;hpb=858119e159384308a5dde67776691a2ebf70df0f;p=safe%2Fjmp%2Flinux-2.6 diff --git a/fs/buffer.c b/fs/buffer.c index 7cdf48a..826baf4 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -18,14 +18,12 @@ * async buffer flushing, 1999 Andrea Arcangeli */ -#include #include #include #include #include #include #include -#include #include #include #include @@ -36,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -44,7 +43,6 @@ #include static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); -static void invalidate_bh_lrus(void); #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers) @@ -78,6 +76,7 @@ EXPORT_SYMBOL(__lock_buffer); void fastcall unlock_buffer(struct buffer_head *bh) { + smp_mb__before_clear_bit(); clear_buffer_locked(bh); smp_mb__after_clear_bit(); wake_up_bit(&bh->b_state, BH_Lock); @@ -111,10 +110,14 @@ static void buffer_io_error(struct buffer_head *bh) } /* - * Default synchronous end-of-IO handler.. Just mark it up-to-date and - * unlock the buffer. This is what ll_rw_block uses too. + * End-of-IO handler helper function which does not touch the bh after + * unlocking it. + * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but + * a race there is benign: unlock_buffer() only use the bh's address for + * hashing after unlocking the buffer, so it doesn't actually touch the bh + * itself. */ -void end_buffer_read_sync(struct buffer_head *bh, int uptodate) +static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate) { if (uptodate) { set_buffer_uptodate(bh); @@ -123,6 +126,15 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate) clear_buffer_uptodate(bh); } unlock_buffer(bh); +} + +/* + * Default synchronous end-of-IO handler.. Just mark it up-to-date and + * unlock the buffer. This is what ll_rw_block uses too. + */ +void end_buffer_read_sync(struct buffer_head *bh, int uptodate) +{ + __end_buffer_read_notouch(bh, uptodate); put_bh(bh); } @@ -162,27 +174,6 @@ EXPORT_SYMBOL(sync_blockdev); /* * Write out and wait upon all dirty data associated with this - * superblock. Filesystem data as well as the underlying block - * device. Takes the superblock lock. - */ -int fsync_super(struct super_block *sb) -{ - sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); - lock_super(sb); - if (sb->s_dirt && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); - sync_inodes_sb(sb, 1); - - return sync_blockdev(sb->s_bdev); -} - -/* - * Write out and wait upon all dirty data associated with this * device. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ @@ -216,19 +207,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) sb->s_frozen = SB_FREEZE_WRITE; smp_wmb(); - sync_inodes_sb(sb, 0); - DQUOT_SYNC(sb); - - lock_super(sb); - if (sb->s_dirt && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - - sync_blockdev(sb->s_bdev); - sync_inodes_sb(sb, 1); + __fsync_super(sb); sb->s_frozen = SB_FREEZE_TRANS; smp_wmb(); @@ -269,117 +248,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) EXPORT_SYMBOL(thaw_bdev); /* - * sync everything. 
Start out by waking pdflush, because that writes back - * all queues in parallel. - */ -static void do_sync(unsigned long wait) -{ - wakeup_pdflush(0); - sync_inodes(0); /* All mappings, inodes and their blockdevs */ - DQUOT_SYNC(NULL); - sync_supers(); /* Write the superblocks */ - sync_filesystems(0); /* Start syncing the filesystems */ - sync_filesystems(wait); /* Waitingly sync the filesystems */ - sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */ - if (!wait) - printk("Emergency Sync complete\n"); - if (unlikely(laptop_mode)) - laptop_sync_completion(); -} - -asmlinkage long sys_sync(void) -{ - do_sync(1); - return 0; -} - -void emergency_sync(void) -{ - pdflush_operation(do_sync, 0); -} - -/* - * Generic function to fsync a file. - * - * filp may be NULL if called via the msync of a vma. - */ - -int file_fsync(struct file *filp, struct dentry *dentry, int datasync) -{ - struct inode * inode = dentry->d_inode; - struct super_block * sb; - int ret, err; - - /* sync the inode to buffers */ - ret = write_inode_now(inode, 0); - - /* sync the superblock to buffers */ - sb = inode->i_sb; - lock_super(sb); - if (sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - - /* .. finally sync the buffers to disk */ - err = sync_blockdev(sb->s_bdev); - if (!ret) - ret = err; - return ret; -} - -static long do_fsync(unsigned int fd, int datasync) -{ - struct file * file; - struct address_space *mapping; - int ret, err; - - ret = -EBADF; - file = fget(fd); - if (!file) - goto out; - - ret = -EINVAL; - if (!file->f_op || !file->f_op->fsync) { - /* Why? We can still call filemap_fdatawrite */ - goto out_putf; - } - - mapping = file->f_mapping; - - current->flags |= PF_SYNCWRITE; - ret = filemap_fdatawrite(mapping); - - /* - * We need to protect against concurrent writers, - * which could cause livelocks in fsync_buffers_list - */ - mutex_lock(&mapping->host->i_mutex); - err = file->f_op->fsync(file, file->f_dentry, datasync); - if (!ret) - ret = err; - mutex_unlock(&mapping->host->i_mutex); - err = filemap_fdatawait(mapping); - if (!ret) - ret = err; - current->flags &= ~PF_SYNCWRITE; - -out_putf: - fput(file); -out: - return ret; -} - -asmlinkage long sys_fsync(unsigned int fd) -{ - return do_fsync(fd, 0); -} - -asmlinkage long sys_fdatasync(unsigned int fd) -{ - return do_fsync(fd, 1); -} - -/* * Various filesystems appear to want __find_get_block to be non-blocking. * But it's the page lock which protects the buffers. To get around this, * we get exclusion from try_to_free_buffers with the blockdev mapping's @@ -431,8 +299,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) if (all_mapped) { printk("__find_get_block_slow() failed. " "block=%llu, b_blocknr=%llu\n", - (unsigned long long)block, (unsigned long long)bh->b_blocknr); - printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size); + (unsigned long long)block, + (unsigned long long)bh->b_blocknr); + printk("b_state=0x%08lx, b_size=%zu\n", + bh->b_state, bh->b_size); printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); } out_unlock: @@ -474,15 +344,15 @@ out: we think the disk contains more recent information than the buffercache. The update == 1 pass marks the buffers we need to update, the update == 2 pass does the actual I/O. 
*/ -void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers) +void invalidate_bdev(struct block_device *bdev) { + struct address_space *mapping = bdev->bd_inode->i_mapping; + + if (mapping->nrpages == 0) + return; + invalidate_bh_lrus(); - /* - * FIXME: what about destroy_dirty_buffers? - * We really want to use invalidate_inode_pages2() for - * that, but not until that's cleaned up. - */ - invalidate_inode_pages(bdev->bd_inode->i_mapping); + invalidate_mapping_pages(mapping, 0, -1); } /* @@ -496,10 +366,10 @@ static void free_more_memory(void) wakeup_pdflush(1024); yield(); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; if (*zones) - try_to_free_pages(zones, GFP_NOFS); + try_to_free_pages(zones, 0, GFP_NOFS); } } @@ -569,7 +439,7 @@ still_busy: * Completion handler for block_write_full_page() - pages which are unlocked * during I/O, and which have PageWriteback cleared upon I/O completion. */ -void end_buffer_async_write(struct buffer_head *bh, int uptodate) +static void end_buffer_async_write(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; unsigned long flags; @@ -590,6 +460,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) bdevname(bh->b_bdev, b)); } set_bit(AS_EIO, &page->mapping->flags); + set_buffer_write_io_error(bh); clear_buffer_uptodate(bh); SetPageError(page); } @@ -709,6 +580,10 @@ EXPORT_SYMBOL(mark_buffer_async_write); static inline void __remove_assoc_queue(struct buffer_head *bh) { list_del_init(&bh->b_assoc_buffers); + WARN_ON(!bh->b_assoc_map); + if (buffer_write_io_error(bh)) + set_bit(AS_EIO, &bh->b_assoc_map->flags); + bh->b_assoc_map = NULL; } int inode_has_buffers(struct inode *inode) @@ -801,19 +676,54 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) if (!mapping->assoc_mapping) { mapping->assoc_mapping = buffer_mapping; } else { - if (mapping->assoc_mapping != buffer_mapping) - BUG(); + BUG_ON(mapping->assoc_mapping != buffer_mapping); } if (list_empty(&bh->b_assoc_buffers)) { spin_lock(&buffer_mapping->private_lock); list_move_tail(&bh->b_assoc_buffers, &mapping->private_list); + bh->b_assoc_map = mapping; spin_unlock(&buffer_mapping->private_lock); } } EXPORT_SYMBOL(mark_buffer_dirty_inode); /* + * Mark the page dirty, and set it dirty in the radix tree, and mark the inode + * dirty. + * + * If warn is true, then emit a warning if the page is not uptodate and has + * not been truncated. + */ +static int __set_page_dirty(struct page *page, + struct address_space *mapping, int warn) +{ + if (unlikely(!mapping)) + return !TestSetPageDirty(page); + + if (TestSetPageDirty(page)) + return 0; + + write_lock_irq(&mapping->tree_lock); + if (page->mapping) { /* Race with truncate? */ + WARN_ON_ONCE(warn && !PageUptodate(page)); + + if (mapping_cap_account_dirty(mapping)) { + __inc_zone_page_state(page, NR_FILE_DIRTY); + __inc_bdi_stat(mapping->backing_dev_info, + BDI_RECLAIMABLE); + task_io_account_write(PAGE_CACHE_SIZE); + } + radix_tree_tag_set(&mapping->page_tree, + page_index(page), PAGECACHE_TAG_DIRTY); + } + write_unlock_irq(&mapping->tree_lock); + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + + return 1; +} + +/* * Add a page to the dirty page list. 
* * It is a sad fact of life that this function is called from several places @@ -840,7 +750,10 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode); */ int __set_page_dirty_buffers(struct page *page) { - struct address_space * const mapping = page->mapping; + struct address_space *mapping = page_mapping(page); + + if (unlikely(!mapping)) + return !TestSetPageDirty(page); spin_lock(&mapping->private_lock); if (page_has_buffers(page)) { @@ -854,20 +767,7 @@ int __set_page_dirty_buffers(struct page *page) } spin_unlock(&mapping->private_lock); - if (!TestSetPageDirty(page)) { - write_lock_irq(&mapping->tree_lock); - if (page->mapping) { /* Race with truncate? */ - if (mapping_cap_account_dirty(mapping)) - inc_page_state(nr_dirty); - radix_tree_tag_set(&mapping->page_tree, - page_index(page), - PAGECACHE_TAG_DIRTY); - } - write_unlock_irq(&mapping->tree_lock); - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); - } - - return 0; + return __set_page_dirty(page, mapping, 1); } EXPORT_SYMBOL(__set_page_dirty_buffers); @@ -901,7 +801,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) spin_lock(lock); while (!list_empty(list)) { bh = BH_ENTRY(list->next); - list_del_init(&bh->b_assoc_buffers); + __remove_assoc_queue(bh); if (buffer_dirty(bh) || buffer_locked(bh)) { list_add(&bh->b_assoc_buffers, &tmp); if (buffer_dirty(bh)) { @@ -922,7 +822,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) while (!list_empty(&tmp)) { bh = BH_ENTRY(tmp.prev); - __remove_assoc_queue(bh); + list_del_init(&bh->b_assoc_buffers); get_bh(bh); spin_unlock(lock); wait_on_buffer(bh); @@ -1022,12 +922,13 @@ try_again: bh->b_state = 0; atomic_set(&bh->b_count, 0); + bh->b_private = NULL; bh->b_size = size; /* Link the buffer to its page */ set_bh_page(bh, page, offset); - bh->b_end_io = NULL; + init_buffer(bh, NULL, NULL); } return head; /* @@ -1114,12 +1015,12 @@ grow_dev_page(struct block_device *bdev, sector_t block, struct page *page; struct buffer_head *bh; - page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); + page = find_or_create_page(inode->i_mapping, index, + (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); if (!page) return NULL; - if (!PageLocked(page)) - BUG(); + BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); @@ -1159,11 +1060,6 @@ failed: /* * Create buffers for the specified block device block's page. If * that page was dirty, the buffers are set dirty also. - * - * Except that's a bug. Attaching dirty buffers to a dirty - * blockdev's page can result in filesystem corruption, because - * some of those buffers may be aliases of filesystem data. - * grow_dev_page() will go BUG() if this happens. */ static int grow_buffers(struct block_device *bdev, sector_t block, int size) @@ -1178,8 +1074,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size) } while ((size << sizebits) < PAGE_SIZE); index = block >> sizebits; - block = index << sizebits; + /* + * Check for a block which wants to lie outside our maximum possible + * pagecache index. (this comparison is done using sector_t types). + */ + if (unlikely(index != block >> sizebits)) { + char b[BDEVNAME_SIZE]; + + printk(KERN_ERR "%s: requested out-of-range block %llu for " + "device %s\n", + __FUNCTION__, (unsigned long long)block, + bdevname(bdev, b)); + return -EIO; + } + block = index << sizebits; /* Create a page with the proper size buffers.. 
*/ page = grow_dev_page(bdev, block, index, size); if (!page) @@ -1206,12 +1115,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) for (;;) { struct buffer_head * bh; + int ret; bh = __find_get_block(bdev, block, size); if (bh) return bh; - if (!grow_buffers(bdev, block, size)) + ret = grow_buffers(bdev, block, size); + if (ret < 0) + return NULL; + if (ret == 0) free_more_memory(); } } @@ -1253,8 +1166,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) */ void fastcall mark_buffer_dirty(struct buffer_head *bh) { + WARN_ON_ONCE(!buffer_uptodate(bh)); if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) - __set_page_dirty_nobuffers(bh->b_page); + __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0); } /* @@ -1286,6 +1200,7 @@ void __bforget(struct buffer_head *bh) spin_lock(&buffer_mapping->private_lock); list_del_init(&bh->b_assoc_buffers); + bh->b_assoc_map = NULL; spin_unlock(&buffer_mapping->private_lock); } __brelse(bh); @@ -1392,11 +1307,11 @@ static void bh_lru_install(struct buffer_head *bh) * Look up the bh in this cpu's LRU. If it's there, move it to the head. */ static struct buffer_head * -lookup_bh_lru(struct block_device *bdev, sector_t block, int size) +lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *ret = NULL; struct bh_lru *lru; - int i; + unsigned int i; check_irqs_on(); bh_lru_lock(); @@ -1428,7 +1343,7 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, int size) * NULL */ struct buffer_head * -__find_get_block(struct block_device *bdev, sector_t block, int size) +__find_get_block(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *bh = lookup_bh_lru(bdev, block, size); @@ -1456,7 +1371,7 @@ EXPORT_SYMBOL(__find_get_block); * attempt is failing. FIXME, perhaps? */ struct buffer_head * -__getblk(struct block_device *bdev, sector_t block, int size) +__getblk(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *bh = __find_get_block(bdev, block, size); @@ -1470,7 +1385,7 @@ EXPORT_SYMBOL(__getblk); /* * Do async read-ahead on a buffer.. */ -void __breadahead(struct block_device *bdev, sector_t block, int size) +void __breadahead(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *bh = __getblk(bdev, block, size); if (likely(bh)) { @@ -1490,7 +1405,7 @@ EXPORT_SYMBOL(__breadahead); * It returns NULL if the block was unreadable. 
*/ struct buffer_head * -__bread(struct block_device *bdev, sector_t block, int size) +__bread(struct block_device *bdev, sector_t block, unsigned size) { struct buffer_head *bh = __getblk(bdev, block, size); @@ -1517,7 +1432,7 @@ static void invalidate_bh_lru(void *arg) put_cpu_var(bh_lrus); } -static void invalidate_bh_lrus(void) +void invalidate_bh_lrus(void) { on_each_cpu(invalidate_bh_lru, NULL, 1, 1); } @@ -1526,8 +1441,7 @@ void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { bh->b_page = page; - if (offset >= PAGE_SIZE) - BUG(); + BUG_ON(offset >= PAGE_SIZE); if (PageHighMem(page)) /* * This catches illegal uses and preserves the offset: @@ -1550,39 +1464,11 @@ static void discard_buffer(struct buffer_head * bh) clear_buffer_req(bh); clear_buffer_new(bh); clear_buffer_delay(bh); + clear_buffer_unwritten(bh); unlock_buffer(bh); } /** - * try_to_release_page() - release old fs-specific metadata on a page - * - * @page: the page which the kernel is trying to free - * @gfp_mask: memory allocation flags (and I/O mode) - * - * The address_space is to try to release any data against the page - * (presumably at page->private). If the release was successful, return `1'. - * Otherwise return zero. - * - * The @gfp_mask argument specifies whether I/O may be performed to release - * this page (__GFP_IO), and whether the call may block (__GFP_WAIT). - * - * NOTE: @gfp_mask may go away, and this function may become non-blocking. - */ -int try_to_release_page(struct page *page, gfp_t gfp_mask) -{ - struct address_space * const mapping = page->mapping; - - BUG_ON(!PageLocked(page)); - if (PageWriteback(page)) - return 0; - - if (mapping && mapping->a_ops->releasepage) - return mapping->a_ops->releasepage(page, gfp_mask); - return try_to_free_buffers(page); -} -EXPORT_SYMBOL(try_to_release_page); - -/** * block_invalidatepage - invalidate part of all of a buffer-backed page * * @page: the page which is affected @@ -1597,11 +1483,10 @@ EXPORT_SYMBOL(try_to_release_page); * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ -int block_invalidatepage(struct page *page, unsigned long offset) +void block_invalidatepage(struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; - int ret = 1; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) @@ -1628,21 +1513,12 @@ int block_invalidatepage(struct page *page, unsigned long offset) * so real IO is not possible anymore. */ if (offset == 0) - ret = try_to_release_page(page, 0); + try_to_release_page(page, 0); out: - return ret; + return; } EXPORT_SYMBOL(block_invalidatepage); -int do_invalidatepage(struct page *page, unsigned long offset) -{ - int (*invalidatepage)(struct page *, unsigned long); - invalidatepage = page->mapping->a_ops->invalidatepage; - if (invalidatepage == NULL) - invalidatepage = block_invalidatepage; - return (*invalidatepage)(page, offset); -} - /* * We attach and possibly dirty the buffers atomically wrt * __set_page_dirty_buffers() via private_lock. 
try_to_free_buffers @@ -1742,6 +1618,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, sector_t block; sector_t last_block; struct buffer_head *bh, *head; + const unsigned blocksize = 1 << inode->i_blkbits; int nr_underway = 0; BUG_ON(!PageLocked(page)); @@ -1749,7 +1626,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; if (!page_has_buffers(page)) { - create_empty_buffers(page, 1 << inode->i_blkbits, + create_empty_buffers(page, blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } @@ -1784,6 +1661,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, clear_buffer_dirty(bh); set_buffer_uptodate(bh); } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) goto recover; @@ -1846,22 +1724,12 @@ done: * clean. Someone wrote them back by hand with * ll_rw_block/submit_bh. A rare case. */ - int uptodate = 1; - do { - if (!buffer_uptodate(bh)) { - uptodate = 0; - break; - } - bh = bh->b_this_page; - } while (bh != head); - if (uptodate) - SetPageUptodate(page); end_page_writeback(page); + /* * The page and buffer_heads can be released at any time from * here on. */ - wbc->pages_skipped++; /* We didn't write this page */ } return err; @@ -1888,8 +1756,8 @@ recover: } while ((bh = bh->b_this_page) != head); SetPageError(page); BUG_ON(PageWriteback(page)); + mapping_set_error(page->mapping, err); set_page_writeback(page); - unlock_page(page); do { struct buffer_head *next = bh->b_this_page; if (buffer_async_write(bh)) { @@ -1899,9 +1767,52 @@ recover: } bh = next; } while (bh != head); + unlock_page(page); goto done; } +/* + * If a page has any new buffers, zero them out here, and mark them uptodate + * and dirty so they'll be written out (in order to prevent uninitialised + * block data from leaking). And clear the new bit. 
+ */ +void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) +{ + unsigned int block_start, block_end; + struct buffer_head *head, *bh; + + BUG_ON(!PageLocked(page)); + if (!page_has_buffers(page)) + return; + + bh = head = page_buffers(page); + block_start = 0; + do { + block_end = block_start + bh->b_size; + + if (buffer_new(bh)) { + if (block_end > from && block_start < to) { + if (!PageUptodate(page)) { + unsigned start, size; + + start = max(from, block_start); + size = min(to, block_end) - start; + + zero_user(page, start, size); + set_buffer_uptodate(bh); + } + + clear_buffer_new(bh); + mark_buffer_dirty(bh); + } + } + + block_start = block_end; + bh = bh->b_this_page; + } while (bh != head); +} +EXPORT_SYMBOL(page_zero_new_buffers); + static int __block_prepare_write(struct inode *inode, struct page *page, unsigned from, unsigned to, get_block_t *get_block) { @@ -1937,6 +1848,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page, if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) break; @@ -1944,22 +1856,15 @@ static int __block_prepare_write(struct inode *inode, struct page *page, unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); if (PageUptodate(page)) { + clear_buffer_new(bh); set_buffer_uptodate(bh); + mark_buffer_dirty(bh); continue; } - if (block_end > to || block_start < from) { - void *kaddr; - - kaddr = kmap_atomic(page, KM_USER0); - if (block_end > to) - memset(kaddr+to, 0, - block_end-to); - if (block_start < from) - memset(kaddr+block_start, - 0, from-block_start); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - } + if (block_end > to || block_start < from) + zero_user_segments(page, + to, block_end, + block_start, from); continue; } } @@ -1969,6 +1874,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page, continue; } if (!buffer_uptodate(bh) && !buffer_delay(bh) && + !buffer_unwritten(bh) && (block_start < from || block_end > to)) { ll_rw_block(READ, 1, &bh); *wait_bh++=bh; @@ -1982,42 +1888,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page, if (!buffer_uptodate(*wait_bh)) err = -EIO; } - if (!err) { - bh = head; - do { - if (buffer_new(bh)) - clear_buffer_new(bh); - } while ((bh = bh->b_this_page) != head); - return 0; - } - /* Error case: */ - /* - * Zero out any newly allocated blocks to avoid exposing stale - * data. If BH_New is set, we know that the block was newly - * allocated in the above loop. - */ - bh = head; - block_start = 0; - do { - block_end = block_start+blocksize; - if (block_end <= from) - goto next_bh; - if (block_start >= to) - break; - if (buffer_new(bh)) { - void *kaddr; - - clear_buffer_new(bh); - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr+block_start, 0, bh->b_size); - kunmap_atomic(kaddr, KM_USER0); - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - } -next_bh: - block_start = block_end; - bh = bh->b_this_page; - } while (bh != head); + if (unlikely(err)) + page_zero_new_buffers(page, from, to); return err; } @@ -2042,6 +1914,7 @@ static int __block_commit_write(struct inode *inode, struct page *page, set_buffer_uptodate(bh); mark_buffer_dirty(bh); } + clear_buffer_new(bh); } /* @@ -2056,6 +1929,130 @@ static int __block_commit_write(struct inode *inode, struct page *page, } /* + * block_write_begin takes care of the basic task of block allocation and + * bringing partial write blocks uptodate first. 
+ * + * If *pagep is not NULL, then block_write_begin uses the locked page + * at *pagep rather than allocating its own. In this case, the page will + * not be unlocked or deallocated on failure. + */ +int block_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata, + get_block_t *get_block) +{ + struct inode *inode = mapping->host; + int status = 0; + struct page *page; + pgoff_t index; + unsigned start, end; + int ownpage = 0; + + index = pos >> PAGE_CACHE_SHIFT; + start = pos & (PAGE_CACHE_SIZE - 1); + end = start + len; + + page = *pagep; + if (page == NULL) { + ownpage = 1; + page = __grab_cache_page(mapping, index); + if (!page) { + status = -ENOMEM; + goto out; + } + *pagep = page; + } else + BUG_ON(!PageLocked(page)); + + status = __block_prepare_write(inode, page, start, end, get_block); + if (unlikely(status)) { + ClearPageUptodate(page); + + if (ownpage) { + unlock_page(page); + page_cache_release(page); + *pagep = NULL; + + /* + * prepare_write() may have instantiated a few blocks + * outside i_size. Trim these off again. Don't need + * i_size_read because we hold i_mutex. + */ + if (pos + len > inode->i_size) + vmtruncate(inode, inode->i_size); + } + goto out; + } + +out: + return status; +} +EXPORT_SYMBOL(block_write_begin); + +int block_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + unsigned start; + + start = pos & (PAGE_CACHE_SIZE - 1); + + if (unlikely(copied < len)) { + /* + * The buffers that were written will now be uptodate, so we + * don't have to worry about a readpage reading them and + * overwriting a partial write. However if we have encountered + * a short write and only partially written into a buffer, it + * will not be marked uptodate, so a readpage might come in and + * destroy our partial write. + * + * Do the simplest thing, and just treat any short write to a + * non uptodate page as a zero-length write, and force the + * caller to redo the whole thing. + */ + if (!PageUptodate(page)) + copied = 0; + + page_zero_new_buffers(page, start+copied, start+len); + } + flush_dcache_page(page); + + /* This could be a short (even 0-length) commit */ + __block_commit_write(inode, page, start, start+copied); + + return copied; +} +EXPORT_SYMBOL(block_write_end); + +int generic_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + + copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* + * No need to use i_size_read() here, the i_size + * cannot change under us because we hold i_mutex. + * + * But it's important to update i_size while still holding page lock: + * page writeout could otherwise come in and zero beyond i_size. + */ + if (pos+copied > inode->i_size) { + i_size_write(inode, pos+copied); + mark_inode_dirty(inode); + } + + unlock_page(page); + page_cache_release(page); + + return copied; +} +EXPORT_SYMBOL(generic_write_end); + +/* * Generic "read page" function for block devices that have the normal * get_block functionality. This is most of the block device filesystems. 
* Reads the page asynchronously --- the unlock_buffer() and @@ -2092,15 +2089,13 @@ int block_read_full_page(struct page *page, get_block_t *get_block) fully_mapped = 0; if (iblock < lblock) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) SetPageError(page); } if (!buffer_mapped(bh)) { - void *kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + i * blocksize, 0, blocksize); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user(page, i * blocksize, blocksize); if (!err) set_buffer_uptodate(bh); continue; @@ -2152,14 +2147,14 @@ int block_read_full_page(struct page *page, get_block_t *get_block) } /* utility function for filesystems that need to do work on expanding - * truncates. Uses prepare/commit_write to allow the filesystem to + * truncates. Uses filesystem pagecache writes to allow the filesystem to * deal with the hole. */ -static int __generic_cont_expand(struct inode *inode, loff_t size, - pgoff_t index, unsigned int offset) +int generic_cont_expand_simple(struct inode *inode, loff_t size) { struct address_space *mapping = inode->i_mapping; struct page *page; + void *fsdata; unsigned long limit; int err; @@ -2172,146 +2167,115 @@ static int __generic_cont_expand(struct inode *inode, loff_t size, if (size > inode->i_sb->s_maxbytes) goto out; - err = -ENOMEM; - page = grab_cache_page(mapping, index); - if (!page) + err = pagecache_write_begin(NULL, mapping, size, 0, + AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND, + &page, &fsdata); + if (err) goto out; - err = mapping->a_ops->prepare_write(NULL, page, offset, offset); - if (err) { - /* - * ->prepare_write() may have instantiated a few blocks - * outside i_size. Trim these off again. - */ - unlock_page(page); - page_cache_release(page); - vmtruncate(inode, inode->i_size); - goto out; - } - err = mapping->a_ops->commit_write(NULL, page, offset, offset); + err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); + BUG_ON(err > 0); - unlock_page(page); - page_cache_release(page); - if (err > 0) - err = 0; out: return err; } -int generic_cont_expand(struct inode *inode, loff_t size) +int cont_expand_zero(struct file *file, struct address_space *mapping, + loff_t pos, loff_t *bytes) { - pgoff_t index; - unsigned int offset; + struct inode *inode = mapping->host; + unsigned blocksize = 1 << inode->i_blkbits; + struct page *page; + void *fsdata; + pgoff_t index, curidx; + loff_t curpos; + unsigned zerofrom, offset, len; + int err = 0; - offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */ + index = pos >> PAGE_CACHE_SHIFT; + offset = pos & ~PAGE_CACHE_MASK; - /* ugh. in prepare/commit_write, if from==to==start of block, we - ** skip the prepare. make sure we never send an offset for the start - ** of a block - */ - if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { - /* caller must handle this extra byte. 
*/ - offset++; - } - index = size >> PAGE_CACHE_SHIFT; + while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { + zerofrom = curpos & ~PAGE_CACHE_MASK; + if (zerofrom & (blocksize-1)) { + *bytes |= (blocksize-1); + (*bytes)++; + } + len = PAGE_CACHE_SIZE - zerofrom; - return __generic_cont_expand(inode, size, index, offset); -} + err = pagecache_write_begin(file, mapping, curpos, len, + AOP_FLAG_UNINTERRUPTIBLE, + &page, &fsdata); + if (err) + goto out; + zero_user(page, zerofrom, len); + err = pagecache_write_end(file, mapping, curpos, len, len, + page, fsdata); + if (err < 0) + goto out; + BUG_ON(err != len); + err = 0; + } -int generic_cont_expand_simple(struct inode *inode, loff_t size) -{ - loff_t pos = size - 1; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1; + /* page covers the boundary, find the boundary offset */ + if (index == curidx) { + zerofrom = curpos & ~PAGE_CACHE_MASK; + /* if we will expand the thing last block will be filled */ + if (offset <= zerofrom) { + goto out; + } + if (zerofrom & (blocksize-1)) { + *bytes |= (blocksize-1); + (*bytes)++; + } + len = offset - zerofrom; - /* prepare/commit_write can handle even if from==to==start of block. */ - return __generic_cont_expand(inode, size, index, offset); + err = pagecache_write_begin(file, mapping, curpos, len, + AOP_FLAG_UNINTERRUPTIBLE, + &page, &fsdata); + if (err) + goto out; + zero_user(page, zerofrom, len); + err = pagecache_write_end(file, mapping, curpos, len, len, + page, fsdata); + if (err < 0) + goto out; + BUG_ON(err != len); + err = 0; + } +out: + return err; } /* * For moronic filesystems that do not allow holes in file. * We may have to extend the file. */ - -int cont_prepare_write(struct page *page, unsigned offset, - unsigned to, get_block_t *get_block, loff_t *bytes) +int cont_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata, + get_block_t *get_block, loff_t *bytes) { - struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; - struct page *new_page; - pgoff_t pgpos; - long status; - unsigned zerofrom; unsigned blocksize = 1 << inode->i_blkbits; - void *kaddr; - - while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) { - status = -ENOMEM; - new_page = grab_cache_page(mapping, pgpos); - if (!new_page) - goto out; - /* we might sleep */ - if (*bytes>>PAGE_CACHE_SHIFT != pgpos) { - unlock_page(new_page); - page_cache_release(new_page); - continue; - } - zerofrom = *bytes & ~PAGE_CACHE_MASK; - if (zerofrom & (blocksize-1)) { - *bytes |= (blocksize-1); - (*bytes)++; - } - status = __block_prepare_write(inode, new_page, zerofrom, - PAGE_CACHE_SIZE, get_block); - if (status) - goto out_unmap; - kaddr = kmap_atomic(new_page, KM_USER0); - memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom); - flush_dcache_page(new_page); - kunmap_atomic(kaddr, KM_USER0); - generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE); - unlock_page(new_page); - page_cache_release(new_page); - } - - if (page->index < pgpos) { - /* completely inside the area */ - zerofrom = offset; - } else { - /* page covers the boundary, find the boundary offset */ - zerofrom = *bytes & ~PAGE_CACHE_MASK; + unsigned zerofrom; + int err; - /* if we will expand the thing last block will be filled */ - if (to > zerofrom && (zerofrom & (blocksize-1))) { - *bytes |= (blocksize-1); - (*bytes)++; - } + err = cont_expand_zero(file, mapping, pos, bytes); + if (err) + 
goto out; - /* starting below the boundary? Nothing to zero out */ - if (offset <= zerofrom) - zerofrom = offset; - } - status = __block_prepare_write(inode, page, zerofrom, to, get_block); - if (status) - goto out1; - if (zerofrom < offset) { - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr+zerofrom, 0, offset-zerofrom); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - __block_commit_write(inode, page, zerofrom, offset); + zerofrom = *bytes & ~PAGE_CACHE_MASK; + if (pos+len > *bytes && zerofrom & (blocksize-1)) { + *bytes |= (blocksize-1); + (*bytes)++; } - return 0; -out1: - ClearPageUptodate(page); - return status; -out_unmap: - ClearPageUptodate(new_page); - unlock_page(new_page); - page_cache_release(new_page); + *pagep = NULL; + err = block_write_begin(file, mapping, pos, len, + flags, pagep, fsdata, get_block); out: - return status; + return err; } int block_prepare_write(struct page *page, unsigned from, unsigned to, @@ -2348,139 +2312,200 @@ int generic_commit_write(struct file *file, struct page *page, return 0; } +/* + * block_page_mkwrite() is not allowed to change the file size as it gets + * called from a page fault handler when a page is first dirtied. Hence we must + * be careful to check for EOF conditions here. We set the page up correctly + * for a written page which means we get ENOSPC checking when writing into + * holes and correct delalloc and unwritten extent mapping on filesystems that + * support these features. + * + * We are not allowed to take the i_mutex here so we have to play games to + * protect against truncate races as the page could now be beyond EOF. Because + * vmtruncate() writes the inode size before removing pages, once we have the + * page lock we can determine safely if the page is beyond EOF. If it is not + * beyond EOF, then the page is guaranteed safe against truncation until we + * unlock the page. + */ +int +block_page_mkwrite(struct vm_area_struct *vma, struct page *page, + get_block_t get_block) +{ + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + unsigned long end; + loff_t size; + int ret = -EINVAL; + + lock_page(page); + size = i_size_read(inode); + if ((page->mapping != inode->i_mapping) || + (page_offset(page) > size)) { + /* page got truncated out from underneath us */ + goto out_unlock; + } + + /* page is wholly or partially inside EOF */ + if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) + end = size & ~PAGE_CACHE_MASK; + else + end = PAGE_CACHE_SIZE; + + ret = block_prepare_write(page, 0, end, get_block); + if (!ret) + ret = block_commit_write(page, 0, end); + +out_unlock: + unlock_page(page); + return ret; +} /* - * nobh_prepare_write()'s prereads are special: the buffer_heads are freed + * nobh_write_begin()'s prereads are special: the buffer_heads are freed * immediately, while under the page lock. So it needs a special end_io * handler which does not touch the bh after unlocking it. - * - * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but - * a race there is benign: unlock_buffer() only use the bh's address for - * hashing after unlocking the buffer, so it doesn't actually touch the bh - * itself. */ static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate) { - if (uptodate) { - set_buffer_uptodate(bh); - } else { - /* This happens, due to failed READA attempts. 
*/ - clear_buffer_uptodate(bh); - } - unlock_buffer(bh); + __end_buffer_read_notouch(bh, uptodate); +} + +/* + * Attach the singly-linked list of buffers created by nobh_write_begin, to + * the page (converting it to circular linked list and taking care of page + * dirty races). + */ +static void attach_nobh_buffers(struct page *page, struct buffer_head *head) +{ + struct buffer_head *bh; + + BUG_ON(!PageLocked(page)); + + spin_lock(&page->mapping->private_lock); + bh = head; + do { + if (PageDirty(page)) + set_buffer_dirty(bh); + if (!bh->b_this_page) + bh->b_this_page = head; + bh = bh->b_this_page; + } while (bh != head); + attach_page_buffers(page, head); + spin_unlock(&page->mapping->private_lock); } /* * On entry, the page is fully not uptodate. * On exit the page is fully uptodate in the areas outside (from,to) */ -int nobh_prepare_write(struct page *page, unsigned from, unsigned to, +int nobh_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata, get_block_t *get_block) { - struct inode *inode = page->mapping->host; + struct inode *inode = mapping->host; const unsigned blkbits = inode->i_blkbits; const unsigned blocksize = 1 << blkbits; - struct buffer_head map_bh; - struct buffer_head *read_bh[MAX_BUF_PER_PAGE]; + struct buffer_head *head, *bh; + struct page *page; + pgoff_t index; + unsigned from, to; unsigned block_in_page; - unsigned block_start; + unsigned block_start, block_end; sector_t block_in_file; - char *kaddr; int nr_reads = 0; - int i; int ret = 0; int is_mapped_to_disk = 1; - int dirtied_it = 0; + + index = pos >> PAGE_CACHE_SHIFT; + from = pos & (PAGE_CACHE_SIZE - 1); + to = from + len; + + page = __grab_cache_page(mapping, index); + if (!page) + return -ENOMEM; + *pagep = page; + *fsdata = NULL; + + if (page_has_buffers(page)) { + unlock_page(page); + page_cache_release(page); + *pagep = NULL; + return block_write_begin(file, mapping, pos, len, flags, pagep, + fsdata, get_block); + } if (PageMappedToDisk(page)) return 0; + /* + * Allocate buffers so that we can keep track of state, and potentially + * attach them to the page if an error occurs. In the common case of + * no error, they will just be freed again without ever being attached + * to the page (which is all OK, because we're under the page lock). + * + * Be careful: the buffer linked list is a NULL terminated one, rather + * than the circular one we're used to. + */ + head = alloc_page_buffers(page, blocksize, 0); + if (!head) { + ret = -ENOMEM; + goto out_release; + } + block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); - map_bh.b_page = page; /* * We loop across all blocks in the page, whether or not they are * part of the affected region. This is so we can discover if the * page is fully mapped-to-disk. 
*/ - for (block_start = 0, block_in_page = 0; + for (block_start = 0, block_in_page = 0, bh = head; block_start < PAGE_CACHE_SIZE; - block_in_page++, block_start += blocksize) { - unsigned block_end = block_start + blocksize; + block_in_page++, block_start += blocksize, bh = bh->b_this_page) { int create; - map_bh.b_state = 0; + block_end = block_start + blocksize; + bh->b_state = 0; create = 1; if (block_start >= to) create = 0; ret = get_block(inode, block_in_file + block_in_page, - &map_bh, create); + bh, create); if (ret) goto failed; - if (!buffer_mapped(&map_bh)) + if (!buffer_mapped(bh)) is_mapped_to_disk = 0; - if (buffer_new(&map_bh)) - unmap_underlying_metadata(map_bh.b_bdev, - map_bh.b_blocknr); - if (PageUptodate(page)) + if (buffer_new(bh)) + unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); + if (PageUptodate(page)) { + set_buffer_uptodate(bh); continue; - if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) { - kaddr = kmap_atomic(page, KM_USER0); - if (block_start < from) { - memset(kaddr+block_start, 0, from-block_start); - dirtied_it = 1; - } - if (block_end > to) { - memset(kaddr + to, 0, block_end - to); - dirtied_it = 1; - } - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + } + if (buffer_new(bh) || !buffer_mapped(bh)) { + zero_user_segments(page, block_start, from, + to, block_end); continue; } - if (buffer_uptodate(&map_bh)) + if (buffer_uptodate(bh)) continue; /* reiserfs does this */ if (block_start < from || block_end > to) { - struct buffer_head *bh = alloc_buffer_head(GFP_NOFS); - - if (!bh) { - ret = -ENOMEM; - goto failed; - } - bh->b_state = map_bh.b_state; - atomic_set(&bh->b_count, 0); - bh->b_this_page = NULL; - bh->b_page = page; - bh->b_blocknr = map_bh.b_blocknr; - bh->b_size = blocksize; - bh->b_data = (char *)(long)block_start; - bh->b_bdev = map_bh.b_bdev; - bh->b_private = NULL; - read_bh[nr_reads++] = bh; + lock_buffer(bh); + bh->b_end_io = end_buffer_read_nobh; + submit_bh(READ, bh); + nr_reads++; } } if (nr_reads) { - struct buffer_head *bh; - /* * The page is locked, so these buffers are protected from * any VM or truncate activity. Hence we don't need to care * for the buffer_head refcounts. */ - for (i = 0; i < nr_reads; i++) { - bh = read_bh[i]; - lock_buffer(bh); - bh->b_end_io = end_buffer_read_nobh; - submit_bh(READ, bh); - } - for (i = 0; i < nr_reads; i++) { - bh = read_bh[i]; + for (bh = head; bh; bh = bh->b_this_page) { wait_on_buffer(bh); if (!buffer_uptodate(bh)) ret = -EIO; - free_buffer_head(bh); - read_bh[i] = NULL; } if (ret) goto failed; @@ -2488,53 +2513,70 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to, if (is_mapped_to_disk) SetPageMappedToDisk(page); - SetPageUptodate(page); - /* - * Setting the page dirty here isn't necessary for the prepare_write - * function - commit_write will do that. But if/when this function is - * used within the pagefault handler to ensure that all mmapped pages - * have backing space in the filesystem, we will need to dirty the page - * if its contents were altered. - */ - if (dirtied_it) - set_page_dirty(page); + *fsdata = head; /* to be released by nobh_write_end */ return 0; failed: - for (i = 0; i < nr_reads; i++) { - if (read_bh[i]) - free_buffer_head(read_bh[i]); - } - + BUG_ON(!ret); /* - * Error recovery is pretty slack. Clear the page and mark it dirty - * so we'll later zero out any blocks which _were_ allocated. + * Error recovery is a bit difficult. 
We need to zero out blocks that + * were newly allocated, and dirty them to ensure they get written out. + * Buffers need to be attached to the page at this point, otherwise + * the handling of potential IO errors during writeout would be hard + * (could try doing synchronous writeout, but what if that fails too?) */ - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr, 0, PAGE_CACHE_SIZE); - kunmap_atomic(kaddr, KM_USER0); - SetPageUptodate(page); - set_page_dirty(page); + attach_nobh_buffers(page, head); + page_zero_new_buffers(page, from, to); + +out_release: + unlock_page(page); + page_cache_release(page); + *pagep = NULL; + + if (pos + len > inode->i_size) + vmtruncate(inode, inode->i_size); + return ret; } -EXPORT_SYMBOL(nobh_prepare_write); +EXPORT_SYMBOL(nobh_write_begin); -int nobh_commit_write(struct file *file, struct page *page, - unsigned from, unsigned to) +int nobh_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; - loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; + struct buffer_head *head = fsdata; + struct buffer_head *bh; + + if (!PageMappedToDisk(page)) { + if (unlikely(copied < len) && !page_has_buffers(page)) + attach_nobh_buffers(page, head); + if (page_has_buffers(page)) + return generic_write_end(file, mapping, pos, len, + copied, page, fsdata); + } + SetPageUptodate(page); set_page_dirty(page); - if (pos > inode->i_size) { - i_size_write(inode, pos); + if (pos+copied > inode->i_size) { + i_size_write(inode, pos+copied); mark_inode_dirty(inode); } - return 0; + + unlock_page(page); + page_cache_release(page); + + while (head) { + bh = head; + head = head->b_this_page; + free_buffer_head(bh); + } + + return copied; } -EXPORT_SYMBOL(nobh_commit_write); +EXPORT_SYMBOL(nobh_write_end); /* * nobh_writepage() - based on block_full_write_page() except @@ -2548,7 +2590,6 @@ int nobh_writepage(struct page *page, get_block_t *get_block, loff_t i_size = i_size_read(inode); const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; - void *kaddr; int ret; /* Is the page fully inside i_size? */ @@ -2579,10 +2620,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_segment(page, offset, PAGE_CACHE_SIZE); out: ret = mpage_writepage(page, get_block, wbc); if (ret == -EAGAIN) @@ -2591,42 +2629,79 @@ out: } EXPORT_SYMBOL(nobh_writepage); -/* - * This function assumes that ->prepare_write() uses nobh_prepare_write(). 
- */ -int nobh_truncate_page(struct address_space *mapping, loff_t from) +int nobh_truncate_page(struct address_space *mapping, + loff_t from, get_block_t *get_block) { - struct inode *inode = mapping->host; - unsigned blocksize = 1 << inode->i_blkbits; pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned to; + unsigned blocksize; + sector_t iblock; + unsigned length, pos; + struct inode *inode = mapping->host; struct page *page; - struct address_space_operations *a_ops = mapping->a_ops; - char *kaddr; - int ret = 0; + struct buffer_head map_bh; + int err; - if ((offset & (blocksize - 1)) == 0) - goto out; + blocksize = 1 << inode->i_blkbits; + length = offset & (blocksize - 1); + + /* Block boundary? Nothing to do */ + if (!length) + return 0; + + length = blocksize - length; + iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - ret = -ENOMEM; page = grab_cache_page(mapping, index); + err = -ENOMEM; if (!page) goto out; - to = (offset + blocksize) & ~(blocksize - 1); - ret = a_ops->prepare_write(NULL, page, offset, to); - if (ret == 0) { - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - set_page_dirty(page); + if (page_has_buffers(page)) { +has_buffers: + unlock_page(page); + page_cache_release(page); + return block_truncate_page(mapping, from, get_block); + } + + /* Find the buffer that contains "offset" */ + pos = blocksize; + while (offset >= pos) { + iblock++; + pos += blocksize; + } + + err = get_block(inode, iblock, &map_bh, 0); + if (err) + goto unlock; + /* unmapped? It's a hole - nothing to do */ + if (!buffer_mapped(&map_bh)) + goto unlock; + + /* Ok, it's mapped. Make sure it's up-to-date */ + if (!PageUptodate(page)) { + err = mapping->a_ops->readpage(NULL, page); + if (err) { + page_cache_release(page); + goto out; + } + lock_page(page); + if (!PageUptodate(page)) { + err = -EIO; + goto unlock; + } + if (page_has_buffers(page)) + goto has_buffers; } + zero_user(page, offset, length); + set_page_dirty(page); + err = 0; + +unlock: unlock_page(page); page_cache_release(page); out: - return ret; + return err; } EXPORT_SYMBOL(nobh_truncate_page); @@ -2641,7 +2716,6 @@ int block_truncate_page(struct address_space *mapping, struct inode *inode = mapping->host; struct page *page; struct buffer_head *bh; - void *kaddr; int err; blocksize = 1 << inode->i_blkbits; @@ -2673,6 +2747,7 @@ int block_truncate_page(struct address_space *mapping, err = 0; if (!buffer_mapped(bh)) { + WARN_ON(bh->b_size != blocksize); err = get_block(inode, iblock, bh, 0); if (err) goto unlock; @@ -2685,7 +2760,7 @@ int block_truncate_page(struct address_space *mapping, if (PageUptodate(page)) set_buffer_uptodate(bh); - if (!buffer_uptodate(bh) && !buffer_delay(bh)) { + if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); @@ -2694,11 +2769,7 @@ int block_truncate_page(struct address_space *mapping, goto unlock; } - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, length); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - + zero_user(page, offset, length); mark_buffer_dirty(bh); err = 0; @@ -2719,7 +2790,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block, loff_t i_size = i_size_read(inode); const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; unsigned offset; - void *kaddr; /* Is the page fully inside i_size? 
*/ if (page->index < end_index) @@ -2745,10 +2815,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + zero_user_segment(page, offset, PAGE_CACHE_SIZE); return __block_write_full_page(inode, page, get_block, wbc); } @@ -2759,17 +2826,15 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, struct inode *inode = mapping->host; tmp.b_state = 0; tmp.b_blocknr = 0; + tmp.b_size = 1 << inode->i_blkbits; get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } -static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err) +static void end_bio_bh_io_sync(struct bio *bio, int err) { struct buffer_head *bh = bio->bi_private; - if (bio->bi_size) - return 1; - if (err == -EOPNOTSUPP) { set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); set_bit(BH_Eopnotsupp, &bh->b_state); @@ -2777,7 +2842,6 @@ static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err) bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); bio_put(bio); - return 0; } int submit_bh(int rw, struct buffer_head * bh) @@ -2866,22 +2930,22 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) else if (test_set_buffer_locked(bh)) continue; - get_bh(bh); if (rw == WRITE || rw == SWRITE) { if (test_clear_buffer_dirty(bh)) { bh->b_end_io = end_buffer_write_sync; + get_bh(bh); submit_bh(WRITE, bh); continue; } } else { if (!buffer_uptodate(bh)) { bh->b_end_io = end_buffer_read_sync; + get_bh(bh); submit_bh(rw, bh); continue; } } unlock_buffer(bh); - put_bh(bh); } } @@ -2985,17 +3049,23 @@ int try_to_free_buffers(struct page *page) spin_lock(&mapping->private_lock); ret = drop_buffers(page, &buffers_to_free); - if (ret) { - /* - * If the filesystem writes its buffers by hand (eg ext3) - * then we can have clean buffers against a dirty page. We - * clean the page here; otherwise later reattachment of buffers - * could encounter a non-uptodate page, which is unresolvable. - * This only applies in the rare case where try_to_free_buffers - * succeeds but the page is not freed. - */ - clear_page_dirty(page); - } + + /* + * If the filesystem writes its buffers by hand (eg ext3) + * then we can have clean buffers against a dirty page. We + * clean the page here; otherwise the VM will never notice + * that the filesystem did any IO at all. + * + * Also, during truncate, discard_buffer will have marked all + * the page's buffers clean. We discover that here and clean + * the page also. + * + * private_lock must be held over this entire operation in order + * to synchronise against __set_page_dirty_buffers and prevent the + * dirty bit from being lost. 
+ */ + if (ret) + cancel_dirty_page(page, PAGE_CACHE_SIZE); spin_unlock(&mapping->private_lock); out: if (buffers_to_free) { @@ -3011,7 +3081,7 @@ out: } EXPORT_SYMBOL(try_to_free_buffers); -int block_sync_page(struct page *page) +void block_sync_page(struct page *page) { struct address_space *mapping; @@ -3019,7 +3089,6 @@ int block_sync_page(struct page *page) mapping = page_mapping(page); if (mapping) blk_run_backing_dev(mapping->backing_dev_info, page); - return 0; } /* @@ -3052,7 +3121,7 @@ asmlinkage long sys_bdflush(int func, long data) /* * Buffer-head allocation */ -static kmem_cache_t *bh_cachep; +static struct kmem_cache *bh_cachep; /* * Once the number of bh's in the machine exceeds this level, we start @@ -3077,15 +3146,17 @@ static void recalc_bh_state(void) if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) return; __get_cpu_var(bh_accounting).ratelimit = 0; - for_each_cpu(i) + for_each_online_cpu(i) tot += per_cpu(bh_accounting, i).nr; buffer_heads_over_limit = (tot > max_buffer_heads); } struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) { - struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); + struct buffer_head *ret = kmem_cache_alloc(bh_cachep, + set_migrateflags(gfp_flags, __GFP_RECLAIMABLE)); if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); get_cpu_var(bh_accounting).nr++; recalc_bh_state(); put_cpu_var(bh_accounting); @@ -3104,19 +3175,6 @@ void free_buffer_head(struct buffer_head *bh) } EXPORT_SYMBOL(free_buffer_head); -static void -init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) -{ - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { - struct buffer_head * bh = (struct buffer_head *)data; - - memset(bh, 0, sizeof(*bh)); - INIT_LIST_HEAD(&bh->b_assoc_buffers); - } -} - -#ifdef CONFIG_HOTPLUG_CPU static void buffer_exit_cpu(int cpu) { int i; @@ -3126,16 +3184,71 @@ static void buffer_exit_cpu(int cpu) brelse(b->bhs[i]); b->bhs[i] = NULL; } + get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr; + per_cpu(bh_accounting, cpu).nr = 0; + put_cpu_var(bh_accounting); } static int buffer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { - if (action == CPU_DEAD) + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) buffer_exit_cpu((unsigned long)hcpu); return NOTIFY_OK; } -#endif /* CONFIG_HOTPLUG_CPU */ + +/** + * bh_uptodate_or_lock: Test whether the buffer is uptodate + * @bh: struct buffer_head + * + * Return true if the buffer is up-to-date and false, + * with the buffer locked, if not. + */ +int bh_uptodate_or_lock(struct buffer_head *bh) +{ + if (!buffer_uptodate(bh)) { + lock_buffer(bh); + if (!buffer_uptodate(bh)) + return 0; + unlock_buffer(bh); + } + return 1; +} +EXPORT_SYMBOL(bh_uptodate_or_lock); + +/** + * bh_submit_read: Submit a locked buffer for reading + * @bh: struct buffer_head + * + * Returns zero on success and -EIO on error. 
+ */ +int bh_submit_read(struct buffer_head *bh) +{ + BUG_ON(!buffer_locked(bh)); + + if (buffer_uptodate(bh)) { + unlock_buffer(bh); + return 0; + } + + get_bh(bh); + bh->b_end_io = end_buffer_read_sync; + submit_bh(READ, bh); + wait_on_buffer(bh); + if (buffer_uptodate(bh)) + return 0; + return -EIO; +} +EXPORT_SYMBOL(bh_submit_read); + +static void +init_buffer_head(struct kmem_cache *cachep, void *data) +{ + struct buffer_head *bh = data; + + memset(bh, 0, sizeof(*bh)); + INIT_LIST_HEAD(&bh->b_assoc_buffers); +} void __init buffer_init(void) { @@ -3143,7 +3256,9 @@ void __init buffer_init(void) bh_cachep = kmem_cache_create("buffer_head", sizeof(struct buffer_head), 0, - SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL); + (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| + SLAB_MEM_SPREAD), + init_buffer_head); /* * Limit the bh occupancy to 10% of ZONE_NORMAL @@ -3158,19 +3273,18 @@ EXPORT_SYMBOL(__brelse); EXPORT_SYMBOL(__wait_on_buffer); EXPORT_SYMBOL(block_commit_write); EXPORT_SYMBOL(block_prepare_write); +EXPORT_SYMBOL(block_page_mkwrite); EXPORT_SYMBOL(block_read_full_page); EXPORT_SYMBOL(block_sync_page); EXPORT_SYMBOL(block_truncate_page); EXPORT_SYMBOL(block_write_full_page); -EXPORT_SYMBOL(cont_prepare_write); -EXPORT_SYMBOL(end_buffer_async_write); +EXPORT_SYMBOL(cont_write_begin); EXPORT_SYMBOL(end_buffer_read_sync); EXPORT_SYMBOL(end_buffer_write_sync); EXPORT_SYMBOL(file_fsync); EXPORT_SYMBOL(fsync_bdev); EXPORT_SYMBOL(generic_block_bmap); EXPORT_SYMBOL(generic_commit_write); -EXPORT_SYMBOL(generic_cont_expand); EXPORT_SYMBOL(generic_cont_expand_simple); EXPORT_SYMBOL(init_buffer); EXPORT_SYMBOL(invalidate_bdev);
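
The diff above moves buffer_head based filesystems from the old prepare_write/commit_write pair toward the new write_begin/write_end helpers it introduces (block_write_begin, block_write_end, generic_write_end, cont_write_begin, and the nobh_write_begin/nobh_write_end variants). The sketch below is not part of the patch; it is a rough illustration, in which every "myfs_" identifier (including myfs_get_block) is a hypothetical placeholder, of how a simple block-based filesystem wires these helpers into its address_space_operations after this change, mirroring the way in-tree filesystems were converted in the same series.

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

/*
 * Hypothetical block-mapping routine: a real filesystem would look up (or
 * allocate, when create != 0) the on-disk block backing iblock and call
 * map_bh(bh_result, inode->i_sb, phys_block).
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	return -EIO;		/* placeholder only */
}

static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	/*
	 * Passing *pagep == NULL asks block_write_begin() to find and lock
	 * the pagecache page itself; it then maps the blocks covering
	 * [pos, pos + len) through myfs_get_block(), zeroing newly
	 * allocated blocks and reading any partially overwritten blocks
	 * that are not yet uptodate, before the caller copies data in.
	 */
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.writepage	= myfs_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};

generic_write_end() commits the copied bytes via block_write_end(), updates i_size while still holding the page lock, and then unlocks and releases the page, which is why a simple filesystem usually needs no write_end of its own. Filesystems that cannot represent holes would call cont_write_begin() in their write_begin method instead, and the nobh_* variants serve filesystems that avoid keeping buffer_heads attached to pagecache pages.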