void unlock_buffer(struct buffer_head *bh)
{
- smp_mb__before_clear_bit();
- clear_buffer_locked(bh);
+ clear_bit_unlock(BH_Lock, &bh->b_state);
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
}
page_cache_release(page);
}
+
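+/*
+ * quiet_error - should an I/O error on this buffer be reported?
+ *
+ * Returns 1 (suppress the message) when BH_Quiet is set on the buffer
+ * or when printk is being rate-limited; returns 0 when the error
+ * should be logged.
+ */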
+static int quiet_error(struct buffer_head *bh)
+{
+ if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
+ return 0;
+ return 1;
+}
+
static void buffer_io_error(struct buffer_head *bh)
{
char b[BDEVNAME_SIZE];
-
printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
bdevname(bh->b_bdev, b),
(unsigned long long)bh->b_blocknr);
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+ if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
*/
static void free_more_memory(void)
{
- struct zone **zones;
- pg_data_t *pgdat;
+ struct zone *zone;
+ int nid;
wakeup_pdflush(1024);
yield();
- for_each_online_pgdat(pgdat) {
- zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
- if (*zones)
- try_to_free_pages(zones, 0, GFP_NOFS);
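+ /*
+  * Walk every online node and reclaim from its GFP_NOFS zonelist;
+  * first_zones_zonelist() reports whether the node has any suitable
+  * zone at all before try_to_free_pages() is attempted.
+  */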
+ for_each_online_node(nid) {
+ (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+ gfp_zone(GFP_NOFS), NULL,
+ &zone);
+ if (zone)
+ try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+ GFP_NOFS);
}
}
set_buffer_uptodate(bh);
} else {
clear_buffer_uptodate(bh);
- if (printk_ratelimit())
+ if (!quiet_error(bh))
buffer_io_error(bh);
SetPageError(page);
}
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (printk_ratelimit()) {
+ if (!quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
/*
* The buffer's backing address_space's private_lock must be held
*/
-static inline void __remove_assoc_queue(struct buffer_head *bh)
+static void __remove_assoc_queue(struct buffer_head *bh)
{
list_del_init(&bh->b_assoc_buffers);
WARN_ON(!bh->b_assoc_map);
if (TestSetPageDirty(page))
return 0;
- write_lock_irq(&mapping->tree_lock);
+ spin_lock_irq(&mapping->tree_lock);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- write_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irq(&mapping->tree_lock);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return 1;
* contents - it is a noop if I/O is still in
* flight on potentially older contents.
*/
- ll_rw_block(SWRITE, 1, &bh);
+ ll_rw_block(SWRITE_SYNC, 1, &bh);
brelse(bh);
spin_lock(lock);
}
spin_unlock(&buffer_mapping->private_lock);
}
}
+EXPORT_SYMBOL(invalidate_inode_buffers);
/*
* Remove any clean buffers from the inode's buffer list. This is called
printk(KERN_ERR "%s: requested out-of-range block %llu for "
"device %s\n",
- __FUNCTION__, (unsigned long long)block,
+ __func__, (unsigned long long)block,
bdevname(bdev, b));
return -EIO;
}
void mark_buffer_dirty(struct buffer_head *bh)
{
WARN_ON_ONCE(!buffer_uptodate(bh));
- if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+
+ /*
+ * Very *carefully* optimize the it-is-already-dirty case.
+ *
+ * Don't let the final "is it dirty" test be reordered to before
+ * the point where we may have modified the buffer.
+ */
+ if (buffer_dirty(bh)) {
+ smp_mb();
+ if (buffer_dirty(bh))
+ return;
+ }
+
+ if (!test_set_buffer_dirty(bh))
__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}
put_bh(buf);
return;
}
- printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
/*
void invalidate_bh_lrus(void)
{
- on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
+ on_each_cpu(invalidate_bh_lru, NULL, 1);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
*/
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
- } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+ } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+ buffer_dirty(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
goto recover;
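+ /* get_block() has mapped the block, so it is no longer delayed */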
+ clear_buffer_delay(bh);
if (buffer_new(bh)) {
/* blockdev mappings never come here */
clear_buffer_new(bh);
*/
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
lock_buffer(bh);
- } else if (test_set_buffer_locked(bh)) {
+ } else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
continue;
}
bh = head;
/* Recovery: lock and submit the mapped buffers */
do {
- if (buffer_mapped(bh) && buffer_dirty(bh)) {
+ if (buffer_mapped(bh) && buffer_dirty(bh) &&
+ !buffer_delay(bh)) {
lock_buffer(bh);
mark_buffer_async_write(bh);
} else {
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
+ int i_size_changed = 0;
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
*/
if (pos+copied > inode->i_size) {
i_size_write(inode, pos+copied);
- mark_inode_dirty(inode);
+ i_size_changed = 1;
}
unlock_page(page);
page_cache_release(page);
+ /*
+ * Don't mark the inode dirty under the page lock. First, it
+ * unnecessarily lengthens the time the page lock is held. Second, it
+ * forces a lock ordering of page lock before transaction start for
+ * journaling filesystems.
+ */
+ if (i_size_changed)
+ mark_inode_dirty(inode);
+
return copied;
}
EXPORT_SYMBOL(generic_write_end);
/*
+ * block_is_partially_uptodate checks whether buffers within a page are
+ * uptodate or not.
+ *
+ * Returns true if all buffers that correspond to the portion of the
+ * file we want to read are uptodate.
+ */
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+ unsigned long from)
+{
+ struct inode *inode = page->mapping->host;
+ unsigned block_start, block_end, blocksize;
+ unsigned to;
+ struct buffer_head *bh, *head;
+ int ret = 1;
+
+ if (!page_has_buffers(page))
+ return 0;
+
+ blocksize = 1 << inode->i_blkbits;
+ to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
+ to = from + to;
+ if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
+ return 0;
+
+ head = page_buffers(page);
+ bh = head;
+ block_start = 0;
+ do {
+ block_end = block_start + blocksize;
+ if (block_end > from && block_start < to) {
+ if (!buffer_uptodate(bh)) {
+ ret = 0;
+ break;
+ }
+ if (block_end >= to)
+ break;
+ }
+ block_start = block_end;
+ bh = bh->b_this_page;
+ } while (bh != head);
+
+ return ret;
+}
+EXPORT_SYMBOL(block_is_partially_uptodate);
+
+/*
* Generic "read page" function for block devices that have the normal
* get_block functionality. This is most of the block device filesystems.
* Reads the page asynchronously --- the unlock_buffer() and
return err;
}
-int cont_expand_zero(struct file *file, struct address_space *mapping,
- loff_t pos, loff_t *bytes)
+static int cont_expand_zero(struct file *file, struct address_space *mapping,
+ loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
unsigned blocksize = 1 << inode->i_blkbits;
goto out;
BUG_ON(err != len);
err = 0;
+
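+ /* Zeroing can dirty a large number of pages: throttle the writer */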
+ balance_dirty_pages_ratelimited(mapping);
}
/* page covers the boundary, find the boundary offset */
return 0;
}
-int generic_commit_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
-{
- struct inode *inode = page->mapping->host;
- loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
- __block_commit_write(inode,page,from,to);
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_mutex.
- */
- if (pos > inode->i_size) {
- i_size_write(inode, pos);
- mark_inode_dirty(inode);
- }
- return 0;
-}
-
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
struct inode *inode = page->mapping->host;
struct buffer_head *head = fsdata;
struct buffer_head *bh;
+ BUG_ON(fsdata != NULL && page_has_buffers(page));
- if (!PageMappedToDisk(page)) {
- if (unlikely(copied < len) && !page_has_buffers(page))
- attach_nobh_buffers(page, head);
- if (page_has_buffers(page))
- return generic_write_end(file, mapping, pos, len,
- copied, page, fsdata);
- }
+ if (unlikely(copied < len) && !page_has_buffers(page))
+ attach_nobh_buffers(page, head);
+ if (page_has_buffers(page))
+ return generic_write_end(file, mapping, pos, len,
+ copied, page, fsdata);
SetPageUptodate(page);
set_page_dirty(page);
set_bit(BH_Eopnotsupp, &bh->b_state);
}
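+ /* Propagate BIO_QUIET so quiet_error() can suppress the message */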
+ if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
+ set_bit(BH_Quiet, &bh->b_state);
+
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
}
BUG_ON(!buffer_mapped(bh));
BUG_ON(!bh->b_end_io);
- if (buffer_ordered(bh) && (rw == WRITE))
- rw = WRITE_BARRIER;
+ /*
+ * Mask in the barrier bit for a write (could be either a WRITE or a
+ * WRITE_SYNC).
+ */
+ if (buffer_ordered(bh) && (rw & WRITE))
+ rw |= WRITE_BARRIER;
/*
- * Only clear out a write error when rewriting, should this
- * include WRITE_SYNC as well?
+ * Only clear out a write error when rewriting
*/
- if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
+ if (test_set_buffer_req(bh) && (rw & WRITE))
clear_buffer_write_io_error(bh);
/*
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
- if (rw == SWRITE)
+ if (rw == SWRITE || rw == SWRITE_SYNC)
lock_buffer(bh);
- else if (test_set_buffer_locked(bh))
+ else if (!trylock_buffer(bh))
continue;
- if (rw == WRITE || rw == SWRITE) {
+ if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(WRITE, bh);
+ if (rw == SWRITE_SYNC)
+ submit_bh(WRITE_SYNC, bh);
+ else
+ submit_bh(WRITE, bh);
continue;
}
} else {
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(WRITE, bh);
+ ret = submit_bh(WRITE_SYNC, bh);
wait_on_buffer(bh);
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
- set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+ struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
get_cpu_var(bh_accounting).nr++;
}
/**
- * bh_uptodate_or_lock: Test whether the buffer is uptodate
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
* @bh: struct buffer_head
*
* Return true if the buffer is up-to-date and false,
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
- * bh_submit_read: Submit a locked buffer for reading
+ * bh_submit_read - Submit a locked buffer for reading
* @bh: struct buffer_head
*
* Returns zero on success and -EIO on error.
EXPORT_SYMBOL(bh_submit_read);
static void
-init_buffer_head(struct kmem_cache *cachep, void *data)
+init_buffer_head(void *data)
{
struct buffer_head *bh = data;
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);