#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
+#include <linux/bit_spinlock.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static void invalidate_bh_lrus(void);
*/
static void do_sync(unsigned long wait)
{
- wakeup_bdflush(0);
+ wakeup_pdflush(0);
sync_inodes(0); /* All mappings, inodes and their blockdevs */
DQUOT_SYNC(NULL);
sync_supers(); /* Write the superblocks */
return ret;
}
-asmlinkage long sys_fsync(unsigned int fd)
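+/*
+ * Shared implementation of fsync() and fdatasync(). @datasync is passed
+ * through to the filesystem's ->fsync() method, which may then skip
+ * writing inode metadata that is not needed to retrieve the file's data.
+ */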
+static long do_fsync(unsigned int fd, int datasync)
{
struct file * file;
struct address_space *mapping;
int ret, err;

ret = -EBADF;
file = fget(fd);
if (!file)
goto out;
- mapping = file->f_mapping;
-
ret = -EINVAL;
if (!file->f_op || !file->f_op->fsync) {
/* Why? We can still call filemap_fdatawrite */
goto out_putf;
}
+ mapping = file->f_mapping;
+
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
/*
* We need to protect against concurrent writers,
* which could cause livelocks in fsync_buffers_list
*/
down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, 0);
+ err = file->f_op->fsync(file, file->f_dentry, datasync);
if (!ret)
ret = err;
up(&mapping->host->i_sem);
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
current->flags &= ~PF_SYNCWRITE;

out_putf:
fput(file);
out:
return ret;
}
-asmlinkage long sys_fdatasync(unsigned int fd)
+asmlinkage long sys_fsync(unsigned int fd)
{
- struct file * file;
- struct address_space *mapping;
- int ret, err;
-
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
-
- ret = -EINVAL;
- if (!file->f_op || !file->f_op->fsync)
- goto out_putf;
-
- mapping = file->f_mapping;
-
- current->flags |= PF_SYNCWRITE;
- ret = filemap_fdatawrite(mapping);
- down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, 1);
- if (!ret)
- ret = err;
- up(&mapping->host->i_sem);
- err = filemap_fdatawait(mapping);
- if (!ret)
- ret = err;
- current->flags &= ~PF_SYNCWRITE;
+ return do_fsync(fd, 0);
+}
-out_putf:
- fput(file);
-out:
- return ret;
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+ return do_fsync(fd, 1);
}
/*
struct zone **zones;
pg_data_t *pgdat;
- wakeup_bdflush(1024);
+ wakeup_pdflush(1024);
yield();
for_each_pgdat(pgdat) {
zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS, 0);
+ try_to_free_pages(zones, GFP_NOFS);
}
}
*/
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
int page_uptodate = 1;
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*/
- spin_lock_irqsave(&page_uptodate_lock, flags);
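+ /*
+ * The page's first buffer_head hosts the BH_Uptodate_Lock bit: take it
+ * with local interrupts disabled to serialise against the other buffers
+ * of this page completing their I/O from interrupt context.
+ */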
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
/*
* If none of the buffers had errors and they are all
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
SetPageError(page);
}
- spin_lock_irqsave(&page_uptodate_lock, flags);
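+ /* Same per-page BH_Uptodate_Lock scheme as in end_buffer_async_read() */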
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
clear_buffer_async_write(bh);
unlock_buffer(bh);
tmp = bh->b_this_page;
}
tmp = tmp->b_this_page;
}
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
end_page_writeback(page);
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
/**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- * buffers
+ * sync_mapping_buffers - write out and wait upon a mapping's "associated" buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
*
* Starts I/O against the buffers at mapping->private_list, and waits upon
* that I/O.
*
- * Basically, this is a convenience function for fsync(). @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
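+ *
+ * Illustrative use only (a hypothetical filesystem fsync method might do):
+ *
+ *	err = sync_mapping_buffers(inode->i_mapping);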
*/
int sync_mapping_buffers(struct address_space *mapping)
{
* contents - it is a noop if I/O is still in
* flight on potentially older contents.
*/
- wait_on_buffer(bh);
- ll_rw_block(WRITE, 1, &bh);
+ ll_rw_block(SWRITE, 1, &bh);
brelse(bh);
spin_lock(lock);
}
return 1;
}
-struct buffer_head *
+static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
/* Size must be multiple of hard sectorsize */
/**
* mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
*
* mark_buffer_dirty() will set the dirty bit against the buffer, then set its
* backing page dirty, then tag the page as dirty in its address_space's radix
/**
* __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
* @block: number of the block to read
* @size: size (in bytes) to read
*
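+ * Illustrative use only (hypothetical caller; bdev and block are assumed
+ * to be valid):
+ *
+ *	bh = __bread(bdev, block, 512);
+ *	if (bh != NULL) {
+ *		... use bh->b_data ...
+ *		brelse(bh);
+ *	}
+ *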
} while (bh != head);
do {
- get_bh(bh);
if (!buffer_mapped(bh))
continue;
/*
*/
BUG_ON(PageWriteback(page));
set_page_writeback(page);
- unlock_page(page);
do {
struct buffer_head *next = bh->b_this_page;
submit_bh(WRITE, bh);
nr_underway++;
}
- put_bh(bh);
bh = next;
} while (bh != head);
+ unlock_page(page);
err = 0;
done:
bh = head;
/* Recovery: lock and submit the mapped buffers */
do {
- get_bh(bh);
if (buffer_mapped(bh) && buffer_dirty(bh)) {
lock_buffer(bh);
mark_buffer_async_write(bh);
submit_bh(WRITE, bh);
nr_underway++;
}
- put_bh(bh);
bh = next;
} while (bh != head);
goto done;
if (!buffer_mapped(bh)) {
err = get_block(inode, block, bh, 1);
if (err)
- goto out;
+ break;
if (buffer_new(bh)) {
- clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
while (wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
- return -EIO;
+ err = -EIO;
}
- return 0;
-out:
+ if (!err) {
+ bh = head;
+ do {
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
+ } while ((bh = bh->b_this_page) != head);
+ return 0;
+ }
+ /* Error case: */
/*
* Zero out any newly allocated blocks to avoid exposing stale
* data. If BH_New is set, we know that the block was newly
continue;
if (!buffer_mapped(bh)) {
+ int err = 0;
+
fully_mapped = 0;
if (iblock < lblock) {
- if (get_block(inode, iblock, bh, 0))
+ err = get_block(inode, iblock, bh, 0);
+ if (err)
SetPageError(page);
}
if (!buffer_mapped(bh)) {
memset(kaddr + i * blocksize, 0, blocksize);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
- set_buffer_uptodate(bh);
+ if (!err)
+ set_buffer_uptodate(bh);
continue;
}
/*
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @rw: whether to %READ, %WRITE, %SWRITE, or %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
- * ll_rw_block() takes an array of pointers to &struct buffer_heads,
- * and requests an I/O operation on them, either a %READ or a %WRITE.
- * The third %READA option is described in the documentation for
- * generic_make_request() which ll_rw_block() calls.
+ * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
+ * requests an I/O operation on them, either a %READ or a %WRITE. The third
+ * option, %SWRITE, is like %WRITE except that it waits for the buffer lock,
+ * so that the *current* data in each buffer is what reaches disk. The fourth
+ * option, %READA, is described in the documentation for
+ * generic_make_request(), which ll_rw_block() calls.
*
* This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a
- * write request, and any buffer that appears to be up-to-date when doing
- * read request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean until
- * the buffer gets unlocked).
+ * BH_Lock state bit) unless %SWRITE was requested, in which case it waits
+ * for the lock instead; it also drops any buffer that appears to be clean
+ * when doing a write request, and any buffer that appears to be up-to-date
+ * when doing a read request. Further, it marks as clean any buffers that are
+ * processed for writing (the buffer cache won't assume that they are
+ * actually clean until the buffer gets unlocked).
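+ *
+ * Illustrative use only (hypothetical caller): start writeback on a batch
+ * of buffers and then wait for the result:
+ *
+ *	ll_rw_block(SWRITE, nr, bhs);
+ *	for (i = 0; i < nr; i++) {
+ *		wait_on_buffer(bhs[i]);
+ *		if (!buffer_uptodate(bhs[i]))
+ *			err = -EIO;
+ *	}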
*
* ll_rw_block sets b_end_io to a simple completion handler that marks
* the buffer up-to-date (if appropriate), unlocks the buffer and wakes
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
- if (test_set_buffer_locked(bh))
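+ /*
+ * SWRITE callers need the *current* buffer contents on disk, so wait
+ * for the buffer lock rather than skipping buffers that are locked
+ * for I/O.
+ */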
+ if (rw == SWRITE)
+ lock_buffer(bh);
+ else if (test_set_buffer_locked(bh))
continue;
get_bh(bh);
- if (rw == WRITE) {
+ if (rw == WRITE || rw == SWRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE, bh);
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
- preempt_disable();
- __get_cpu_var(bh_accounting).nr++;
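+ /* get_cpu_var() disables preemption while the per-CPU count is updated */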
+ get_cpu_var(bh_accounting).nr++;
recalc_bh_state();
- preempt_enable();
+ put_cpu_var(bh_accounting);
}
return ret;
}
{
BUG_ON(!list_empty(&bh->b_assoc_buffers));
kmem_cache_free(bh_cachep, bh);
- preempt_disable();
- __get_cpu_var(bh_accounting).nr--;
+ get_cpu_var(bh_accounting).nr--;
recalc_bh_state();
- preempt_enable();
+ put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);
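+/*
+ * SLAB_RECLAIM_ACCOUNT: buffer_heads are reclaimable, so have the slab
+ * allocator account this cache as reclaimable memory for the VM.
+ */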
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
- SLAB_PANIC, init_buffer_head, NULL);
+ SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL