diff --git a/fs/buffer.c b/fs/buffer.c
index 665db84..13e5938 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -218,7 +218,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 	sb = get_super(bdev);
 	if (sb && !(sb->s_flags & MS_RDONLY)) {
 		sb->s_frozen = SB_FREEZE_WRITE;
-		wmb();
+		smp_wmb();
 
 		sync_inodes_sb(sb, 0);
 		DQUOT_SYNC(sb);
@@ -235,7 +235,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 		sync_inodes_sb(sb, 1);
 
 		sb->s_frozen = SB_FREEZE_TRANS;
-		wmb();
+		smp_wmb();
 
 		sync_blockdev(sb->s_bdev);
@@ -263,7 +263,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 		if (sb->s_op->unlockfs)
 			sb->s_op->unlockfs(sb);
 		sb->s_frozen = SB_UNFROZEN;
-		wmb();
+		smp_wmb();
 		wake_up(&sb->s_wait_unfrozen);
 		drop_super(sb);
 	}
@@ -331,7 +331,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
 	return ret;
 }
 
-asmlinkage long sys_fsync(unsigned int fd)
+static long do_fsync(unsigned int fd, int datasync)
 {
 	struct file * file;
 	struct address_space *mapping;
@@ -342,14 +342,14 @@ asmlinkage long sys_fsync(unsigned int fd)
 	if (!file)
 		goto out;
 
-	mapping = file->f_mapping;
-
 	ret = -EINVAL;
 	if (!file->f_op || !file->f_op->fsync) {
 		/* Why? We can still call filemap_fdatawrite */
 		goto out_putf;
 	}
 
+	mapping = file->f_mapping;
+
 	current->flags |= PF_SYNCWRITE;
 	ret = filemap_fdatawrite(mapping);
 
@@ -358,7 +358,7 @@ asmlinkage long sys_fsync(unsigned int fd)
 	 * which could cause livelocks in fsync_buffers_list
 	 */
 	down(&mapping->host->i_sem);
-	err = file->f_op->fsync(file, file->f_dentry, 0);
+	err = file->f_op->fsync(file, file->f_dentry, datasync);
 	if (!ret)
 		ret = err;
 	up(&mapping->host->i_sem);
@@ -373,39 +373,14 @@ out:
 	return ret;
 }
 
-asmlinkage long sys_fdatasync(unsigned int fd)
+asmlinkage long sys_fsync(unsigned int fd)
 {
-	struct file * file;
-	struct address_space *mapping;
-	int ret, err;
-
-	ret = -EBADF;
-	file = fget(fd);
-	if (!file)
-		goto out;
-
-	ret = -EINVAL;
-	if (!file->f_op || !file->f_op->fsync)
-		goto out_putf;
-
-	mapping = file->f_mapping;
-
-	current->flags |= PF_SYNCWRITE;
-	ret = filemap_fdatawrite(mapping);
-	down(&mapping->host->i_sem);
-	err = file->f_op->fsync(file, file->f_dentry, 1);
-	if (!ret)
-		ret = err;
-	up(&mapping->host->i_sem);
-	err = filemap_fdatawait(mapping);
-	if (!ret)
-		ret = err;
-	current->flags &= ~PF_SYNCWRITE;
+	return do_fsync(fd, 0);
+}
 
-out_putf:
-	fput(file);
-out:
-	return ret;
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+	return do_fsync(fd, 1);
 }
 
 /*
@@ -528,7 +503,7 @@ static void free_more_memory(void)
 	for_each_pgdat(pgdat) {
 		zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
 		if (*zones)
-			try_to_free_pages(zones, GFP_NOFS, 0);
+			try_to_free_pages(zones, GFP_NOFS);
 	}
 }
 
@@ -774,15 +749,14 @@ repeat:
 /**
  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
  *                        buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
- * Basically, this is a convenience function for fsync().  @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
 */
 int sync_mapping_buffers(struct address_space *mapping)
 {
@@ -1211,7 +1185,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 	return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
 	/* Size must be multiple of hard sectorsize */
@@ -1263,6 +1237,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
 /**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
@@ -1501,6 +1476,7 @@ EXPORT_SYMBOL(__breadahead);
 
 /**
 * __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
@@ -1808,7 +1784,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	} while (bh != head);
 
 	do {
-		get_bh(bh);
 		if (!buffer_mapped(bh))
 			continue;
 		/*
@@ -1837,7 +1812,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	 */
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
-	unlock_page(page);
 
 	do {
 		struct buffer_head *next = bh->b_this_page;
@@ -1845,9 +1819,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
+	unlock_page(page);
 	err = 0;
 
 done:
@@ -1886,7 +1860,6 @@ recover:
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		get_bh(bh);
 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
@@ -1909,7 +1882,6 @@ recover:
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
-		put_bh(bh);
 		bh = next;
 	} while (bh != head);
 	goto done;
@@ -1952,9 +1924,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (!buffer_mapped(bh)) {
 			err = get_block(inode, block, bh, 1);
 			if (err)
-				goto out;
+				break;
 			if (buffer_new(bh)) {
-				clear_buffer_new(bh);
 				unmap_underlying_metadata(bh->b_bdev,
 							bh->b_blocknr);
 				if (PageUptodate(page)) {
@@ -1994,10 +1965,17 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 	while(wait_bh > wait) {
 		wait_on_buffer(*--wait_bh);
 		if (!buffer_uptodate(*wait_bh))
-			return -EIO;
+			err = -EIO;
 	}
-	return 0;
-out:
+	if (!err) {
+		bh = head;
+		do {
+			if (buffer_new(bh))
+				clear_buffer_new(bh);
+		} while ((bh = bh->b_this_page) != head);
+		return 0;
+	}
+	/* Error case: */
 	/*
 	 * Zero out any newly allocated blocks to avoid exposing stale
 	 * data.  If BH_New is set, we know that the block was newly
	 * allocated in the above loop.
	 */
@@ -2078,8 +2056,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	int nr, i;
 	int fully_mapped = 1;
 
-	if (!PageLocked(page))
-		PAGE_BUG(page);
+	BUG_ON(!PageLocked(page));
 	blocksize = 1 << inode->i_blkbits;
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
@@ -2096,9 +2073,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 			continue;
 
 		if (!buffer_mapped(bh)) {
+			int err = 0;
+
 			fully_mapped = 0;
 			if (iblock < lblock) {
-				if (get_block(inode, iblock, bh, 0))
+				err = get_block(inode, iblock, bh, 0);
+				if (err)
 					SetPageError(page);
 			}
 			if (!buffer_mapped(bh)) {
@@ -2106,7 +2086,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 				memset(kaddr + i * blocksize, 0, blocksize);
 				flush_dcache_page(page);
 				kunmap_atomic(kaddr, KM_USER0);
-				set_buffer_uptodate(bh);
+				if (!err)
+					set_buffer_uptodate(bh);
 				continue;
 			}
 			/*
@@ -3115,7 +3096,7 @@ void __init buffer_init(void)
 
 	bh_cachep = kmem_cache_create("buffer_head",
 			sizeof(struct buffer_head), 0,
-			SLAB_PANIC, init_buffer_head, NULL);
+			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
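
Note on the sys_fsync()/sys_fdatasync() consolidation above: after this
patch both system calls funnel through do_fsync(), differing only in the
datasync flag handed to ->fsync(). The userspace contract is unchanged:
fsync(2) flushes file data plus all metadata, while fdatasync(2) may skip
metadata not needed to retrieve the data (mtime, for example). A minimal
userspace sketch of the distinction (illustrative only, not part of the
patch; the file name "testfile" is arbitrary):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "hello\n", 6) != 6)
			perror("write");

		/* Data plus all metadata: the datasync=0 path in do_fsync(). */
		if (fsync(fd) < 0)
			perror("fsync");

		/* Data only (plus metadata needed to reach it): datasync=1. */
		if (fdatasync(fd) < 0)
			perror("fdatasync");

		close(fd);
		return 0;
	}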
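
Note on the wmb() -> smp_wmb() changes in freeze_bdev()/thaw_bdev():
s_frozen is only examined by other CPUs, so the SMP-conditional write
barrier suffices; a mandatory wmb(), which also orders writes to device
memory and is emitted even on uniprocessor builds, is stronger than needed
here. The pattern is the usual publish/observe pairing: store the payload,
issue a write barrier, then set the flag; a reader loads the flag, issues a
read barrier, then reads the payload. A portable C11 analogy (not kernel
code; the names freeze_flag and payload are invented for illustration, and
explicit fences stand in for smp_wmb()/smp_rmb()):

	#include <stdatomic.h>
	#include <stdio.h>

	static int payload;            /* stands in for the frozen sb state */
	static atomic_int freeze_flag; /* stands in for sb->s_frozen */

	static void publisher(void)
	{
		payload = 42;                              /* plain store */
		atomic_thread_fence(memory_order_release); /* ~ smp_wmb() */
		atomic_store_explicit(&freeze_flag, 1, memory_order_relaxed);
	}

	static void observer(void)
	{
		if (atomic_load_explicit(&freeze_flag, memory_order_relaxed)) {
			atomic_thread_fence(memory_order_acquire); /* ~ smp_rmb() */
			/* If the flag was observed, payload == 42 is guaranteed
			 * even when publisher() ran on another thread. */
			printf("payload=%d\n", payload);
		}
	}

	int main(void)
	{
		publisher(); /* single-threaded smoke test of the pairing */
		observer();
		return 0;
	}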