X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fbuffer.c;h=6fa530256bfd7b118bbef4aebce7ceb79e145928;hb=8a1eaa6a5697c4554ecacbd021c668f51318c09e;hp=2963858f0f31f106b878966c75ca09991c7486f7;hpb=97f76d3d197f201ac8a8a3ced5b8fef81568e50e;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/buffer.c b/fs/buffer.c
index 2963858..6fa5302 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 	bh->b_end_io = handler;
 	bh->b_private = private;
 }
+EXPORT_SYMBOL(init_buffer);
 
 static int sync_buffer(void *word)
 {
@@ -80,6 +81,7 @@ void unlock_buffer(struct buffer_head *bh)
 	smp_mb__after_clear_bit();
 	wake_up_bit(&bh->b_state, BH_Lock);
 }
+EXPORT_SYMBOL(unlock_buffer);
 
 /*
  * Block until a buffer comes unlocked.  This doesn't stop it
@@ -90,6 +92,7 @@ void __wait_on_buffer(struct buffer_head * bh)
 {
 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL(__wait_on_buffer);
 
 static void
 __clear_page_buffers(struct page *page)
@@ -144,6 +147,7 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 	__end_buffer_read_notouch(bh, uptodate);
 	put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_read_sync);
 
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
@@ -164,6 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 	unlock_buffer(bh);
 	put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_write_sync);
 
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
@@ -272,16 +277,17 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	invalidate_mapping_pages(mapping, 0, -1);
 }
+EXPORT_SYMBOL(invalidate_bdev);
 
 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
 	struct zone *zone;
 	int nid;
 
-	wakeup_pdflush(1024);
+	wakeup_flusher_threads(1024);
 	yield();
 
 	for_each_online_node(nid) {
@@ -360,7 +366,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
 	char b[BDEVNAME_SIZE];
 	unsigned long flags;
@@ -410,6 +416,7 @@ still_busy:
 	local_irq_restore(flags);
 	return;
 }
+EXPORT_SYMBOL(end_buffer_async_write);
 
 /*
  * If a page's buffers are under async readin (end_buffer_async_read
@@ -438,11 +445,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
 	set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+					  bh_end_io_t *handler)
 {
-	bh->b_end_io = end_buffer_async_write;
+	bh->b_end_io = handler;
 	set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+	mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
 
 
@@ -547,7 +560,7 @@ repeat:
 	return err;
 }
 
-void do_thaw_all(unsigned long unused)
+static void do_thaw_all(struct work_struct *work)
 {
 	struct super_block *sb;
 	char b[BDEVNAME_SIZE];
@@ -567,6 +580,7 @@ restart:
 		goto restart;
 	}
 	spin_unlock(&sb_lock);
+	kfree(work);
 	printk(KERN_WARNING "Emergency Thaw complete\n");
 }
 
@@ -577,7 +591,13 @@ restart:
  */
 void emergency_thaw_all(void)
 {
-	pdflush_operation(do_thaw_all, 0);
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_thaw_all);
+		schedule_work(work);
+	}
 }
 
 /**
@@ -737,7 +757,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping;
+	struct address_space *mapping, *prev_mapping = NULL;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,7 +782,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE_SYNC, 1, &bh);
+				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+				/*
+				 * Kick off IO for the previous mapping. Note
+				 * that we will not run the very last mapping,
+				 * wait_on_buffer() will do that for us
+				 * through sync_buffer().
+				 */
+				if (prev_mapping && prev_mapping != mapping)
+					blk_run_address_space(prev_mapping);
+				prev_mapping = mapping;
+
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -1061,12 +1092,12 @@ static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
 	/* Size must be multiple of hard sectorsize */
-	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
 			(size < 512 || size > PAGE_SIZE))) {
 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
 					size);
-		printk(KERN_ERR "hardsect size: %d\n",
-			bdev_hardsect_size(bdev));
+		printk(KERN_ERR "logical block size: %d\n",
+			bdev_logical_block_size(bdev));
 
 		dump_stack();
 		return NULL;
@@ -1141,10 +1172,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
 	if (!test_set_buffer_dirty(bh)) {
 		struct page *page = bh->b_page;
 
-		if (!TestSetPageDirty(page))
-			__set_page_dirty(page, page_mapping(page), 0);
+		if (!TestSetPageDirty(page)) {
+			struct address_space *mapping = page_mapping(page);
+			if (mapping)
+				__set_page_dirty(page, mapping, 0);
+		}
 	}
 }
+EXPORT_SYMBOL(mark_buffer_dirty);
 
 /*
  * Decrement a buffer_head's reference count. If all buffers against a page
@@ -1161,6 +1196,7 @@ void __brelse(struct buffer_head * buf)
 	}
 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
 }
+EXPORT_SYMBOL(__brelse);
 
 /*
  * bforget() is like brelse(), except it discards any
@@ -1179,6 +1215,7 @@ void __bforget(struct buffer_head *bh)
 	}
 	__brelse(bh);
 }
+EXPORT_SYMBOL(__bforget);
 
 static struct buffer_head *__bread_slow(struct buffer_head *bh)
 {
@@ -1585,9 +1622,20 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
 * locked buffer.   This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed out before the higher-level caller actually
+ * waits for the writes to be completed.  The various wait functions,
+ * such as wait_on_writeback_range() will ultimately call sync_page()
+ * which will ultimately call blk_run_backing_dev(), which will end up
+ * unplugging the device queue.
 */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-			get_block_t *get_block, struct writeback_control *wbc)
+			get_block_t *get_block, struct writeback_control *wbc,
+			bh_end_io_t *handler)
 {
 	int err;
 	sector_t block;
@@ -1595,6 +1643,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
+	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE);
 
 	BUG_ON(!PageLocked(page));
 
@@ -1659,9 +1709,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		/*
 		 * If it's a fully non-blocking write attempt and we cannot
 		 * lock the buffer then redirty the page.  Note that this can
-		 * potentially cause a busy-wait loop from pdflush and kswapd
-		 * activity, but those code paths have their own higher-level
-		 * throttling.
+		 * potentially cause a busy-wait loop from writeback threads
+		 * and kswapd activity, but those code paths have their own
+		 * higher-level throttling.
 		 */
 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
 			lock_buffer(bh);
@@ -1670,7 +1720,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
 		} else {
 			unlock_buffer(bh);
 		}
@@ -1686,7 +1736,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
-			submit_bh(WRITE, bh);
+			submit_bh(write_op, bh);
 			nr_underway++;
 		}
 		bh = next;
@@ -1723,7 +1773,7 @@ recover:
 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
 		    !buffer_delay(bh)) {
 			lock_buffer(bh);
-			mark_buffer_async_write(bh);
+			mark_buffer_async_write_endio(bh, handler);
 		} else {
 			/*
 			 * The buffer may have been set dirty during
@@ -1740,7 +1790,7 @@ recover:
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
 			clear_buffer_dirty(bh);
-			submit_bh(WRITE, bh);
+			submit_bh(write_op, bh);
 			nr_underway++;
 		}
 		bh = next;
@@ -2178,6 +2228,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(block_read_full_page);
 
 /* utility function for filesystems that need to do work on expanding
  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
@@ -2188,16 +2239,10 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 	void *fsdata;
-	unsigned long limit;
 	int err;
 
-	err = -EFBIG;
-	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
-		send_sig(SIGXFSZ, current, 0);
-		goto out;
-	}
-	if (size > inode->i_sb->s_maxbytes)
+	err = inode_newsize_ok(inode, size);
+	if (err)
 		goto out;
 
 	err = pagecache_write_begin(NULL, mapping, size, 0,
@@ -2212,6 +2257,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
 out:
 	return err;
 }
+EXPORT_SYMBOL(generic_cont_expand_simple);
 
 static int cont_expand_zero(struct file *file, struct address_space *mapping,
 			    loff_t pos, loff_t *bytes)
@@ -2312,6 +2358,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 out:
 	return err;
 }
+EXPORT_SYMBOL(cont_write_begin);
 
 int block_prepare_write(struct page *page, unsigned from, unsigned to,
 			get_block_t *get_block)
@@ -2322,6 +2369,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 		ClearPageUptodate(page);
 	return err;
 }
+EXPORT_SYMBOL(block_prepare_write);
 
 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
@@ -2329,6 +2377,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
 	__block_commit_write(inode,page,from,to);
 	return 0;
 }
+EXPORT_SYMBOL(block_commit_write);
 
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
@@ -2360,7 +2409,8 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if ((page->mapping != inode->i_mapping) ||
 	    (page_offset(page) > size)) {
 		/* page got truncated out from underneath us */
-		goto out_unlock;
+		unlock_page(page);
+		goto out;
 	}
 
 	/* page is wholly or partially inside EOF */
@@ -2374,16 +2424,18 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 		ret = block_commit_write(page, 0, end);
 
 	if (unlikely(ret)) {
+		unlock_page(page);
 		if (ret == -ENOMEM)
 			ret = VM_FAULT_OOM;
 		else /* -ENOSPC, -EIO, etc */
 			ret = VM_FAULT_SIGBUS;
-	}
+	} else
+		ret = VM_FAULT_LOCKED;
 
-out_unlock:
-	unlock_page(page);
+out:
 	return ret;
 }
+EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
@@ -2649,7 +2701,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
 	ret = mpage_writepage(page, get_block, wbc);
 	if (ret == -EAGAIN)
-		ret = __block_write_full_page(inode, page, get_block, wbc);
+		ret = __block_write_full_page(inode, page, get_block, wbc,
+					      end_buffer_async_write);
 	return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2696,6 +2749,8 @@ has_buffers:
 		pos += blocksize;
 	}
 
+	map_bh.b_size = blocksize;
+	map_bh.b_state = 0;
 	err = get_block(inode, iblock, &map_bh, 0);
 	if (err)
 		goto unlock;
@@ -2804,12 +2859,14 @@ unlock:
 out:
 	return err;
 }
+EXPORT_SYMBOL(block_truncate_page);
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * this form passes in the end_io handler used to finish the IO.
 */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-			struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc, bh_end_io_t *handler)
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
@@ -2818,7 +2875,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
 	/* Is the page fully inside i_size? */
 	if (page->index < end_index)
-		return __block_write_full_page(inode, page, get_block, wbc);
+		return __block_write_full_page(inode, page, get_block, wbc,
+					       handler);
 
 	/* Is the page fully outside i_size? (truncate in progress) */
 	offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2841,8 +2899,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	 * writes to that region are not written out to the file."
 	 */
 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-	return __block_write_full_page(inode, page, get_block, wbc);
+	return __block_write_full_page(inode, page, get_block, wbc, handler);
 }
+EXPORT_SYMBOL(block_write_full_page_endio);
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc)
+{
+	return block_write_full_page_endio(page, get_block, wbc,
+					   end_buffer_async_write);
+}
+EXPORT_SYMBOL(block_write_full_page);
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 			    get_block_t *get_block)
@@ -2855,6 +2925,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 	get_block(inode, block, &tmp, 0);
 	return tmp.b_blocknr;
 }
+EXPORT_SYMBOL(generic_block_bmap);
 
 static void end_bio_bh_io_sync(struct bio *bio, int err)
 {
@@ -2880,6 +2951,8 @@ int submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(!buffer_locked(bh));
 	BUG_ON(!buffer_mapped(bh));
 	BUG_ON(!bh->b_end_io);
+	BUG_ON(buffer_delay(bh));
+	BUG_ON(buffer_unwritten(bh));
 
 	/*
 	 * Mask in barrier bit for a write (could be either a WRITE or a
@@ -2922,6 +2995,7 @@ int submit_bh(int rw, struct buffer_head * bh)
 	bio_put(bio);
 	return ret;
 }
+EXPORT_SYMBOL(submit_bh);
 
 /**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
@@ -2956,12 +3030,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC)
+		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+		    rw == SWRITE_SYNC_PLUG) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
@@ -2982,6 +3057,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 		unlock_buffer(bh);
 	}
 }
+EXPORT_SYMBOL(ll_rw_block);
 
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
@@ -2997,7 +3073,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -3010,6 +3086,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	}
 	return ret;
 }
+EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
@@ -3124,13 +3201,14 @@ void block_sync_page(struct page *page)
 	if (mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, page);
 }
+EXPORT_SYMBOL(block_sync_page);
 
 /*
  * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
 SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
@@ -3300,28 +3378,3 @@ void __init buffer_init(void)
 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
 	hotcpu_notifier(buffer_cpu_notify, 0);
 }
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_page_mkwrite);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(cont_write_begin);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(fsync_bdev);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_cont_expand_simple);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);
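
The hunks above export end_buffer_async_write() and introduce block_write_full_page_endio(), so a filesystem can supply its own buffer-head completion handler while reusing the generic buffer-backed ->writepage path. Below is a minimal sketch of how a caller might use that API; everything prefixed myfs_ is hypothetical, and only block_write_full_page_endio(), end_buffer_async_write() and the bh_end_io_t signature come from this diff.

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

/* Hypothetical get_block callback, assumed to exist elsewhere in the fs. */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

/*
 * Custom completion handler: do filesystem-private bookkeeping, then
 * chain to the now-exported generic handler, which clears BH_Async_Write
 * and ends PageWriteback once every buffer on the page has completed.
 */
static void myfs_end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	if (!uptodate)
		pr_debug("myfs: write error on block %llu\n",
			 (unsigned long long)bh->b_blocknr);
	end_buffer_async_write(bh, uptodate);
}

/* ->writepage: same as block_write_full_page() except for the handler. */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, myfs_get_block, wbc,
					   myfs_end_buffer_async_write);
}

Note that the handler only changes completion behaviour; the submission path still selects WRITE_SYNC_PLUG vs. WRITE from wbc->sync_mode inside __block_write_full_page(), as documented in the comment added above.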
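
Similarly, the emergency_thaw_all() hunk replaces pdflush_operation() with a self-freeing work item. A standalone sketch of that fire-and-forget pattern follows (the function names here are illustrative, not part of the patch): the submitter allocates the work_struct with GFP_ATOMIC because it may be called from a context that cannot sleep, and it never waits on the work, so ownership of the allocation passes to the handler, which must kfree() it, exactly as do_thaw_all() does with its work argument.

#include <linux/slab.h>
#include <linux/workqueue.h>

static void example_worker(struct work_struct *work)
{
	/* ... perform the deferred operation here ... */
	kfree(work);		/* the handler owns and frees the allocation */
}

static void kick_example_worker(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {		/* on allocation failure the request is dropped */
		INIT_WORK(work, example_worker);
		schedule_work(work);
	}
}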