4 * Copyright (C) 1991, 1992, 2002 Linus Torvalds
8 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 * Removed a lot of unnecessary code and simplified things now that
11 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 * Speed up hash, lru, and free list operations. Use gfp() for allocating
14 * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
16 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler;
53 bh->b_private = private;
55 EXPORT_SYMBOL(init_buffer);
57 static int sync_buffer(void *word)
59 struct block_device *bd;
60 struct buffer_head *bh
61 = container_of(word, struct buffer_head, b_state);
66 blk_run_address_space(bd->bd_inode->i_mapping);
71 void __lock_buffer(struct buffer_head *bh)
73 wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74 TASK_UNINTERRUPTIBLE);
76 EXPORT_SYMBOL(__lock_buffer);
78 void unlock_buffer(struct buffer_head *bh)
80 clear_bit_unlock(BH_Lock, &bh->b_state);
81 smp_mb__after_clear_bit();
82 wake_up_bit(&bh->b_state, BH_Lock);
84 EXPORT_SYMBOL(unlock_buffer);
87 * Block until a buffer comes unlocked. This doesn't stop it
88 * from becoming locked again - you have to lock it yourself
89 * if you want to preserve its state.
91 void __wait_on_buffer(struct buffer_head * bh)
93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
95 EXPORT_SYMBOL(__wait_on_buffer);
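/*
 * Illustrative sketch (not part of this file): the usual caller-side pattern
 * these helpers support when reading a buffer synchronously - essentially
 * what __bread_slow() below does. The error label is hypothetical.
 *
 *      lock_buffer(bh);
 *      if (buffer_uptodate(bh)) {
 *              unlock_buffer(bh);
 *      } else {
 *              get_bh(bh);
 *              bh->b_end_io = end_buffer_read_sync;
 *              submit_bh(READ, bh);
 *              wait_on_buffer(bh);
 *              if (!buffer_uptodate(bh))
 *                      goto io_error;
 *      }
 */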
98 __clear_page_buffers(struct page *page)
100 ClearPagePrivate(page);
101 set_page_private(page, 0);
102 page_cache_release(page);
106 static int quiet_error(struct buffer_head *bh)
108 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
114 static void buffer_io_error(struct buffer_head *bh)
116 char b[BDEVNAME_SIZE];
117 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118 bdevname(bh->b_bdev, b),
119 (unsigned long long)bh->b_blocknr);
123 * End-of-IO handler helper function which does not touch the bh after
125 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126 * a race there is benign: unlock_buffer() only uses the bh's address for
127 * hashing after unlocking the buffer, so it doesn't actually touch the bh
130 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
133 set_buffer_uptodate(bh);
135 /* This happens, due to failed READA attempts. */
136 clear_buffer_uptodate(bh);
142 * Default synchronous end-of-IO handler.. Just mark it up-to-date and
143 * unlock the buffer. This is what ll_rw_block uses too.
145 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
147 __end_buffer_read_notouch(bh, uptodate);
150 EXPORT_SYMBOL(end_buffer_read_sync);
152 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
154 char b[BDEVNAME_SIZE];
157 set_buffer_uptodate(bh);
159 if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
161 printk(KERN_WARNING "lost page write due to "
163 bdevname(bh->b_bdev, b));
165 set_buffer_write_io_error(bh);
166 clear_buffer_uptodate(bh);
171 EXPORT_SYMBOL(end_buffer_write_sync);
174 * Various filesystems appear to want __find_get_block to be non-blocking.
175 * But it's the page lock which protects the buffers. To get around this,
176 * we get exclusion from try_to_free_buffers with the blockdev mapping's
179 * Hack idea: for the blockdev mapping, private_lock contention
180 * may be quite high. This code could TryLock the page, and if that
181 * succeeds, there is no need to take private_lock. (But if
182 * private_lock is contended then so is mapping->tree_lock).
184 static struct buffer_head *
185 __find_get_block_slow(struct block_device *bdev, sector_t block)
187 struct inode *bd_inode = bdev->bd_inode;
188 struct address_space *bd_mapping = bd_inode->i_mapping;
189 struct buffer_head *ret = NULL;
191 struct buffer_head *bh;
192 struct buffer_head *head;
196 index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197 page = find_get_page(bd_mapping, index);
201 spin_lock(&bd_mapping->private_lock);
202 if (!page_has_buffers(page))
204 head = page_buffers(page);
207 if (!buffer_mapped(bh))
209 else if (bh->b_blocknr == block) {
214 bh = bh->b_this_page;
215 } while (bh != head);
217 /* we might be here because some of the buffers on this page are
218 * not mapped. This is due to various races between
219 * file io on the block device and getblk. It gets dealt with
220 * elsewhere, don't buffer_error if we had some unmapped buffers
223 printk("__find_get_block_slow() failed. "
224 "block=%llu, b_blocknr=%llu\n",
225 (unsigned long long)block,
226 (unsigned long long)bh->b_blocknr);
227 printk("b_state=0x%08lx, b_size=%zu\n",
228 bh->b_state, bh->b_size);
229 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
232 spin_unlock(&bd_mapping->private_lock);
233 page_cache_release(page);
238 /* If invalidate_buffers() will trash dirty buffers, it means some kind
239 of fs corruption is going on. Trashing dirty data always implies losing
240 information that was supposed to be just stored on the physical layer
243 Thus invalidate_buffers in general usage is not allowed to trash
244 dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
245 be preserved. These buffers are simply skipped.
247 We also skip buffers which are still in use. For example this can
248 happen if a userspace program is reading the block device.
250 NOTE: In the case where the user removed a removable-media disk even if
251 there's still dirty data not synced on disk (due to a bug in the device driver
252 or due to an error of the user), by not destroying the dirty buffers we could
253 generate corruption also on the next media inserted; thus a parameter is
254 necessary to handle this case in the safest way possible (trying
255 not to corrupt the newly inserted disk with the data belonging to
256 the old, now corrupted, disk). Also for the ramdisk the natural thing
257 to do in order to release the ramdisk memory is to destroy dirty buffers.
259 These are two special cases. Normal usage implies that the device driver
260 issues a sync on the device (without waiting for I/O completion) and
261 then an invalidate_buffers call that doesn't trash dirty buffers.
263 For handling cache coherency with the blkdev pagecache the 'update' case
264 has been introduced. It is needed to re-read from disk any pinned
265 buffer. NOTE: re-reading from disk is destructive so we can do it only
266 when we assume nobody is changing the buffercache under our I/O and when
267 we think the disk contains more recent information than the buffercache.
268 The update == 1 pass marks the buffers we need to update, the update == 2
269 pass does the actual I/O. */
270 void invalidate_bdev(struct block_device *bdev)
272 struct address_space *mapping = bdev->bd_inode->i_mapping;
274 if (mapping->nrpages == 0)
277 invalidate_bh_lrus();
278 invalidate_mapping_pages(mapping, 0, -1);
280 EXPORT_SYMBOL(invalidate_bdev);
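/*
 * Illustrative sketch (not part of this file): the "normal usage" described
 * above - write the device back first, then drop the now-clean buffers.
 * Error handling is omitted and the bdev variable is assumed.
 *
 *      sync_blockdev(bdev);
 *      invalidate_bdev(bdev);
 */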
283 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
285 static void free_more_memory(void)
290 wakeup_flusher_threads(1024);
293 for_each_online_node(nid) {
294 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
295 gfp_zone(GFP_NOFS), NULL,
298 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
304 * I/O completion handler for block_read_full_page() - pages
305 * which come unlocked at the end of I/O.
307 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
310 struct buffer_head *first;
311 struct buffer_head *tmp;
313 int page_uptodate = 1;
315 BUG_ON(!buffer_async_read(bh));
319 set_buffer_uptodate(bh);
321 clear_buffer_uptodate(bh);
322 if (!quiet_error(bh))
328 * Be _very_ careful from here on. Bad things can happen if
329 * two buffer heads end IO at almost the same time and both
330 * decide that the page is now completely done.
332 first = page_buffers(page);
333 local_irq_save(flags);
334 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
335 clear_buffer_async_read(bh);
339 if (!buffer_uptodate(tmp))
341 if (buffer_async_read(tmp)) {
342 BUG_ON(!buffer_locked(tmp));
345 tmp = tmp->b_this_page;
347 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
348 local_irq_restore(flags);
351 * If none of the buffers had errors and they are all
352 * uptodate then we can set the page uptodate.
354 if (page_uptodate && !PageError(page))
355 SetPageUptodate(page);
360 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
361 local_irq_restore(flags);
366 * Completion handler for block_write_full_page() - pages which are unlocked
367 * during I/O, and which have PageWriteback cleared upon I/O completion.
369 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371 char b[BDEVNAME_SIZE];
373 struct buffer_head *first;
374 struct buffer_head *tmp;
377 BUG_ON(!buffer_async_write(bh));
381 set_buffer_uptodate(bh);
383 if (!quiet_error(bh)) {
385 printk(KERN_WARNING "lost page write due to "
387 bdevname(bh->b_bdev, b));
389 set_bit(AS_EIO, &page->mapping->flags);
390 set_buffer_write_io_error(bh);
391 clear_buffer_uptodate(bh);
395 first = page_buffers(page);
396 local_irq_save(flags);
397 bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
399 clear_buffer_async_write(bh);
401 tmp = bh->b_this_page;
403 if (buffer_async_write(tmp)) {
404 BUG_ON(!buffer_locked(tmp));
407 tmp = tmp->b_this_page;
409 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
410 local_irq_restore(flags);
411 end_page_writeback(page);
415 bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
416 local_irq_restore(flags);
419 EXPORT_SYMBOL(end_buffer_async_write);
422 * If a page's buffers are under async read (end_buffer_async_read
423 * completion) then there is a possibility that another thread of
424 * control could lock one of the buffers after it has completed
425 * but while some of the other buffers have not completed. This
426 * locked buffer would confuse end_buffer_async_read() into not unlocking
427 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
428 * that this buffer is not under async I/O.
430 * The page comes unlocked when it has no locked buffer_async buffers
433 * PageLocked prevents anyone starting new async I/O reads any of
436 * PageWriteback is used to prevent simultaneous writeout of the same
439 * PageLocked prevents anyone from starting writeback of a page which is
440 * under read I/O (PageWriteback is only ever set against a locked page).
442 static void mark_buffer_async_read(struct buffer_head *bh)
444 bh->b_end_io = end_buffer_async_read;
445 set_buffer_async_read(bh);
448 static void mark_buffer_async_write_endio(struct buffer_head *bh,
449 bh_end_io_t *handler)
451 bh->b_end_io = handler;
452 set_buffer_async_write(bh);
455 void mark_buffer_async_write(struct buffer_head *bh)
457 mark_buffer_async_write_endio(bh, end_buffer_async_write);
459 EXPORT_SYMBOL(mark_buffer_async_write);
463 * fs/buffer.c contains helper functions for buffer-backed address space's
464 * fsync functions. A common requirement for buffer-based filesystems is
465 * that certain data from the backing blockdev needs to be written out for
466 * a successful fsync(). For example, ext2 indirect blocks need to be
467 * written back and waited upon before fsync() returns.
469 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
470 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
471 * management of a list of dependent buffers at ->i_mapping->private_list.
473 * Locking is a little subtle: try_to_free_buffers() will remove buffers
474 * from their controlling inode's queue when they are being freed. But
475 * try_to_free_buffers() will be operating against the *blockdev* mapping
476 * at the time, not against the S_ISREG file which depends on those buffers.
477 * So the locking for private_list is via the private_lock in the address_space
478 * which backs the buffers. Which is different from the address_space
479 * against which the buffers are listed. So for a particular address_space,
480 * mapping->private_lock does *not* protect mapping->private_list! In fact,
481 * mapping->private_list will always be protected by the backing blockdev's
484 * Which introduces a requirement: all buffers on an address_space's
485 * ->private_list must be from the same address_space: the blockdev's.
487 * address_spaces which do not place buffers at ->private_list via these
488 * utility functions are free to use private_lock and private_list for
489 * whatever they want. The only requirement is that list_empty(private_list)
490 * be true at clear_inode() time.
492 * FIXME: clear_inode should not call invalidate_inode_buffers(). The
493 * filesystems should do that. invalidate_inode_buffers() should just go
494 * BUG_ON(!list_empty).
496 * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
497 * take an address_space, not an inode. And it should be called
498 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
501 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
502 * list if it is already on a list. Because if the buffer is on a list,
503 * it *must* already be on the right one. If not, the filesystem is being
504 * silly. This will save a ton of locking. But first we have to ensure
505 * that buffers are taken *off* the old inode's list when they are freed
506 * (presumably in truncate). That requires careful auditing of all
507 * filesystems (do it inside bforget()). It could also be done by bringing
512 * The buffer's backing address_space's private_lock must be held
514 static void __remove_assoc_queue(struct buffer_head *bh)
516 list_del_init(&bh->b_assoc_buffers);
517 WARN_ON(!bh->b_assoc_map);
518 if (buffer_write_io_error(bh))
519 set_bit(AS_EIO, &bh->b_assoc_map->flags);
520 bh->b_assoc_map = NULL;
523 int inode_has_buffers(struct inode *inode)
525 return !list_empty(&inode->i_data.private_list);
529 * osync is designed to support O_SYNC io. It waits synchronously for
530 * all already-submitted IO to complete, but does not queue any new
531 * writes to the disk.
533 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
534 * you dirty the buffers, and then use osync_inode_buffers to wait for
535 * completion. Any other dirty buffers which are not yet queued for
536 * write will not be flushed to disk by the osync.
538 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
540 struct buffer_head *bh;
546 list_for_each_prev(p, list) {
548 if (buffer_locked(bh)) {
552 if (!buffer_uptodate(bh))
563 static void do_thaw_all(struct work_struct *work)
565 struct super_block *sb, *n;
566 char b[BDEVNAME_SIZE];
569 list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
570 if (list_empty(&sb->s_instances))
573 spin_unlock(&sb_lock);
574 down_read(&sb->s_umount);
575 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
576 printk(KERN_WARNING "Emergency Thaw on %s\n",
577 bdevname(sb->s_bdev, b));
578 up_read(&sb->s_umount);
581 spin_unlock(&sb_lock);
583 printk(KERN_WARNING "Emergency Thaw complete\n");
587 * emergency_thaw_all -- forcibly thaw every frozen filesystem
589 * Used for emergency unfreeze of all filesystems via SysRq
591 void emergency_thaw_all(void)
593 struct work_struct *work;
595 work = kmalloc(sizeof(*work), GFP_ATOMIC);
597 INIT_WORK(work, do_thaw_all);
603 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
604 * @mapping: the mapping which wants those buffers written
606 * Starts I/O against the buffers at mapping->private_list, and waits upon
609 * Basically, this is a convenience function for fsync().
610 * @mapping is a file or directory which needs those buffers to be written for
611 * a successful fsync().
613 int sync_mapping_buffers(struct address_space *mapping)
615 struct address_space *buffer_mapping = mapping->assoc_mapping;
617 if (buffer_mapping == NULL || list_empty(&mapping->private_list))
620 return fsync_buffers_list(&buffer_mapping->private_lock,
621 &mapping->private_list);
623 EXPORT_SYMBOL(sync_mapping_buffers);
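/*
 * Illustrative sketch (not part of this file): a simple filesystem ->fsync()
 * built on sync_mapping_buffers(), flushing the metadata buffers it queued
 * earlier with mark_buffer_dirty_inode(). "examplefs" is hypothetical.
 *
 *      static int examplefs_fsync(struct file *file, struct dentry *dentry,
 *                                 int datasync)
 *      {
 *              struct inode *inode = dentry->d_inode;
 *
 *              return sync_mapping_buffers(inode->i_mapping);
 *      }
 */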
626 * Called when we've recently written block `bblock', and it is known that
627 * `bblock' was for a buffer_boundary() buffer. This means that the block at
628 * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
629 * dirty, schedule it for IO. So that indirects merge nicely with their data.
631 void write_boundary_block(struct block_device *bdev,
632 sector_t bblock, unsigned blocksize)
634 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
636 if (buffer_dirty(bh))
637 ll_rw_block(WRITE, 1, &bh);
642 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
644 struct address_space *mapping = inode->i_mapping;
645 struct address_space *buffer_mapping = bh->b_page->mapping;
647 mark_buffer_dirty(bh);
648 if (!mapping->assoc_mapping) {
649 mapping->assoc_mapping = buffer_mapping;
651 BUG_ON(mapping->assoc_mapping != buffer_mapping);
653 if (!bh->b_assoc_map) {
654 spin_lock(&buffer_mapping->private_lock);
655 list_move_tail(&bh->b_assoc_buffers,
656 &mapping->private_list);
657 bh->b_assoc_map = mapping;
658 spin_unlock(&buffer_mapping->private_lock);
661 EXPORT_SYMBOL(mark_buffer_dirty_inode);
664 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
667 * If warn is true, then emit a warning if the page is not uptodate and has
668 * not been truncated.
670 static void __set_page_dirty(struct page *page,
671 struct address_space *mapping, int warn)
673 spin_lock_irq(&mapping->tree_lock);
674 if (page->mapping) { /* Race with truncate? */
675 WARN_ON_ONCE(warn && !PageUptodate(page));
676 account_page_dirtied(page, mapping);
677 radix_tree_tag_set(&mapping->page_tree,
678 page_index(page), PAGECACHE_TAG_DIRTY);
680 spin_unlock_irq(&mapping->tree_lock);
681 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
685 * Add a page to the dirty page list.
687 * It is a sad fact of life that this function is called from several places
688 * deeply under spinlocking. It may not sleep.
690 * If the page has buffers, the uptodate buffers are set dirty, to preserve
691 * dirty-state coherency between the page and the buffers. If the page does
692 * not have buffers then when they are later attached they will all be set
695 * The buffers are dirtied before the page is dirtied. There's a small race
696 * window in which a writepage caller may see the page cleanness but not the
697 * buffer dirtiness. That's fine. If this code were to set the page dirty
698 * before the buffers, a concurrent writepage caller could clear the page dirty
699 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
700 * page on the dirty page list.
702 * We use private_lock to lock against try_to_free_buffers while using the
703 * page's buffer list. Also use this to protect against clean buffers being
704 * added to the page after it was set dirty.
706 * FIXME: may need to call ->reservepage here as well. That's rather up to the
707 * address_space though.
709 int __set_page_dirty_buffers(struct page *page)
712 struct address_space *mapping = page_mapping(page);
714 if (unlikely(!mapping))
715 return !TestSetPageDirty(page);
717 spin_lock(&mapping->private_lock);
718 if (page_has_buffers(page)) {
719 struct buffer_head *head = page_buffers(page);
720 struct buffer_head *bh = head;
723 set_buffer_dirty(bh);
724 bh = bh->b_this_page;
725 } while (bh != head);
727 newly_dirty = !TestSetPageDirty(page);
728 spin_unlock(&mapping->private_lock);
731 __set_page_dirty(page, mapping, 1);
734 EXPORT_SYMBOL(__set_page_dirty_buffers);
737 * Write out and wait upon a list of buffers.
739 * We have conflicting pressures: we want to make sure that all
740 * initially dirty buffers get waited on, but that any subsequently
741 * dirtied buffers don't. After all, we don't want fsync to last
742 * forever if somebody is actively writing to the file.
744 * Do this in two main stages: first we copy dirty buffers to a
745 * temporary inode list, queueing the writes as we go. Then we clean
746 * up, waiting for those writes to complete.
748 * During this second stage, any subsequent updates to the file may end
749 * up refiling the buffer on the original inode's dirty list again, so
750 * there is a chance we will end up with a buffer queued for write but
751 * not yet completed on that list. So, as a final cleanup we go through
752 * the osync code to catch these locked, dirty buffers without requeuing
753 * any newly dirty buffers for write.
755 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
757 struct buffer_head *bh;
758 struct list_head tmp;
759 struct address_space *mapping, *prev_mapping = NULL;
762 INIT_LIST_HEAD(&tmp);
765 while (!list_empty(list)) {
766 bh = BH_ENTRY(list->next);
767 mapping = bh->b_assoc_map;
768 __remove_assoc_queue(bh);
769 /* Avoid race with mark_buffer_dirty_inode() which does
770 * a lockless check and we rely on seeing the dirty bit */
772 if (buffer_dirty(bh) || buffer_locked(bh)) {
773 list_add(&bh->b_assoc_buffers, &tmp);
774 bh->b_assoc_map = mapping;
775 if (buffer_dirty(bh)) {
779 * Ensure any pending I/O completes so that
780 * ll_rw_block() actually writes the current
781 * contents - it is a noop if I/O is still in
782 * flight on potentially older contents.
784 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
787 * Kick off IO for the previous mapping. Note
788 * that we will not run the very last mapping,
789 * wait_on_buffer() will do that for us
790 * through sync_buffer().
792 if (prev_mapping && prev_mapping != mapping)
793 blk_run_address_space(prev_mapping);
794 prev_mapping = mapping;
802 while (!list_empty(&tmp)) {
803 bh = BH_ENTRY(tmp.prev);
805 mapping = bh->b_assoc_map;
806 __remove_assoc_queue(bh);
807 /* Avoid race with mark_buffer_dirty_inode() which does
808 * a lockless check and we rely on seeing the dirty bit */
810 if (buffer_dirty(bh)) {
811 list_add(&bh->b_assoc_buffers,
812 &mapping->private_list);
813 bh->b_assoc_map = mapping;
817 if (!buffer_uptodate(bh))
824 err2 = osync_buffers_list(lock, list);
832 * Invalidate any and all dirty buffers on a given inode. We are
833 * probably unmounting the fs, but that doesn't mean we have already
834 * done a sync(). Just drop the buffers from the inode list.
836 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
837 * assumes that all the buffers are against the blockdev. Not true
840 void invalidate_inode_buffers(struct inode *inode)
842 if (inode_has_buffers(inode)) {
843 struct address_space *mapping = &inode->i_data;
844 struct list_head *list = &mapping->private_list;
845 struct address_space *buffer_mapping = mapping->assoc_mapping;
847 spin_lock(&buffer_mapping->private_lock);
848 while (!list_empty(list))
849 __remove_assoc_queue(BH_ENTRY(list->next));
850 spin_unlock(&buffer_mapping->private_lock);
853 EXPORT_SYMBOL(invalidate_inode_buffers);
856 * Remove any clean buffers from the inode's buffer list. This is called
857 * when we're trying to free the inode itself. Those buffers can pin it.
859 * Returns true if all buffers were removed.
861 int remove_inode_buffers(struct inode *inode)
865 if (inode_has_buffers(inode)) {
866 struct address_space *mapping = &inode->i_data;
867 struct list_head *list = &mapping->private_list;
868 struct address_space *buffer_mapping = mapping->assoc_mapping;
870 spin_lock(&buffer_mapping->private_lock);
871 while (!list_empty(list)) {
872 struct buffer_head *bh = BH_ENTRY(list->next);
873 if (buffer_dirty(bh)) {
877 __remove_assoc_queue(bh);
879 spin_unlock(&buffer_mapping->private_lock);
885 * Create the appropriate buffers when given a page for data area and
886 * the size of each buffer.. Use the bh->b_this_page linked list to
887 * follow the buffers created. Return NULL if unable to create more
890 * The retry flag is used to differentiate async IO (paging, swapping),
891 * which may not fail, from ordinary buffer allocations.
893 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
896 struct buffer_head *bh, *head;
902 while ((offset -= size) >= 0) {
903 bh = alloc_buffer_head(GFP_NOFS);
908 bh->b_this_page = head;
913 atomic_set(&bh->b_count, 0);
914 bh->b_private = NULL;
917 /* Link the buffer to its page */
918 set_bh_page(bh, page, offset);
920 init_buffer(bh, NULL, NULL);
924 * In case anything failed, we just free everything we got.
930 head = head->b_this_page;
931 free_buffer_head(bh);
936 * Return failure for non-async IO requests. Async IO requests
937 * are not allowed to fail, so we have to wait until buffer heads
938 * become available. But we don't want tasks sleeping with
939 * partially complete buffers, so all were released above.
944 /* We're _really_ low on memory. Now we just
945 * wait for old buffer heads to become free due to
946 * finishing IO. Since this is an async request and
947 * the reserve list is empty, we're sure there are
948 * async buffer heads in use.
953 EXPORT_SYMBOL_GPL(alloc_page_buffers);
956 link_dev_buffers(struct page *page, struct buffer_head *head)
958 struct buffer_head *bh, *tail;
963 bh = bh->b_this_page;
965 tail->b_this_page = head;
966 attach_page_buffers(page, head);
970 * Initialise the state of a blockdev page's buffers.
973 init_page_buffers(struct page *page, struct block_device *bdev,
974 sector_t block, int size)
976 struct buffer_head *head = page_buffers(page);
977 struct buffer_head *bh = head;
978 int uptodate = PageUptodate(page);
981 if (!buffer_mapped(bh)) {
982 init_buffer(bh, NULL, NULL);
984 bh->b_blocknr = block;
986 set_buffer_uptodate(bh);
987 set_buffer_mapped(bh);
990 bh = bh->b_this_page;
991 } while (bh != head);
995 * Create the page-cache page that contains the requested block.
997 * This is used purely for blockdev mappings.
1000 grow_dev_page(struct block_device *bdev, sector_t block,
1001 pgoff_t index, int size)
1003 struct inode *inode = bdev->bd_inode;
1005 struct buffer_head *bh;
1007 page = find_or_create_page(inode->i_mapping, index,
1008 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1012 BUG_ON(!PageLocked(page));
1014 if (page_has_buffers(page)) {
1015 bh = page_buffers(page);
1016 if (bh->b_size == size) {
1017 init_page_buffers(page, bdev, block, size);
1020 if (!try_to_free_buffers(page))
1025 * Allocate some buffers for this page
1027 bh = alloc_page_buffers(page, size, 0);
1032 * Link the page to the buffers and initialise them. Take the
1033 * lock to be atomic wrt __find_get_block(), which does not
1034 * run under the page lock.
1036 spin_lock(&inode->i_mapping->private_lock);
1037 link_dev_buffers(page, bh);
1038 init_page_buffers(page, bdev, block, size);
1039 spin_unlock(&inode->i_mapping->private_lock);
1045 page_cache_release(page);
1050 * Create buffers for the specified block device block's page. If
1051 * that page was dirty, the buffers are set dirty also.
1054 grow_buffers(struct block_device *bdev, sector_t block, int size)
1063 } while ((size << sizebits) < PAGE_SIZE);
1065 index = block >> sizebits;
1068 * Check for a block which wants to lie outside our maximum possible
1069 * pagecache index. (this comparison is done using sector_t types).
1071 if (unlikely(index != block >> sizebits)) {
1072 char b[BDEVNAME_SIZE];
1074 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1076 __func__, (unsigned long long)block,
1080 block = index << sizebits;
1081 /* Create a page with the proper size buffers.. */
1082 page = grow_dev_page(bdev, block, index, size);
1086 page_cache_release(page);
1090 static struct buffer_head *
1091 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1093 /* Size must be multiple of hard sectorsize */
1094 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1095 (size < 512 || size > PAGE_SIZE))) {
1096 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1098 printk(KERN_ERR "logical block size: %d\n",
1099 bdev_logical_block_size(bdev));
1106 struct buffer_head * bh;
1109 bh = __find_get_block(bdev, block, size);
1113 ret = grow_buffers(bdev, block, size);
1122 * The relationship between dirty buffers and dirty pages:
1124 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1125 * the page is tagged dirty in its radix tree.
1127 * At all times, the dirtiness of the buffers represents the dirtiness of
1128 * subsections of the page. If the page has buffers, the page dirty bit is
1129 * merely a hint about the true dirty state.
1131 * When a page is set dirty in its entirety, all its buffers are marked dirty
1132 * (if the page has buffers).
1134 * When a buffer is marked dirty, its page is dirtied, but the page's other
1137 * Also. When blockdev buffers are explicitly read with bread(), they
1138 * individually become uptodate. But their backing page remains not
1139 * uptodate - even if all of its buffers are uptodate. A subsequent
1140 * block_read_full_page() against that page will discover all the uptodate
1141 * buffers, will set the page uptodate and will perform no I/O.
1145 * mark_buffer_dirty - mark a buffer_head as needing writeout
1146 * @bh: the buffer_head to mark dirty
1148 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1149 * backing page dirty, then tag the page as dirty in its address_space's radix
1150 * tree and then attach the address_space's inode to its superblock's dirty
1153 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1154 * mapping->tree_lock and the global inode_lock.
1156 void mark_buffer_dirty(struct buffer_head *bh)
1158 WARN_ON_ONCE(!buffer_uptodate(bh));
1161 * Very *carefully* optimize the it-is-already-dirty case.
1163 * Don't let the final "is it dirty" escape to before we
1164 * perhaps modified the buffer.
1166 if (buffer_dirty(bh)) {
1168 if (buffer_dirty(bh))
1172 if (!test_set_buffer_dirty(bh)) {
1173 struct page *page = bh->b_page;
1174 if (!TestSetPageDirty(page)) {
1175 struct address_space *mapping = page_mapping(page);
1177 __set_page_dirty(page, mapping, 0);
1181 EXPORT_SYMBOL(mark_buffer_dirty);
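/*
 * Illustrative sketch (not part of this file): the classic read-modify-write
 * cycle on a metadata block using these primitives. sb, blocknr, offset,
 * data and len are assumed to be provided by the (hypothetical) caller.
 *
 *      struct buffer_head *bh = sb_bread(sb, blocknr);
 *
 *      if (!bh)
 *              return -EIO;
 *      memcpy(bh->b_data + offset, data, len);
 *      mark_buffer_dirty(bh);
 *      brelse(bh);
 */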
1184 * Decrement a buffer_head's reference count. If all buffers against a page
1185 * have zero reference count, are clean and unlocked, and if the page is clean
1186 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1187 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1188 * a page but it ends up not being freed, and buffers may later be reattached).
1190 void __brelse(struct buffer_head * buf)
1192 if (atomic_read(&buf->b_count)) {
1196 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1198 EXPORT_SYMBOL(__brelse);
1201 * bforget() is like brelse(), except it discards any
1202 * potentially dirty data.
1204 void __bforget(struct buffer_head *bh)
1206 clear_buffer_dirty(bh);
1207 if (bh->b_assoc_map) {
1208 struct address_space *buffer_mapping = bh->b_page->mapping;
1210 spin_lock(&buffer_mapping->private_lock);
1211 list_del_init(&bh->b_assoc_buffers);
1212 bh->b_assoc_map = NULL;
1213 spin_unlock(&buffer_mapping->private_lock);
1217 EXPORT_SYMBOL(__bforget);
1219 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1222 if (buffer_uptodate(bh)) {
1227 bh->b_end_io = end_buffer_read_sync;
1228 submit_bh(READ, bh);
1230 if (buffer_uptodate(bh))
1238 * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
1239 * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
1240 * refcount elevated by one when they're in an LRU. A buffer can only appear
1241 * once in a particular CPU's LRU. A single buffer can be present in multiple
1242 * CPU's LRUs at the same time.
1244 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1245 * sb_find_get_block().
1247 * The LRUs themselves only need locking against invalidate_bh_lrus. We use
1248 * a local interrupt disable for that.
1251 #define BH_LRU_SIZE 8
1254 struct buffer_head *bhs[BH_LRU_SIZE];
1257 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1260 #define bh_lru_lock() local_irq_disable()
1261 #define bh_lru_unlock() local_irq_enable()
1263 #define bh_lru_lock() preempt_disable()
1264 #define bh_lru_unlock() preempt_enable()
1267 static inline void check_irqs_on(void)
1269 #ifdef irqs_disabled
1270 BUG_ON(irqs_disabled());
1275 * The LRU management algorithm is dopey-but-simple. Sorry.
1277 static void bh_lru_install(struct buffer_head *bh)
1279 struct buffer_head *evictee = NULL;
1284 lru = &__get_cpu_var(bh_lrus);
1285 if (lru->bhs[0] != bh) {
1286 struct buffer_head *bhs[BH_LRU_SIZE];
1292 for (in = 0; in < BH_LRU_SIZE; in++) {
1293 struct buffer_head *bh2 = lru->bhs[in];
1298 if (out >= BH_LRU_SIZE) {
1299 BUG_ON(evictee != NULL);
1306 while (out < BH_LRU_SIZE)
1308 memcpy(lru->bhs, bhs, sizeof(bhs));
1317 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1319 static struct buffer_head *
1320 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1322 struct buffer_head *ret = NULL;
1328 lru = &__get_cpu_var(bh_lrus);
1329 for (i = 0; i < BH_LRU_SIZE; i++) {
1330 struct buffer_head *bh = lru->bhs[i];
1332 if (bh && bh->b_bdev == bdev &&
1333 bh->b_blocknr == block && bh->b_size == size) {
1336 lru->bhs[i] = lru->bhs[i - 1];
1351 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1352 * it in the LRU and mark it as accessed. If it is not present then return
1355 struct buffer_head *
1356 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1358 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1361 bh = __find_get_block_slow(bdev, block);
1369 EXPORT_SYMBOL(__find_get_block);
1372 * __getblk will locate (and, if necessary, create) the buffer_head
1373 * which corresponds to the passed block_device, block and size. The
1374 * returned buffer has its reference count incremented.
1376 * __getblk() cannot fail - it just keeps trying. If you pass it an
1377 * illegal block number, __getblk() will happily return a buffer_head
1378 * which represents the non-existent block. Very weird.
1380 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1381 * attempt is failing. FIXME, perhaps?
1383 struct buffer_head *
1384 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1386 struct buffer_head *bh = __find_get_block(bdev, block, size);
1390 bh = __getblk_slow(bdev, block, size);
1393 EXPORT_SYMBOL(__getblk);
1396 * Do async read-ahead on a buffer..
1398 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1400 struct buffer_head *bh = __getblk(bdev, block, size);
1402 ll_rw_block(READA, 1, &bh);
1406 EXPORT_SYMBOL(__breadahead);
1409 * __bread() - reads a specified block and returns the bh
1410 * @bdev: the block_device to read from
1411 * @block: number of block
1412 * @size: size (in bytes) to read
1414 * Reads a specified block, and returns buffer head that contains it.
1415 * It returns NULL if the block was unreadable.
1417 struct buffer_head *
1418 __bread(struct block_device *bdev, sector_t block, unsigned size)
1420 struct buffer_head *bh = __getblk(bdev, block, size);
1422 if (likely(bh) && !buffer_uptodate(bh))
1423 bh = __bread_slow(bh);
1426 EXPORT_SYMBOL(__bread);
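/*
 * Illustrative sketch (not part of this file): reading an on-disk superblock
 * at mount time via sb_bread(), a thin wrapper around __bread(). All
 * "EXAMPLEFS_*" and "examplefs_*" names are hypothetical.
 *
 *      struct buffer_head *bh = sb_bread(sb, EXAMPLEFS_SB_BLOCK);
 *      struct examplefs_super_block *es;
 *
 *      if (!bh)
 *              return -EIO;
 *      es = (struct examplefs_super_block *)bh->b_data;
 *      if (le32_to_cpu(es->s_magic) != EXAMPLEFS_MAGIC) {
 *              brelse(bh);
 *              return -EINVAL;
 *      }
 *      brelse(bh);
 */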
1429 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1430 * This doesn't race because it runs in each cpu either in irq
1431 * or with preempt disabled.
1433 static void invalidate_bh_lru(void *arg)
1435 struct bh_lru *b = &get_cpu_var(bh_lrus);
1438 for (i = 0; i < BH_LRU_SIZE; i++) {
1442 put_cpu_var(bh_lrus);
1445 void invalidate_bh_lrus(void)
1447 on_each_cpu(invalidate_bh_lru, NULL, 1);
1449 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1451 void set_bh_page(struct buffer_head *bh,
1452 struct page *page, unsigned long offset)
1455 BUG_ON(offset >= PAGE_SIZE);
1456 if (PageHighMem(page))
1458 * This catches illegal uses and preserves the offset:
1460 bh->b_data = (char *)(0 + offset);
1462 bh->b_data = page_address(page) + offset;
1464 EXPORT_SYMBOL(set_bh_page);
1467 * Called when truncating a buffer on a page completely.
1469 static void discard_buffer(struct buffer_head * bh)
1472 clear_buffer_dirty(bh);
1474 clear_buffer_mapped(bh);
1475 clear_buffer_req(bh);
1476 clear_buffer_new(bh);
1477 clear_buffer_delay(bh);
1478 clear_buffer_unwritten(bh);
1483 * block_invalidatepage - invalidate part or all of a buffer-backed page
1485 * @page: the page which is affected
1486 * @offset: the index of the truncation point
1488 * block_invalidatepage() is called when all or part of the page has become
1489 * invalidated by a truncate operation.
1491 * block_invalidatepage() does not have to release all buffers, but it must
1492 * ensure that no dirty buffer is left outside @offset and that no I/O
1493 * is underway against any of the blocks which are outside the truncation
1494 * point. Because the caller is about to free (and possibly reuse) those
1497 void block_invalidatepage(struct page *page, unsigned long offset)
1499 struct buffer_head *head, *bh, *next;
1500 unsigned int curr_off = 0;
1502 BUG_ON(!PageLocked(page));
1503 if (!page_has_buffers(page))
1506 head = page_buffers(page);
1509 unsigned int next_off = curr_off + bh->b_size;
1510 next = bh->b_this_page;
1513 * is this block fully invalidated?
1515 if (offset <= curr_off)
1517 curr_off = next_off;
1519 } while (bh != head);
1522 * We release buffers only if the entire page is being invalidated.
1523 * The get_block cached value has been unconditionally invalidated,
1524 * so real IO is not possible anymore.
1527 try_to_release_page(page, 0);
1531 EXPORT_SYMBOL(block_invalidatepage);
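/*
 * Illustrative sketch (not part of this file): a filesystem with no private
 * per-page state can wrap this helper for its ->invalidatepage.
 * "examplefs" is hypothetical.
 *
 *      static void examplefs_invalidatepage(struct page *page,
 *                                           unsigned long offset)
 *      {
 *              block_invalidatepage(page, offset);
 *      }
 */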
1534 * We attach and possibly dirty the buffers atomically wrt
1535 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1536 * is already excluded via the page lock.
1538 void create_empty_buffers(struct page *page,
1539 unsigned long blocksize, unsigned long b_state)
1541 struct buffer_head *bh, *head, *tail;
1543 head = alloc_page_buffers(page, blocksize, 1);
1546 bh->b_state |= b_state;
1548 bh = bh->b_this_page;
1550 tail->b_this_page = head;
1552 spin_lock(&page->mapping->private_lock);
1553 if (PageUptodate(page) || PageDirty(page)) {
1556 if (PageDirty(page))
1557 set_buffer_dirty(bh);
1558 if (PageUptodate(page))
1559 set_buffer_uptodate(bh);
1560 bh = bh->b_this_page;
1561 } while (bh != head);
1563 attach_page_buffers(page, head);
1564 spin_unlock(&page->mapping->private_lock);
1566 EXPORT_SYMBOL(create_empty_buffers);
1569 * We are taking a block for data and we don't want any output from any
1570 * buffer-cache aliases starting from return from that function and
1571 * until the moment when something will explicitly mark the buffer
1572 * dirty (hopefully that will not happen until we free that block ;-)
1573 * We don't even need to mark it not-uptodate - nobody can expect
1574 * anything from a newly allocated buffer anyway. We used to use
1575 * unmap_buffer() for such invalidation, but that was wrong. We definitely
1576 * don't want to mark the alias unmapped, for example - it would confuse
1577 * anyone who might pick it with bread() afterwards...
1579 * Also.. Note that bforget() doesn't lock the buffer. So there can
1580 * be writeout I/O going on against recently-freed buffers. We don't
1581 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1582 * only if we really need to. That happens here.
1584 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1586 struct buffer_head *old_bh;
1590 old_bh = __find_get_block_slow(bdev, block);
1592 clear_buffer_dirty(old_bh);
1593 wait_on_buffer(old_bh);
1594 clear_buffer_req(old_bh);
1598 EXPORT_SYMBOL(unmap_underlying_metadata);
1601 * NOTE! All mapped/uptodate combinations are valid:
1603 * Mapped Uptodate Meaning
1605 * No No "unknown" - must do get_block()
1606 * No Yes "hole" - zero-filled
1607 * Yes No "allocated" - allocated on disk, not read in
1608 * Yes Yes "valid" - allocated and up-to-date in memory.
1610 * "Dirty" is valid only with the last case (mapped+uptodate).
1614 * While block_write_full_page is writing back the dirty buffers under
1615 * the page lock, whoever dirtied the buffers may decide to clean them
1616 * again at any time. We handle that by only looking at the buffer
1617 * state inside lock_buffer().
1619 * If block_write_full_page() is called for regular writeback
1620 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1621 * locked buffer. This only can happen if someone has written the buffer
1622 * directly, with submit_bh(). At the address_space level PageWriteback
1623 * prevents this contention from occurring.
1625 * If block_write_full_page() is called with wbc->sync_mode ==
1626 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1627 * causes the writes to be flagged as synchronous writes, but the
1628 * block device queue will NOT be unplugged, since usually many pages
1629 * will be pushed out to the queue before the higher-level caller actually
1630 * waits for the writes to be completed. The various wait functions,
1631 * such as wait_on_writeback_range() will ultimately call sync_page()
1632 * which will ultimately call blk_run_backing_dev(), which will end up
1633 * unplugging the device queue.
1635 static int __block_write_full_page(struct inode *inode, struct page *page,
1636 get_block_t *get_block, struct writeback_control *wbc,
1637 bh_end_io_t *handler)
1641 sector_t last_block;
1642 struct buffer_head *bh, *head;
1643 const unsigned blocksize = 1 << inode->i_blkbits;
1644 int nr_underway = 0;
1645 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1646 WRITE_SYNC_PLUG : WRITE);
1648 BUG_ON(!PageLocked(page));
1650 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1652 if (!page_has_buffers(page)) {
1653 create_empty_buffers(page, blocksize,
1654 (1 << BH_Dirty)|(1 << BH_Uptodate));
1658 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1659 * here, and the (potentially unmapped) buffers may become dirty at
1660 * any time. If a buffer becomes dirty here after we've inspected it
1661 * then we just miss that fact, and the page stays dirty.
1663 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1664 * handle that here by just cleaning them.
1667 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1668 head = page_buffers(page);
1672 * Get all the dirty buffers mapped to disk addresses and
1673 * handle any aliases from the underlying blockdev's mapping.
1676 if (block > last_block) {
1678 * mapped buffers outside i_size will occur, because
1679 * this page can be outside i_size when there is a
1680 * truncate in progress.
1683 * The buffer was zeroed by block_write_full_page()
1685 clear_buffer_dirty(bh);
1686 set_buffer_uptodate(bh);
1687 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1689 WARN_ON(bh->b_size != blocksize);
1690 err = get_block(inode, block, bh, 1);
1693 clear_buffer_delay(bh);
1694 if (buffer_new(bh)) {
1695 /* blockdev mappings never come here */
1696 clear_buffer_new(bh);
1697 unmap_underlying_metadata(bh->b_bdev,
1701 bh = bh->b_this_page;
1703 } while (bh != head);
1706 if (!buffer_mapped(bh))
1709 * If it's a fully non-blocking write attempt and we cannot
1710 * lock the buffer then redirty the page. Note that this can
1711 * potentially cause a busy-wait loop from writeback threads
1712 * and kswapd activity, but those code paths have their own
1713 * higher-level throttling.
1715 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1717 } else if (!trylock_buffer(bh)) {
1718 redirty_page_for_writepage(wbc, page);
1721 if (test_clear_buffer_dirty(bh)) {
1722 mark_buffer_async_write_endio(bh, handler);
1726 } while ((bh = bh->b_this_page) != head);
1729 * The page and its buffers are protected by PageWriteback(), so we can
1730 * drop the bh refcounts early.
1732 BUG_ON(PageWriteback(page));
1733 set_page_writeback(page);
1736 struct buffer_head *next = bh->b_this_page;
1737 if (buffer_async_write(bh)) {
1738 submit_bh(write_op, bh);
1742 } while (bh != head);
1747 if (nr_underway == 0) {
1749 * The page was marked dirty, but the buffers were
1750 * clean. Someone wrote them back by hand with
1751 * ll_rw_block/submit_bh. A rare case.
1753 end_page_writeback(page);
1756 * The page and buffer_heads can be released at any time from
1764 * ENOSPC, or some other error. We may already have added some
1765 * blocks to the file, so we need to write these out to avoid
1766 * exposing stale data.
1767 * The page is currently locked and not marked for writeback
1770 /* Recovery: lock and submit the mapped buffers */
1772 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1773 !buffer_delay(bh)) {
1775 mark_buffer_async_write_endio(bh, handler);
1778 * The buffer may have been set dirty during
1779 * attachment to a dirty page.
1781 clear_buffer_dirty(bh);
1783 } while ((bh = bh->b_this_page) != head);
1785 BUG_ON(PageWriteback(page));
1786 mapping_set_error(page->mapping, err);
1787 set_page_writeback(page);
1789 struct buffer_head *next = bh->b_this_page;
1790 if (buffer_async_write(bh)) {
1791 clear_buffer_dirty(bh);
1792 submit_bh(write_op, bh);
1796 } while (bh != head);
1802 * If a page has any new buffers, zero them out here, and mark them uptodate
1803 * and dirty so they'll be written out (in order to prevent uninitialised
1804 * block data from leaking). And clear the new bit.
1806 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1808 unsigned int block_start, block_end;
1809 struct buffer_head *head, *bh;
1811 BUG_ON(!PageLocked(page));
1812 if (!page_has_buffers(page))
1815 bh = head = page_buffers(page);
1818 block_end = block_start + bh->b_size;
1820 if (buffer_new(bh)) {
1821 if (block_end > from && block_start < to) {
1822 if (!PageUptodate(page)) {
1823 unsigned start, size;
1825 start = max(from, block_start);
1826 size = min(to, block_end) - start;
1828 zero_user(page, start, size);
1829 set_buffer_uptodate(bh);
1832 clear_buffer_new(bh);
1833 mark_buffer_dirty(bh);
1837 block_start = block_end;
1838 bh = bh->b_this_page;
1839 } while (bh != head);
1841 EXPORT_SYMBOL(page_zero_new_buffers);
1843 static int __block_prepare_write(struct inode *inode, struct page *page,
1844 unsigned from, unsigned to, get_block_t *get_block)
1846 unsigned block_start, block_end;
1849 unsigned blocksize, bbits;
1850 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1852 BUG_ON(!PageLocked(page));
1853 BUG_ON(from > PAGE_CACHE_SIZE);
1854 BUG_ON(to > PAGE_CACHE_SIZE);
1857 blocksize = 1 << inode->i_blkbits;
1858 if (!page_has_buffers(page))
1859 create_empty_buffers(page, blocksize, 0);
1860 head = page_buffers(page);
1862 bbits = inode->i_blkbits;
1863 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1865 for(bh = head, block_start = 0; bh != head || !block_start;
1866 block++, block_start=block_end, bh = bh->b_this_page) {
1867 block_end = block_start + blocksize;
1868 if (block_end <= from || block_start >= to) {
1869 if (PageUptodate(page)) {
1870 if (!buffer_uptodate(bh))
1871 set_buffer_uptodate(bh);
1876 clear_buffer_new(bh);
1877 if (!buffer_mapped(bh)) {
1878 WARN_ON(bh->b_size != blocksize);
1879 err = get_block(inode, block, bh, 1);
1882 if (buffer_new(bh)) {
1883 unmap_underlying_metadata(bh->b_bdev,
1885 if (PageUptodate(page)) {
1886 clear_buffer_new(bh);
1887 set_buffer_uptodate(bh);
1888 mark_buffer_dirty(bh);
1891 if (block_end > to || block_start < from)
1892 zero_user_segments(page,
1898 if (PageUptodate(page)) {
1899 if (!buffer_uptodate(bh))
1900 set_buffer_uptodate(bh);
1903 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1904 !buffer_unwritten(bh) &&
1905 (block_start < from || block_end > to)) {
1906 ll_rw_block(READ, 1, &bh);
1911 * If we issued read requests - let them complete.
1913 while(wait_bh > wait) {
1914 wait_on_buffer(*--wait_bh);
1915 if (!buffer_uptodate(*wait_bh))
1919 page_zero_new_buffers(page, from, to);
1923 static int __block_commit_write(struct inode *inode, struct page *page,
1924 unsigned from, unsigned to)
1926 unsigned block_start, block_end;
1929 struct buffer_head *bh, *head;
1931 blocksize = 1 << inode->i_blkbits;
1933 for(bh = head = page_buffers(page), block_start = 0;
1934 bh != head || !block_start;
1935 block_start=block_end, bh = bh->b_this_page) {
1936 block_end = block_start + blocksize;
1937 if (block_end <= from || block_start >= to) {
1938 if (!buffer_uptodate(bh))
1941 set_buffer_uptodate(bh);
1942 mark_buffer_dirty(bh);
1944 clear_buffer_new(bh);
1948 * If this is a partial write which happened to make all buffers
1949 * uptodate then we can optimize away a bogus readpage() for
1950 * the next read(). Here we 'discover' whether the page went
1951 * uptodate as a result of this (potentially partial) write.
1954 SetPageUptodate(page);
1959 * block_write_begin takes care of the basic task of block allocation and
1960 * bringing partial write blocks uptodate first.
1962 * If *pagep is not NULL, then block_write_begin uses the locked page
1963 * at *pagep rather than allocating its own. In this case, the page will
1964 * not be unlocked or deallocated on failure.
1966 int block_write_begin(struct file *file, struct address_space *mapping,
1967 loff_t pos, unsigned len, unsigned flags,
1968 struct page **pagep, void **fsdata,
1969 get_block_t *get_block)
1971 struct inode *inode = mapping->host;
1975 unsigned start, end;
1978 index = pos >> PAGE_CACHE_SHIFT;
1979 start = pos & (PAGE_CACHE_SIZE - 1);
1985 page = grab_cache_page_write_begin(mapping, index, flags);
1992 BUG_ON(!PageLocked(page));
1994 status = __block_prepare_write(inode, page, start, end, get_block);
1995 if (unlikely(status)) {
1996 ClearPageUptodate(page);
2000 page_cache_release(page);
2004 * prepare_write() may have instantiated a few blocks
2005 * outside i_size. Trim these off again. Don't need
2006 * i_size_read because we hold i_mutex.
2008 if (pos + len > inode->i_size)
2009 vmtruncate(inode, inode->i_size);
2016 EXPORT_SYMBOL(block_write_begin);
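/*
 * Illustrative sketch (not part of this file): a typical ->write_begin built
 * on block_write_begin(), passing the filesystem's own get_block callback.
 * "examplefs_get_block" is hypothetical.
 *
 *      static int examplefs_write_begin(struct file *file,
 *                      struct address_space *mapping,
 *                      loff_t pos, unsigned len, unsigned flags,
 *                      struct page **pagep, void **fsdata)
 *      {
 *              *pagep = NULL;
 *              return block_write_begin(file, mapping, pos, len, flags,
 *                                       pagep, fsdata, examplefs_get_block);
 *      }
 */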
2018 int block_write_end(struct file *file, struct address_space *mapping,
2019 loff_t pos, unsigned len, unsigned copied,
2020 struct page *page, void *fsdata)
2022 struct inode *inode = mapping->host;
2025 start = pos & (PAGE_CACHE_SIZE - 1);
2027 if (unlikely(copied < len)) {
2029 * The buffers that were written will now be uptodate, so we
2030 * don't have to worry about a readpage reading them and
2031 * overwriting a partial write. However if we have encountered
2032 * a short write and only partially written into a buffer, it
2033 * will not be marked uptodate, so a readpage might come in and
2034 * destroy our partial write.
2036 * Do the simplest thing, and just treat any short write to a
2037 * non uptodate page as a zero-length write, and force the
2038 * caller to redo the whole thing.
2040 if (!PageUptodate(page))
2043 page_zero_new_buffers(page, start+copied, start+len);
2045 flush_dcache_page(page);
2047 /* This could be a short (even 0-length) commit */
2048 __block_commit_write(inode, page, start, start+copied);
2052 EXPORT_SYMBOL(block_write_end);
2054 int generic_write_end(struct file *file, struct address_space *mapping,
2055 loff_t pos, unsigned len, unsigned copied,
2056 struct page *page, void *fsdata)
2058 struct inode *inode = mapping->host;
2059 int i_size_changed = 0;
2061 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2064 * No need to use i_size_read() here, the i_size
2065 * cannot change under us because we hold i_mutex.
2067 * But it's important to update i_size while still holding page lock:
2068 * page writeout could otherwise come in and zero beyond i_size.
2070 if (pos+copied > inode->i_size) {
2071 i_size_write(inode, pos+copied);
2076 page_cache_release(page);
2079 * Don't mark the inode dirty under page lock. First, it unnecessarily
2080 * makes the holding time of page lock longer. Second, it forces lock
2081 * ordering of page lock and transaction start for journaling
2085 mark_inode_dirty(inode);
2089 EXPORT_SYMBOL(generic_write_end);
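/*
 * Illustrative sketch (not part of this file): wiring the helpers above into
 * an address_space_operations table; generic_write_end() can usually be
 * used directly. All "examplefs_*" names are hypothetical.
 *
 *      static const struct address_space_operations examplefs_aops = {
 *              .readpage       = examplefs_readpage,
 *              .writepage      = examplefs_writepage,
 *              .write_begin    = examplefs_write_begin,
 *              .write_end      = generic_write_end,
 *      };
 */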
2092 * block_is_partially_uptodate checks whether buffers within a page are
2095 * Returns true if all buffers which correspond to a file portion
2096 * we want to read are uptodate.
2098 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2101 struct inode *inode = page->mapping->host;
2102 unsigned block_start, block_end, blocksize;
2104 struct buffer_head *bh, *head;
2107 if (!page_has_buffers(page))
2110 blocksize = 1 << inode->i_blkbits;
2111 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2113 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2116 head = page_buffers(page);
2120 block_end = block_start + blocksize;
2121 if (block_end > from && block_start < to) {
2122 if (!buffer_uptodate(bh)) {
2126 if (block_end >= to)
2129 block_start = block_end;
2130 bh = bh->b_this_page;
2131 } while (bh != head);
2135 EXPORT_SYMBOL(block_is_partially_uptodate);
2138 * Generic "read page" function for block devices that have the normal
2139 * get_block functionality. This is most of the block device filesystems.
2140 * Reads the page asynchronously --- the unlock_buffer() and
2141 * set/clear_buffer_uptodate() functions propagate buffer state into the
2142 * page struct once IO has completed.
2144 int block_read_full_page(struct page *page, get_block_t *get_block)
2146 struct inode *inode = page->mapping->host;
2147 sector_t iblock, lblock;
2148 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2149 unsigned int blocksize;
2151 int fully_mapped = 1;
2153 BUG_ON(!PageLocked(page));
2154 blocksize = 1 << inode->i_blkbits;
2155 if (!page_has_buffers(page))
2156 create_empty_buffers(page, blocksize, 0);
2157 head = page_buffers(page);
2159 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2160 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2166 if (buffer_uptodate(bh))
2169 if (!buffer_mapped(bh)) {
2173 if (iblock < lblock) {
2174 WARN_ON(bh->b_size != blocksize);
2175 err = get_block(inode, iblock, bh, 0);
2179 if (!buffer_mapped(bh)) {
2180 zero_user(page, i * blocksize, blocksize);
2182 set_buffer_uptodate(bh);
2186 * get_block() might have updated the buffer
2189 if (buffer_uptodate(bh))
2193 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2196 SetPageMappedToDisk(page);
2200 * All buffers are uptodate - we can set the page uptodate
2201 * as well. But not if get_block() returned an error.
2203 if (!PageError(page))
2204 SetPageUptodate(page);
2209 /* Stage two: lock the buffers */
2210 for (i = 0; i < nr; i++) {
2213 mark_buffer_async_read(bh);
2217 * Stage 3: start the IO. Check for uptodateness
2218 * inside the buffer lock in case another process reading
2219 * the underlying blockdev brought it uptodate (the sct fix).
2221 for (i = 0; i < nr; i++) {
2223 if (buffer_uptodate(bh))
2224 end_buffer_async_read(bh, 1);
2226 submit_bh(READ, bh);
2230 EXPORT_SYMBOL(block_read_full_page);
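/*
 * Illustrative sketch (not part of this file): the usual one-line ->readpage
 * built on block_read_full_page(). "examplefs_get_block" is hypothetical.
 *
 *      static int examplefs_readpage(struct file *file, struct page *page)
 *      {
 *              return block_read_full_page(page, examplefs_get_block);
 *      }
 */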
2232 /* utility function for filesystems that need to do work on expanding
2233 * truncates. Uses filesystem pagecache writes to allow the filesystem to
2234 * deal with the hole.
2236 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2238 struct address_space *mapping = inode->i_mapping;
2243 err = inode_newsize_ok(inode, size);
2247 err = pagecache_write_begin(NULL, mapping, size, 0,
2248 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2253 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2259 EXPORT_SYMBOL(generic_cont_expand_simple);
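/*
 * Illustrative sketch (not part of this file): how a ->setattr might use
 * generic_cont_expand_simple() for an expanding truncate on a filesystem
 * that cannot represent holes. attr and err are hypothetical locals of
 * the caller.
 *
 *      if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *              err = generic_cont_expand_simple(inode, attr->ia_size);
 *              if (err)
 *                      return err;
 *      }
 */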
2261 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2262 loff_t pos, loff_t *bytes)
2264 struct inode *inode = mapping->host;
2265 unsigned blocksize = 1 << inode->i_blkbits;
2268 pgoff_t index, curidx;
2270 unsigned zerofrom, offset, len;
2273 index = pos >> PAGE_CACHE_SHIFT;
2274 offset = pos & ~PAGE_CACHE_MASK;
2276 while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2277 zerofrom = curpos & ~PAGE_CACHE_MASK;
2278 if (zerofrom & (blocksize-1)) {
2279 *bytes |= (blocksize-1);
2282 len = PAGE_CACHE_SIZE - zerofrom;
2284 err = pagecache_write_begin(file, mapping, curpos, len,
2285 AOP_FLAG_UNINTERRUPTIBLE,
2289 zero_user(page, zerofrom, len);
2290 err = pagecache_write_end(file, mapping, curpos, len, len,
2297 balance_dirty_pages_ratelimited(mapping);
2300 /* page covers the boundary, find the boundary offset */
2301 if (index == curidx) {
2302 zerofrom = curpos & ~PAGE_CACHE_MASK;
2303 /* if we will expand the file, the last block will be filled */
2304 if (offset <= zerofrom) {
2307 if (zerofrom & (blocksize-1)) {
2308 *bytes |= (blocksize-1);
2311 len = offset - zerofrom;
2313 err = pagecache_write_begin(file, mapping, curpos, len,
2314 AOP_FLAG_UNINTERRUPTIBLE,
2318 zero_user(page, zerofrom, len);
2319 err = pagecache_write_end(file, mapping, curpos, len, len,
2331 * For moronic filesystems that do not allow holes in files.
2332 * We may have to extend the file.
2334 int cont_write_begin(struct file *file, struct address_space *mapping,
2335 loff_t pos, unsigned len, unsigned flags,
2336 struct page **pagep, void **fsdata,
2337 get_block_t *get_block, loff_t *bytes)
2339 struct inode *inode = mapping->host;
2340 unsigned blocksize = 1 << inode->i_blkbits;
2344 err = cont_expand_zero(file, mapping, pos, bytes);
2348 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2349 if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2350 *bytes |= (blocksize-1);
2355 err = block_write_begin(file, mapping, pos, len,
2356 flags, pagep, fsdata, get_block);
2360 EXPORT_SYMBOL(cont_write_begin);
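/*
 * Usage sketch (illustrative only): a hole-less filesystem wires
 * cont_write_begin() into ->write_begin, passing a pointer to its
 * "zeroed so far" high-water mark.  examplefs_get_block and examplefs_i()
 * (returning the fs-private inode, assumed to carry an mmu_private field)
 * are hypothetical.
 */
#if 0
static int examplefs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				examplefs_get_block,
				&examplefs_i(mapping->host)->mmu_private);
}
#endif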
2362 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2363 get_block_t *get_block)
2365 struct inode *inode = page->mapping->host;
2366 int err = __block_prepare_write(inode, page, from, to, get_block);
2368 ClearPageUptodate(page);
2371 EXPORT_SYMBOL(block_prepare_write);
2373 int block_commit_write(struct page *page, unsigned from, unsigned to)
2375 struct inode *inode = page->mapping->host;
2376 __block_commit_write(inode,page,from,to);
2379 EXPORT_SYMBOL(block_commit_write);
2382 * block_page_mkwrite() is not allowed to change the file size as it gets
2383 * called from a page fault handler when a page is first dirtied. Hence we must
2384 * be careful to check for EOF conditions here. We set the page up correctly
2385 * for a written page which means we get ENOSPC checking when writing into
2386 * holes and correct delalloc and unwritten extent mapping on filesystems that
2387 * support these features.
2389 * We are not allowed to take the i_mutex here so we have to play games to
2390 * protect against truncate races as the page could now be beyond EOF. Because
2391 * vmtruncate() writes the inode size before removing pages, once we have the
2392 * page lock we can determine safely if the page is beyond EOF. If it is not
2393 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
2397 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2398 get_block_t get_block)
2400 struct page *page = vmf->page;
2401 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2404 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2407 size = i_size_read(inode);
2408 if ((page->mapping != inode->i_mapping) ||
2409 (page_offset(page) > size)) {
2410 /* page got truncated out from underneath us */
2415 /* page is wholly or partially inside EOF */
2416 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2417 end = size & ~PAGE_CACHE_MASK;
2419 end = PAGE_CACHE_SIZE;
2421 ret = block_prepare_write(page, 0, end, get_block);
2423 ret = block_commit_write(page, 0, end);
2425 if (unlikely(ret)) {
2429 else /* -ENOSPC, -EIO, etc */
2430 ret = VM_FAULT_SIGBUS;
2432 ret = VM_FAULT_LOCKED;
2437 EXPORT_SYMBOL(block_page_mkwrite);
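/*
 * Usage sketch (illustrative only): block_page_mkwrite() is meant to sit
 * behind a filesystem's ->page_mkwrite handler so that writable mmap faults
 * go through the normal prepare/commit block allocation.  "examplefs_"
 * names are hypothetical.
 */
#if 0
static int examplefs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return block_page_mkwrite(vma, vmf, examplefs_get_block);
}

static const struct vm_operations_struct examplefs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= examplefs_page_mkwrite,
};
#endif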
2440 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2441 * immediately, while under the page lock. So it needs a special end_io
2442 * handler which does not touch the bh after unlocking it.
2444 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2446 __end_buffer_read_notouch(bh, uptodate);
2450 * Attach the singly-linked list of buffers created by nobh_write_begin, to
2451 * the page (converting it to circular linked list and taking care of page dirty races).
2454 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2456 struct buffer_head *bh;
2458 BUG_ON(!PageLocked(page));
2460 spin_lock(&page->mapping->private_lock);
2463 if (PageDirty(page))
2464 set_buffer_dirty(bh);
2465 if (!bh->b_this_page)
2466 bh->b_this_page = head;
2467 bh = bh->b_this_page;
2468 } while (bh != head);
2469 attach_page_buffers(page, head);
2470 spin_unlock(&page->mapping->private_lock);
2474 * On entry, the page is fully not uptodate.
2475 * On exit the page is fully uptodate in the areas outside (from,to)
2477 int nobh_write_begin(struct file *file, struct address_space *mapping,
2478 loff_t pos, unsigned len, unsigned flags,
2479 struct page **pagep, void **fsdata,
2480 get_block_t *get_block)
2482 struct inode *inode = mapping->host;
2483 const unsigned blkbits = inode->i_blkbits;
2484 const unsigned blocksize = 1 << blkbits;
2485 struct buffer_head *head, *bh;
2489 unsigned block_in_page;
2490 unsigned block_start, block_end;
2491 sector_t block_in_file;
2494 int is_mapped_to_disk = 1;
2496 index = pos >> PAGE_CACHE_SHIFT;
2497 from = pos & (PAGE_CACHE_SIZE - 1);
2500 page = grab_cache_page_write_begin(mapping, index, flags);
2506 if (page_has_buffers(page)) {
2508 page_cache_release(page);
2510 return block_write_begin(file, mapping, pos, len, flags, pagep,
2514 if (PageMappedToDisk(page))
2518 * Allocate buffers so that we can keep track of state, and potentially
2519 * attach them to the page if an error occurs. In the common case of
2520 * no error, they will just be freed again without ever being attached
2521 * to the page (which is all OK, because we're under the page lock).
2523 * Be careful: the buffer linked list is a NULL terminated one, rather
2524 * than the circular one we're used to.
2526 head = alloc_page_buffers(page, blocksize, 0);
2532 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2535 * We loop across all blocks in the page, whether or not they are
2536 * part of the affected region. This is so we can discover if the
2537 * page is fully mapped-to-disk.
2539 for (block_start = 0, block_in_page = 0, bh = head;
2540 block_start < PAGE_CACHE_SIZE;
2541 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2544 block_end = block_start + blocksize;
2547 if (block_start >= to)
2549 ret = get_block(inode, block_in_file + block_in_page,
2553 if (!buffer_mapped(bh))
2554 is_mapped_to_disk = 0;
2556 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2557 if (PageUptodate(page)) {
2558 set_buffer_uptodate(bh);
2561 if (buffer_new(bh) || !buffer_mapped(bh)) {
2562 zero_user_segments(page, block_start, from,
2566 if (buffer_uptodate(bh))
2567 continue; /* reiserfs does this */
2568 if (block_start < from || block_end > to) {
2570 bh->b_end_io = end_buffer_read_nobh;
2571 submit_bh(READ, bh);
2578 * The page is locked, so these buffers are protected from
2579 * any VM or truncate activity. Hence we don't need to care
2580 * for the buffer_head refcounts.
2582 for (bh = head; bh; bh = bh->b_this_page) {
2584 if (!buffer_uptodate(bh))
2591 if (is_mapped_to_disk)
2592 SetPageMappedToDisk(page);
2594 *fsdata = head; /* to be released by nobh_write_end */
2601 * Error recovery is a bit difficult. We need to zero out blocks that
2602 * were newly allocated, and dirty them to ensure they get written out.
2603 * Buffers need to be attached to the page at this point, otherwise
2604 * the handling of potential IO errors during writeout would be hard
2605 * (could try doing synchronous writeout, but what if that fails too?)
2607 attach_nobh_buffers(page, head);
2608 page_zero_new_buffers(page, from, to);
2612 page_cache_release(page);
2615 if (pos + len > inode->i_size)
2616 vmtruncate(inode, inode->i_size);
2620 EXPORT_SYMBOL(nobh_write_begin);
2622 int nobh_write_end(struct file *file, struct address_space *mapping,
2623 loff_t pos, unsigned len, unsigned copied,
2624 struct page *page, void *fsdata)
2626 struct inode *inode = page->mapping->host;
2627 struct buffer_head *head = fsdata;
2628 struct buffer_head *bh;
2629 BUG_ON(fsdata != NULL && page_has_buffers(page));
2631 if (unlikely(copied < len) && head)
2632 attach_nobh_buffers(page, head);
2633 if (page_has_buffers(page))
2634 return generic_write_end(file, mapping, pos, len,
2635 copied, page, fsdata);
2637 SetPageUptodate(page);
2638 set_page_dirty(page);
2639 if (pos+copied > inode->i_size) {
2640 i_size_write(inode, pos+copied);
2641 mark_inode_dirty(inode);
2645 page_cache_release(page);
2649 head = head->b_this_page;
2650 free_buffer_head(bh);
2655 EXPORT_SYMBOL(nobh_write_end);
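/*
 * Usage sketch (illustrative only): the nobh variants slot into
 * ->write_begin/->write_end just like the regular block helpers; the head
 * of the temporary buffer list travels in *fsdata between the two calls,
 * and nobh_write_end() can be used as the ->write_end a_op directly.
 * "examplefs_" names are hypothetical.
 */
#if 0
static int examplefs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				examplefs_get_block);
}

static const struct address_space_operations examplefs_nobh_aops = {
	.write_begin	= examplefs_write_begin,
	.write_end	= nobh_write_end,
};
#endif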
2658 * nobh_writepage() - based on block_write_full_page() except
2659 * that it tries to operate without attaching bufferheads to the page.
2662 int nobh_writepage(struct page *page, get_block_t *get_block,
2663 struct writeback_control *wbc)
2665 struct inode * const inode = page->mapping->host;
2666 loff_t i_size = i_size_read(inode);
2667 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2671 /* Is the page fully inside i_size? */
2672 if (page->index < end_index)
2675 /* Is the page fully outside i_size? (truncate in progress) */
2676 offset = i_size & (PAGE_CACHE_SIZE-1);
2677 if (page->index >= end_index+1 || !offset) {
2679 * The page may have dirty, unmapped buffers. For example,
2680 * they may have been added in ext3_writepage(). Make them
2681 * freeable here, so the page does not leak.
2684 /* Not really sure about this - do we need this ? */
2685 if (page->mapping->a_ops->invalidatepage)
2686 page->mapping->a_ops->invalidatepage(page, offset);
2689 return 0; /* don't care */
2693 * The page straddles i_size. It must be zeroed out on each and every
2694 * writepage invocation because it may be mmapped. "A file is mapped
2695 * in multiples of the page size. For a file that is not a multiple of
2696 * the page size, the remaining memory is zeroed when mapped, and
2697 * writes to that region are not written out to the file."
2699 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2701 ret = mpage_writepage(page, get_block, wbc);
2703 ret = __block_write_full_page(inode, page, get_block, wbc,
2704 end_buffer_async_write);
2707 EXPORT_SYMBOL(nobh_writepage);
2709 int nobh_truncate_page(struct address_space *mapping,
2710 loff_t from, get_block_t *get_block)
2712 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2713 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2716 unsigned length, pos;
2717 struct inode *inode = mapping->host;
2719 struct buffer_head map_bh;
2722 blocksize = 1 << inode->i_blkbits;
2723 length = offset & (blocksize - 1);
2725 /* Block boundary? Nothing to do */
2729 length = blocksize - length;
2730 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2732 page = grab_cache_page(mapping, index);
2737 if (page_has_buffers(page)) {
2740 page_cache_release(page);
2741 return block_truncate_page(mapping, from, get_block);
2744 /* Find the buffer that contains "offset" */
2746 while (offset >= pos) {
2751 map_bh.b_size = blocksize;
2753 err = get_block(inode, iblock, &map_bh, 0);
2756 /* unmapped? It's a hole - nothing to do */
2757 if (!buffer_mapped(&map_bh))
2760 /* Ok, it's mapped. Make sure it's up-to-date */
2761 if (!PageUptodate(page)) {
2762 err = mapping->a_ops->readpage(NULL, page);
2764 page_cache_release(page);
2768 if (!PageUptodate(page)) {
2772 if (page_has_buffers(page))
2775 zero_user(page, offset, length);
2776 set_page_dirty(page);
2781 page_cache_release(page);
2785 EXPORT_SYMBOL(nobh_truncate_page);
2787 int block_truncate_page(struct address_space *mapping,
2788 loff_t from, get_block_t *get_block)
2790 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2791 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2794 unsigned length, pos;
2795 struct inode *inode = mapping->host;
2797 struct buffer_head *bh;
2800 blocksize = 1 << inode->i_blkbits;
2801 length = offset & (blocksize - 1);
2803 /* Block boundary? Nothing to do */
2807 length = blocksize - length;
2808 iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2810 page = grab_cache_page(mapping, index);
2815 if (!page_has_buffers(page))
2816 create_empty_buffers(page, blocksize, 0);
2818 /* Find the buffer that contains "offset" */
2819 bh = page_buffers(page);
2821 while (offset >= pos) {
2822 bh = bh->b_this_page;
2828 if (!buffer_mapped(bh)) {
2829 WARN_ON(bh->b_size != blocksize);
2830 err = get_block(inode, iblock, bh, 0);
2833 /* unmapped? It's a hole - nothing to do */
2834 if (!buffer_mapped(bh))
2838 /* Ok, it's mapped. Make sure it's up-to-date */
2839 if (PageUptodate(page))
2840 set_buffer_uptodate(bh);
2842 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2844 ll_rw_block(READ, 1, &bh);
2846 /* Uhhuh. Read error. Complain and punt. */
2847 if (!buffer_uptodate(bh))
2851 zero_user(page, offset, length);
2852 mark_buffer_dirty(bh);
2857 page_cache_release(page);
2861 EXPORT_SYMBOL(block_truncate_page);
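/*
 * Usage sketch (illustrative only): a filesystem's truncate path usually
 * zeroes the partial block at the new EOF with block_truncate_page() before
 * freeing the blocks beyond it.  "examplefs_" names are hypothetical.
 */
#if 0
static void examplefs_truncate(struct inode *inode)
{
	block_truncate_page(inode->i_mapping, inode->i_size,
			    examplefs_get_block);
	/* fs-specific code would now release the blocks past i_size */
}
#endif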
2864 * The generic ->writepage function for buffer-backed address_spaces;
2865 * this form passes in the end_io handler used to finish the IO.
2867 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2868 struct writeback_control *wbc, bh_end_io_t *handler)
2870 struct inode * const inode = page->mapping->host;
2871 loff_t i_size = i_size_read(inode);
2872 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2875 /* Is the page fully inside i_size? */
2876 if (page->index < end_index)
2877 return __block_write_full_page(inode, page, get_block, wbc,
2880 /* Is the page fully outside i_size? (truncate in progress) */
2881 offset = i_size & (PAGE_CACHE_SIZE-1);
2882 if (page->index >= end_index+1 || !offset) {
2884 * The page may have dirty, unmapped buffers. For example,
2885 * they may have been added in ext3_writepage(). Make them
2886 * freeable here, so the page does not leak.
2888 do_invalidatepage(page, 0);
2890 return 0; /* don't care */
2894 * The page straddles i_size. It must be zeroed out on each and every
2895 * writepage invocation because it may be mmapped. "A file is mapped
2896 * in multiples of the page size. For a file that is not a multiple of
2897 * the page size, the remaining memory is zeroed when mapped, and
2898 * writes to that region are not written out to the file."
2900 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2901 return __block_write_full_page(inode, page, get_block, wbc, handler);
2903 EXPORT_SYMBOL(block_write_full_page_endio);
2906 * The generic ->writepage function for buffer-backed address_spaces
2908 int block_write_full_page(struct page *page, get_block_t *get_block,
2909 struct writeback_control *wbc)
2911 return block_write_full_page_endio(page, get_block, wbc,
2912 end_buffer_async_write);
2914 EXPORT_SYMBOL(block_write_full_page);
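/*
 * Usage sketch (illustrative only): most buffer_head based filesystems
 * implement ->writepage as a thin wrapper around block_write_full_page().
 * "examplefs_get_block" is hypothetical.
 */
#if 0
static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, examplefs_get_block, wbc);
}
#endif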
2916 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2917 get_block_t *get_block)
2919 struct buffer_head tmp;
2920 struct inode *inode = mapping->host;
2923 tmp.b_size = 1 << inode->i_blkbits;
2924 get_block(inode, block, &tmp, 0);
2925 return tmp.b_blocknr;
2927 EXPORT_SYMBOL(generic_block_bmap);
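/*
 * Usage sketch (illustrative only): ->bmap is normally just
 * generic_block_bmap() with the filesystem's get_block callback.
 * "examplefs_get_block" is hypothetical.
 */
#if 0
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, examplefs_get_block);
}
#endif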
2929 static void end_bio_bh_io_sync(struct bio *bio, int err)
2931 struct buffer_head *bh = bio->bi_private;
2933 if (err == -EOPNOTSUPP) {
2934 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2935 set_bit(BH_Eopnotsupp, &bh->b_state);
2938 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2939 set_bit(BH_Quiet, &bh->b_state);
2941 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2945 int submit_bh(int rw, struct buffer_head * bh)
2950 BUG_ON(!buffer_locked(bh));
2951 BUG_ON(!buffer_mapped(bh));
2952 BUG_ON(!bh->b_end_io);
2953 BUG_ON(buffer_delay(bh));
2954 BUG_ON(buffer_unwritten(bh));
2957 * Mask in barrier bit for a write (could be either a WRITE or a WRITE_SYNC).
2960 if (buffer_ordered(bh) && (rw & WRITE))
2961 rw |= WRITE_BARRIER;
2964 * Only clear out a write error when rewriting
2966 if (test_set_buffer_req(bh) && (rw & WRITE))
2967 clear_buffer_write_io_error(bh);
2970 * from here on down, it's all bio -- do the initial mapping,
2971 * submit_bio -> generic_make_request may further map this bio around
2973 bio = bio_alloc(GFP_NOIO, 1);
2975 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2976 bio->bi_bdev = bh->b_bdev;
2977 bio->bi_io_vec[0].bv_page = bh->b_page;
2978 bio->bi_io_vec[0].bv_len = bh->b_size;
2979 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2983 bio->bi_size = bh->b_size;
2985 bio->bi_end_io = end_bio_bh_io_sync;
2986 bio->bi_private = bh;
2989 submit_bio(rw, bio);
2991 if (bio_flagged(bio, BIO_EOPNOTSUPP))
2997 EXPORT_SYMBOL(submit_bh);
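/*
 * Usage sketch (illustrative only): the usual pattern for driving a single
 * buffer_head through submit_bh() is lock it, set b_end_io, submit, then
 * wait, essentially what bh_submit_read() further down does.
 */
#if 0
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);				/* end_buffer_read_sync drops it */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif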
3000 * ll_rw_block: low-level access to block devices (DEPRECATED)
3001 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
3002 * @nr: number of &struct buffer_heads in the array
3003 * @bhs: array of pointers to &struct buffer_head
3005 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3006 * requests an I/O operation on them, either a %READ or a %WRITE. The third
3007 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
3008 * are sent to disk. The fourth %READA option is described in the documentation
3009 * for generic_make_request() which ll_rw_block() calls.
3011 * This function drops any buffer that it cannot get a lock on (with the
3012 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
3013 * clean when doing a write request, and any buffer that appears to be
3014 * up-to-date when doing read request. Further it marks as clean buffers that
3015 * are processed for writing (the buffer cache won't assume that they are
3016 * actually clean until the buffer gets unlocked).
3018 * ll_rw_block sets b_end_io to a simple completion handler that marks
3019 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes up any waiters.
3022 * All of the buffers must be for the same device, and must also be a
3023 * multiple of the current approved size for the device.
3025 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3029 for (i = 0; i < nr; i++) {
3030 struct buffer_head *bh = bhs[i];
3032 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
3034 else if (!trylock_buffer(bh))
3037 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
3038 rw == SWRITE_SYNC_PLUG) {
3039 if (test_clear_buffer_dirty(bh)) {
3040 bh->b_end_io = end_buffer_write_sync;
3042 if (rw == SWRITE_SYNC)
3043 submit_bh(WRITE_SYNC, bh);
3045 submit_bh(WRITE, bh);
3049 if (!buffer_uptodate(bh)) {
3050 bh->b_end_io = end_buffer_read_sync;
3059 EXPORT_SYMBOL(ll_rw_block);
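/*
 * Usage sketch (illustrative only): a typical caller fires off READ I/O on
 * a batch of buffers with ll_rw_block() and then waits only for the ones it
 * actually needs, as block_truncate_page() above does for a single buffer.
 */
#if 0
static int example_read_two_blocks(struct buffer_head *bhs[2])
{
	ll_rw_block(READ, 2, bhs);	/* locked/uptodate buffers are skipped */
	wait_on_buffer(bhs[0]);		/* only the first one is needed now */
	return buffer_uptodate(bhs[0]) ? 0 : -EIO;
}
#endif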
3062 * For a data-integrity writeout, we need to wait upon any in-progress I/O
3063 * and then start new I/O and then wait upon it. The caller must have a ref on the buffer_head.
3066 int sync_dirty_buffer(struct buffer_head *bh)
3070 WARN_ON(atomic_read(&bh->b_count) < 1);
3072 if (test_clear_buffer_dirty(bh)) {
3074 bh->b_end_io = end_buffer_write_sync;
3075 ret = submit_bh(WRITE_SYNC, bh);
3077 if (buffer_eopnotsupp(bh)) {
3078 clear_buffer_eopnotsupp(bh);
3081 if (!ret && !buffer_uptodate(bh))
3088 EXPORT_SYMBOL(sync_dirty_buffer);
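/*
 * Usage sketch (illustrative only): the common pattern for a synchronous
 * metadata update is read the block, modify it, mark it dirty and then call
 * sync_dirty_buffer().  The block number and the byte patched are arbitrary.
 */
#if 0
static int example_update_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, block);	/* read (or find) the block */
	if (!bh)
		return -EIO;
	bh->b_data[0] = 0;		/* modify some on-disk data */
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* wait until it is on disk */
	brelse(bh);
	return err;
}
#endif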
3091 * try_to_free_buffers() checks if all the buffers on this particular page
3092 * are unused, and releases them if so.
3094 * Exclusion against try_to_free_buffers may be obtained by either
3095 * locking the page or by holding its mapping's private_lock.
3097 * If the page is dirty but all the buffers are clean then we need to
3098 * be sure to mark the page clean as well. This is because the page
3099 * may be against a block device, and a later reattachment of buffers
3100 * to a dirty page will set *all* buffers dirty. Which would corrupt
3101 * filesystem data on the same device.
3103 * The same applies to regular filesystem pages: if all the buffers are
3104 * clean then we set the page clean and proceed. To do that, we require
3105 * total exclusion from __set_page_dirty_buffers(). That is obtained with private_lock.
3108 * try_to_free_buffers() is non-blocking.
3110 static inline int buffer_busy(struct buffer_head *bh)
3112 return atomic_read(&bh->b_count) |
3113 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3117 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3119 struct buffer_head *head = page_buffers(page);
3120 struct buffer_head *bh;
3124 if (buffer_write_io_error(bh) && page->mapping)
3125 set_bit(AS_EIO, &page->mapping->flags);
3126 if (buffer_busy(bh))
3128 bh = bh->b_this_page;
3129 } while (bh != head);
3132 struct buffer_head *next = bh->b_this_page;
3134 if (bh->b_assoc_map)
3135 __remove_assoc_queue(bh);
3137 } while (bh != head);
3138 *buffers_to_free = head;
3139 __clear_page_buffers(page);
3145 int try_to_free_buffers(struct page *page)
3147 struct address_space * const mapping = page->mapping;
3148 struct buffer_head *buffers_to_free = NULL;
3151 BUG_ON(!PageLocked(page));
3152 if (PageWriteback(page))
3155 if (mapping == NULL) { /* can this still happen? */
3156 ret = drop_buffers(page, &buffers_to_free);
3160 spin_lock(&mapping->private_lock);
3161 ret = drop_buffers(page, &buffers_to_free);
3164 * If the filesystem writes its buffers by hand (eg ext3)
3165 * then we can have clean buffers against a dirty page. We
3166 * clean the page here; otherwise the VM will never notice
3167 * that the filesystem did any IO at all.
3169 * Also, during truncate, discard_buffer will have marked all
3170 * the page's buffers clean. We discover that here and clean the page also.
3173 * private_lock must be held over this entire operation in order
3174 * to synchronise against __set_page_dirty_buffers and prevent the
3175 * dirty bit from being lost.
3178 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3179 spin_unlock(&mapping->private_lock);
3181 if (buffers_to_free) {
3182 struct buffer_head *bh = buffers_to_free;
3185 struct buffer_head *next = bh->b_this_page;
3186 free_buffer_head(bh);
3188 } while (bh != buffers_to_free);
3192 EXPORT_SYMBOL(try_to_free_buffers);
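/*
 * Usage sketch (illustrative only): when no ->releasepage is provided,
 * try_to_release_page() falls back to try_to_free_buffers() itself; a
 * filesystem with extra per-page state would wrap it like this.  The
 * "examplefs_" name is hypothetical.
 */
#if 0
static int examplefs_releasepage(struct page *page, gfp_t gfp)
{
	/* a real fs might first check journal or delalloc state here */
	return try_to_free_buffers(page);
}
#endif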
3194 void block_sync_page(struct page *page)
3196 struct address_space *mapping;
3199 mapping = page_mapping(page);
3201 blk_run_backing_dev(mapping->backing_dev_info, page);
3203 EXPORT_SYMBOL(block_sync_page);
3206 * There are no bdflush tunables left. But distributions are
3207 * still running obsolete flush daemons, so we terminate them here.
3209 * Use of bdflush() is deprecated and will be removed in a future kernel.
3210 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3212 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3214 static int msg_count;
3216 if (!capable(CAP_SYS_ADMIN))
3219 if (msg_count < 5) {
3222 "warning: process `%s' used the obsolete bdflush"
3223 " system call\n", current->comm);
3224 printk(KERN_INFO "Fix your initscripts?\n");
3233 * Buffer-head allocation
3235 static struct kmem_cache *bh_cachep;
3238 * Once the number of bh's in the machine exceeds this level, we start
3239 * stripping them in writeback.
3241 static int max_buffer_heads;
3243 int buffer_heads_over_limit;
3245 struct bh_accounting {
3246 int nr; /* Number of live bh's */
3247 int ratelimit; /* Limit cacheline bouncing */
3250 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3252 static void recalc_bh_state(void)
3257 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3259 __get_cpu_var(bh_accounting).ratelimit = 0;
3260 for_each_online_cpu(i)
3261 tot += per_cpu(bh_accounting, i).nr;
3262 buffer_heads_over_limit = (tot > max_buffer_heads);
3265 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3267 struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3269 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3270 get_cpu_var(bh_accounting).nr++;
3272 put_cpu_var(bh_accounting);
3276 EXPORT_SYMBOL(alloc_buffer_head);
3278 void free_buffer_head(struct buffer_head *bh)
3280 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3281 kmem_cache_free(bh_cachep, bh);
3282 get_cpu_var(bh_accounting).nr--;
3284 put_cpu_var(bh_accounting);
3286 EXPORT_SYMBOL(free_buffer_head);
3288 static void buffer_exit_cpu(int cpu)
3291 struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3293 for (i = 0; i < BH_LRU_SIZE; i++) {
3297 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3298 per_cpu(bh_accounting, cpu).nr = 0;
3299 put_cpu_var(bh_accounting);
3302 static int buffer_cpu_notify(struct notifier_block *self,
3303 unsigned long action, void *hcpu)
3305 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3306 buffer_exit_cpu((unsigned long)hcpu);
3311 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3312 * @bh: struct buffer_head
3314 * Return true if the buffer is up-to-date and false,
3315 * with the buffer locked, if not.
3317 int bh_uptodate_or_lock(struct buffer_head *bh)
3319 if (!buffer_uptodate(bh)) {
3321 if (!buffer_uptodate(bh))
3327 EXPORT_SYMBOL(bh_uptodate_or_lock);
3330 * bh_submit_read - Submit a locked buffer for reading
3331 * @bh: struct buffer_head
3333 * Returns zero on success and -EIO on error.
3335 int bh_submit_read(struct buffer_head *bh)
3337 BUG_ON(!buffer_locked(bh));
3339 if (buffer_uptodate(bh)) {
3345 bh->b_end_io = end_buffer_read_sync;
3346 submit_bh(READ, bh);
3348 if (buffer_uptodate(bh))
3352 EXPORT_SYMBOL(bh_submit_read);
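/*
 * Usage sketch (illustrative only): the two helpers above combine into a
 * compact "make sure this buffer has been read" idiom:
 */
#if 0
static int example_read_buffer(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate */
	return bh_submit_read(bh);	/* bh is now locked; read it */
}
#endif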
3354 void __init buffer_init(void)
3358 bh_cachep = kmem_cache_create("buffer_head",
3359 sizeof(struct buffer_head), 0,
3360 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3365 * Limit the bh occupancy to 10% of ZONE_NORMAL
3367 nrpages = (nr_free_buffer_pages() * 10) / 100;
3368 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3369 hotcpu_notifier(buffer_cpu_notify, 0);