[PATCH] __block_write_full_page speedup
[safe/jmp/linux-2.6] / fs / buffer.c
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/syscalls.h>
24 #include <linux/fs.h>
25 #include <linux/mm.h>
26 #include <linux/percpu.h>
27 #include <linux/slab.h>
28 #include <linux/smp_lock.h>
29 #include <linux/blkdev.h>
30 #include <linux/file.h>
31 #include <linux/quotaops.h>
32 #include <linux/highmem.h>
33 #include <linux/module.h>
34 #include <linux/writeback.h>
35 #include <linux/hash.h>
36 #include <linux/suspend.h>
37 #include <linux/buffer_head.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43
44 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
45 static void invalidate_bh_lrus(void);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52         bh->b_end_io = handler;
53         bh->b_private = private;
54 }
55
56 static int sync_buffer(void *word)
57 {
58         struct block_device *bd;
59         struct buffer_head *bh
60                 = container_of(word, struct buffer_head, b_state);
61
62         smp_mb();
63         bd = bh->b_bdev;
64         if (bd)
65                 blk_run_address_space(bd->bd_inode->i_mapping);
66         io_schedule();
67         return 0;
68 }
69
70 void fastcall __lock_buffer(struct buffer_head *bh)
71 {
72         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73                                                         TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76
77 void fastcall unlock_buffer(struct buffer_head *bh)
78 {
79         clear_buffer_locked(bh);
80         smp_mb__after_clear_bit();
81         wake_up_bit(&bh->b_state, BH_Lock);
82 }
83
84 /*
85  * Block until a buffer comes unlocked.  This doesn't stop it
86  * from becoming locked again - you have to lock it yourself
87  * if you want to preserve its state.
88  */
89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
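/*
 * Illustrative sketch, not part of this file: as the comment above notes,
 * __wait_on_buffer() only waits - the buffer may be locked again right
 * away.  A caller which needs the buffer to stay in a known state takes
 * the lock itself, along these lines:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		... examine or repair the buffer while it is held locked ...
 *	unlock_buffer(bh);
 */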
93
94 static void
95 __clear_page_buffers(struct page *page)
96 {
97         ClearPagePrivate(page);
98         page->private = 0;
99         page_cache_release(page);
100 }
101
102 static void buffer_io_error(struct buffer_head *bh)
103 {
104         char b[BDEVNAME_SIZE];
105
106         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
107                         bdevname(bh->b_bdev, b),
108                         (unsigned long long)bh->b_blocknr);
109 }
110
111 /*
112  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
113  * unlock the buffer. This is what ll_rw_block uses too.
114  */
115 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
116 {
117         if (uptodate) {
118                 set_buffer_uptodate(bh);
119         } else {
120                 /* This happens, due to failed READA attempts. */
121                 clear_buffer_uptodate(bh);
122         }
123         unlock_buffer(bh);
124         put_bh(bh);
125 }
126
127 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
128 {
129         char b[BDEVNAME_SIZE];
130
131         if (uptodate) {
132                 set_buffer_uptodate(bh);
133         } else {
134                 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
135                         buffer_io_error(bh);
136                         printk(KERN_WARNING "lost page write due to "
137                                         "I/O error on %s\n",
138                                        bdevname(bh->b_bdev, b));
139                 }
140                 set_buffer_write_io_error(bh);
141                 clear_buffer_uptodate(bh);
142         }
143         unlock_buffer(bh);
144         put_bh(bh);
145 }
146
147 /*
148  * Write out and wait upon all the dirty data associated with a block
149  * device via its mapping.  Does not take the superblock lock.
150  */
151 int sync_blockdev(struct block_device *bdev)
152 {
153         int ret = 0;
154
155         if (bdev) {
156                 int err;
157
158                 ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
159                 err = filemap_fdatawait(bdev->bd_inode->i_mapping);
160                 if (!ret)
161                         ret = err;
162         }
163         return ret;
164 }
165 EXPORT_SYMBOL(sync_blockdev);
166
167 /*
168  * Write out and wait upon all dirty data associated with this
169  * superblock.  Filesystem data as well as the underlying block
170  * device.  Takes the superblock lock.
171  */
172 int fsync_super(struct super_block *sb)
173 {
174         sync_inodes_sb(sb, 0);
175         DQUOT_SYNC(sb);
176         lock_super(sb);
177         if (sb->s_dirt && sb->s_op->write_super)
178                 sb->s_op->write_super(sb);
179         unlock_super(sb);
180         if (sb->s_op->sync_fs)
181                 sb->s_op->sync_fs(sb, 1);
182         sync_blockdev(sb->s_bdev);
183         sync_inodes_sb(sb, 1);
184
185         return sync_blockdev(sb->s_bdev);
186 }
187
188 /*
189  * Write out and wait upon all dirty data associated with this
190  * device.   Filesystem data as well as the underlying block
191  * device.  Takes the superblock lock.
192  */
193 int fsync_bdev(struct block_device *bdev)
194 {
195         struct super_block *sb = get_super(bdev);
196         if (sb) {
197                 int res = fsync_super(sb);
198                 drop_super(sb);
199                 return res;
200         }
201         return sync_blockdev(bdev);
202 }
203
204 /**
205  * freeze_bdev  --  lock a filesystem and force it into a consistent state
206  * @bdev:       blockdevice to lock
207  *
208  * This takes the block device bd_mount_sem to make sure no new mounts
209  * happen on bdev until thaw_bdev() is called.
210  * If a superblock is found on this device, we take the s_umount semaphore
211  * on it to make sure nobody unmounts until the snapshot creation is done.
212  */
213 struct super_block *freeze_bdev(struct block_device *bdev)
214 {
215         struct super_block *sb;
216
217         down(&bdev->bd_mount_sem);
218         sb = get_super(bdev);
219         if (sb && !(sb->s_flags & MS_RDONLY)) {
220                 sb->s_frozen = SB_FREEZE_WRITE;
221                 smp_wmb();
222
223                 sync_inodes_sb(sb, 0);
224                 DQUOT_SYNC(sb);
225
226                 lock_super(sb);
227                 if (sb->s_dirt && sb->s_op->write_super)
228                         sb->s_op->write_super(sb);
229                 unlock_super(sb);
230
231                 if (sb->s_op->sync_fs)
232                         sb->s_op->sync_fs(sb, 1);
233
234                 sync_blockdev(sb->s_bdev);
235                 sync_inodes_sb(sb, 1);
236
237                 sb->s_frozen = SB_FREEZE_TRANS;
238                 smp_wmb();
239
240                 sync_blockdev(sb->s_bdev);
241
242                 if (sb->s_op->write_super_lockfs)
243                         sb->s_op->write_super_lockfs(sb);
244         }
245
246         sync_blockdev(bdev);
247         return sb;      /* thaw_bdev releases s->s_umount and bd_mount_sem */
248 }
249 EXPORT_SYMBOL(freeze_bdev);
250
251 /**
252  * thaw_bdev  -- unlock filesystem
253  * @bdev:       blockdevice to unlock
254  * @sb:         associated superblock
255  *
256  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
257  */
258 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
259 {
260         if (sb) {
261                 BUG_ON(sb->s_bdev != bdev);
262
263                 if (sb->s_op->unlockfs)
264                         sb->s_op->unlockfs(sb);
265                 sb->s_frozen = SB_UNFROZEN;
266                 smp_wmb();
267                 wake_up(&sb->s_wait_unfrozen);
268                 drop_super(sb);
269         }
270
271         up(&bdev->bd_mount_sem);
272 }
273 EXPORT_SYMBOL(thaw_bdev);
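/*
 * Illustrative sketch, not part of this file: a snapshot-style caller is
 * expected to bracket its work with these two helpers, e.g.
 *
 *	struct super_block *sb = freeze_bdev(bdev);
 *	... take the snapshot while writes are held off ...
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns NULL if no superblock was found on the device;
 * thaw_bdev() copes with that and still releases bd_mount_sem.
 */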
274
275 /*
276  * sync everything.  Start out by waking pdflush, because that writes back
277  * all queues in parallel.
278  */
279 static void do_sync(unsigned long wait)
280 {
281         wakeup_bdflush(0);
282         sync_inodes(0);         /* All mappings, inodes and their blockdevs */
283         DQUOT_SYNC(NULL);
284         sync_supers();          /* Write the superblocks */
285         sync_filesystems(0);    /* Start syncing the filesystems */
286         sync_filesystems(wait); /* Waitingly sync the filesystems */
287         sync_inodes(wait);      /* Mappings, inodes and blockdevs, again. */
288         if (!wait)
289                 printk("Emergency Sync complete\n");
290         if (unlikely(laptop_mode))
291                 laptop_sync_completion();
292 }
293
294 asmlinkage long sys_sync(void)
295 {
296         do_sync(1);
297         return 0;
298 }
299
300 void emergency_sync(void)
301 {
302         pdflush_operation(do_sync, 0);
303 }
304
305 /*
306  * Generic function to fsync a file.
307  *
308  * filp may be NULL if called via the msync of a vma.
309  */
310  
311 int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
312 {
313         struct inode * inode = dentry->d_inode;
314         struct super_block * sb;
315         int ret, err;
316
317         /* sync the inode to buffers */
318         ret = write_inode_now(inode, 0);
319
320         /* sync the superblock to buffers */
321         sb = inode->i_sb;
322         lock_super(sb);
323         if (sb->s_op->write_super)
324                 sb->s_op->write_super(sb);
325         unlock_super(sb);
326
327         /* .. finally sync the buffers to disk */
328         err = sync_blockdev(sb->s_bdev);
329         if (!ret)
330                 ret = err;
331         return ret;
332 }
333
334 asmlinkage long sys_fsync(unsigned int fd)
335 {
336         struct file * file;
337         struct address_space *mapping;
338         int ret, err;
339
340         ret = -EBADF;
341         file = fget(fd);
342         if (!file)
343                 goto out;
344
345         mapping = file->f_mapping;
346
347         ret = -EINVAL;
348         if (!file->f_op || !file->f_op->fsync) {
349                 /* Why?  We can still call filemap_fdatawrite */
350                 goto out_putf;
351         }
352
353         current->flags |= PF_SYNCWRITE;
354         ret = filemap_fdatawrite(mapping);
355
356         /*
357          * We need to protect against concurrent writers,
358          * which could cause livelocks in fsync_buffers_list
359          */
360         down(&mapping->host->i_sem);
361         err = file->f_op->fsync(file, file->f_dentry, 0);
362         if (!ret)
363                 ret = err;
364         up(&mapping->host->i_sem);
365         err = filemap_fdatawait(mapping);
366         if (!ret)
367                 ret = err;
368         current->flags &= ~PF_SYNCWRITE;
369
370 out_putf:
371         fput(file);
372 out:
373         return ret;
374 }
375
376 asmlinkage long sys_fdatasync(unsigned int fd)
377 {
378         struct file * file;
379         struct address_space *mapping;
380         int ret, err;
381
382         ret = -EBADF;
383         file = fget(fd);
384         if (!file)
385                 goto out;
386
387         ret = -EINVAL;
388         if (!file->f_op || !file->f_op->fsync)
389                 goto out_putf;
390
391         mapping = file->f_mapping;
392
393         current->flags |= PF_SYNCWRITE;
394         ret = filemap_fdatawrite(mapping);
395         down(&mapping->host->i_sem);
396         err = file->f_op->fsync(file, file->f_dentry, 1);
397         if (!ret)
398                 ret = err;
399         up(&mapping->host->i_sem);
400         err = filemap_fdatawait(mapping);
401         if (!ret)
402                 ret = err;
403         current->flags &= ~PF_SYNCWRITE;
404
405 out_putf:
406         fput(file);
407 out:
408         return ret;
409 }
410
411 /*
412  * Various filesystems appear to want __find_get_block to be non-blocking.
413  * But it's the page lock which protects the buffers.  To get around this,
414  * we get exclusion from try_to_free_buffers with the blockdev mapping's
415  * private_lock.
416  *
417  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
418  * may be quite high.  This code could TryLock the page, and if that
419  * succeeds, there is no need to take private_lock. (But if
420  * private_lock is contended then so is mapping->tree_lock).
421  */
422 static struct buffer_head *
423 __find_get_block_slow(struct block_device *bdev, sector_t block, int unused)
424 {
425         struct inode *bd_inode = bdev->bd_inode;
426         struct address_space *bd_mapping = bd_inode->i_mapping;
427         struct buffer_head *ret = NULL;
428         pgoff_t index;
429         struct buffer_head *bh;
430         struct buffer_head *head;
431         struct page *page;
432         int all_mapped = 1;
433
434         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
435         page = find_get_page(bd_mapping, index);
436         if (!page)
437                 goto out;
438
439         spin_lock(&bd_mapping->private_lock);
440         if (!page_has_buffers(page))
441                 goto out_unlock;
442         head = page_buffers(page);
443         bh = head;
444         do {
445                 if (bh->b_blocknr == block) {
446                         ret = bh;
447                         get_bh(bh);
448                         goto out_unlock;
449                 }
450                 if (!buffer_mapped(bh))
451                         all_mapped = 0;
452                 bh = bh->b_this_page;
453         } while (bh != head);
454
455         /* we might be here because some of the buffers on this page are
456          * not mapped.  This is due to various races between
457          * file I/O on the block device and getblk.  It gets dealt with
458          * elsewhere; don't buffer_error if we had some unmapped buffers
459          */
460         if (all_mapped) {
461                 printk("__find_get_block_slow() failed. "
462                         "block=%llu, b_blocknr=%llu\n",
463                         (unsigned long long)block, (unsigned long long)bh->b_blocknr);
464                 printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
465                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
466         }
467 out_unlock:
468         spin_unlock(&bd_mapping->private_lock);
469         page_cache_release(page);
470 out:
471         return ret;
472 }
473
474 /* If invalidate_buffers() trashes dirty buffers, it means some kind
475    of fs corruption is going on.  Trashing dirty data always implies
476    losing information that the user intended to be stored on the
477    physical layer.
478
479    Thus invalidate_buffers in general usage is not allowed to trash
480    dirty buffers.  For example ioctl(BLKFLSBUF) expects dirty data to
481    be preserved.  These buffers are simply skipped.
482
483    We also skip buffers which are still in use.  For example this can
484    happen if a userspace program is reading the block device.
485
486    NOTE: if the user removes a removable-media disk while there is still
487    dirty data not synced to disk (due to a bug in the device driver or
488    to an error of the user), then by not destroying the dirty buffers we
489    could also corrupt the next medium inserted, so a parameter is
490    necessary to handle this case in the safest way possible (trying
491    not to corrupt the newly inserted disk with data belonging to
492    the old, now corrupted one).  For the ramdisk, on the other hand, the
493    natural way to release the ramdisk memory is to destroy dirty buffers.
494
495    These are two special cases.  Normal usage is for the device driver
496    to issue a sync on the device (without waiting for I/O completion) and
497    then an invalidate_buffers call that doesn't trash dirty buffers.
498
499    For cache coherency with the blkdev pagecache, the 'update' case has
500    been introduced.  It is needed to re-read from disk any pinned
501    buffer.  NOTE: re-reading from disk is destructive, so we can do it only
502    when we assume nobody is changing the buffercache under our I/O and when
503    we think the disk contains more recent information than the buffercache.
504    The update == 1 pass marks the buffers we need to update, the update == 2
505    pass does the actual I/O. */
506 void invalidate_bdev(struct block_device *bdev, int destroy_dirty_buffers)
507 {
508         invalidate_bh_lrus();
509         /*
510          * FIXME: what about destroy_dirty_buffers?
511          * We really want to use invalidate_inode_pages2() for
512          * that, but not until that's cleaned up.
513          */
514         invalidate_inode_pages(bdev->bd_inode->i_mapping);
515 }
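/*
 * Illustrative sketch, not part of this file: the "normal usage" described
 * above - sync first, then drop the now-clean buffers - would look roughly
 * like this in a (hypothetical) media-change handler:
 *
 *	fsync_bdev(bdev);
 *	invalidate_bdev(bdev, 0);	(0 == don't destroy dirty buffers)
 */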
516
517 /*
518  * Kick pdflush then try to free up some ZONE_NORMAL memory.
519  */
520 static void free_more_memory(void)
521 {
522         struct zone **zones;
523         pg_data_t *pgdat;
524
525         wakeup_bdflush(1024);
526         yield();
527
528         for_each_pgdat(pgdat) {
529                 zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
530                 if (*zones)
531                         try_to_free_pages(zones, GFP_NOFS, 0);
532         }
533 }
534
535 /*
536  * I/O completion handler for block_read_full_page() - pages
537  * which come unlocked at the end of I/O.
538  */
539 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
540 {
541         static DEFINE_SPINLOCK(page_uptodate_lock);
542         unsigned long flags;
543         struct buffer_head *tmp;
544         struct page *page;
545         int page_uptodate = 1;
546
547         BUG_ON(!buffer_async_read(bh));
548
549         page = bh->b_page;
550         if (uptodate) {
551                 set_buffer_uptodate(bh);
552         } else {
553                 clear_buffer_uptodate(bh);
554                 if (printk_ratelimit())
555                         buffer_io_error(bh);
556                 SetPageError(page);
557         }
558
559         /*
560          * Be _very_ careful from here on. Bad things can happen if
561          * two buffer heads end IO at almost the same time and both
562          * decide that the page is now completely done.
563          */
564         spin_lock_irqsave(&page_uptodate_lock, flags);
565         clear_buffer_async_read(bh);
566         unlock_buffer(bh);
567         tmp = bh;
568         do {
569                 if (!buffer_uptodate(tmp))
570                         page_uptodate = 0;
571                 if (buffer_async_read(tmp)) {
572                         BUG_ON(!buffer_locked(tmp));
573                         goto still_busy;
574                 }
575                 tmp = tmp->b_this_page;
576         } while (tmp != bh);
577         spin_unlock_irqrestore(&page_uptodate_lock, flags);
578
579         /*
580          * If none of the buffers had errors and they are all
581          * uptodate then we can set the page uptodate.
582          */
583         if (page_uptodate && !PageError(page))
584                 SetPageUptodate(page);
585         unlock_page(page);
586         return;
587
588 still_busy:
589         spin_unlock_irqrestore(&page_uptodate_lock, flags);
590         return;
591 }
592
593 /*
594  * Completion handler for block_write_full_page() - pages which are unlocked
595  * during I/O, and which have PageWriteback cleared upon I/O completion.
596  */
597 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
598 {
599         char b[BDEVNAME_SIZE];
600         static DEFINE_SPINLOCK(page_uptodate_lock);
601         unsigned long flags;
602         struct buffer_head *tmp;
603         struct page *page;
604
605         BUG_ON(!buffer_async_write(bh));
606
607         page = bh->b_page;
608         if (uptodate) {
609                 set_buffer_uptodate(bh);
610         } else {
611                 if (printk_ratelimit()) {
612                         buffer_io_error(bh);
613                         printk(KERN_WARNING "lost page write due to "
614                                         "I/O error on %s\n",
615                                bdevname(bh->b_bdev, b));
616                 }
617                 set_bit(AS_EIO, &page->mapping->flags);
618                 clear_buffer_uptodate(bh);
619                 SetPageError(page);
620         }
621
622         spin_lock_irqsave(&page_uptodate_lock, flags);
623         clear_buffer_async_write(bh);
624         unlock_buffer(bh);
625         tmp = bh->b_this_page;
626         while (tmp != bh) {
627                 if (buffer_async_write(tmp)) {
628                         BUG_ON(!buffer_locked(tmp));
629                         goto still_busy;
630                 }
631                 tmp = tmp->b_this_page;
632         }
633         spin_unlock_irqrestore(&page_uptodate_lock, flags);
634         end_page_writeback(page);
635         return;
636
637 still_busy:
638         spin_unlock_irqrestore(&page_uptodate_lock, flags);
639         return;
640 }
641
642 /*
643  * If a page's buffers are under async readin (end_buffer_async_read
644  * completion) then there is a possibility that another thread of
645  * control could lock one of the buffers after it has completed
646  * but while some of the other buffers have not completed.  This
647  * locked buffer would confuse end_buffer_async_read() into not unlocking
648  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
649  * that this buffer is not under async I/O.
650  *
651  * The page comes unlocked when it has no locked buffer_async buffers
652  * left.
653  *
654  * PageLocked prevents anyone starting new async I/O reads any of
655  * the buffers.
656  *
657  * PageWriteback is used to prevent simultaneous writeout of the same
658  * page.
659  *
660  * PageLocked prevents anyone from starting writeback of a page which is
661  * under read I/O (PageWriteback is only ever set against a locked page).
662  */
663 static void mark_buffer_async_read(struct buffer_head *bh)
664 {
665         bh->b_end_io = end_buffer_async_read;
666         set_buffer_async_read(bh);
667 }
668
669 void mark_buffer_async_write(struct buffer_head *bh)
670 {
671         bh->b_end_io = end_buffer_async_write;
672         set_buffer_async_write(bh);
673 }
674 EXPORT_SYMBOL(mark_buffer_async_write);
675
676
677 /*
678  * fs/buffer.c contains helper functions for buffer-backed address space's
679  * fsync functions.  A common requirement for buffer-based filesystems is
680  * that certain data from the backing blockdev needs to be written out for
681  * a successful fsync().  For example, ext2 indirect blocks need to be
682  * written back and waited upon before fsync() returns.
683  *
684  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
685  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
686  * management of a list of dependent buffers at ->i_mapping->private_list.
687  *
688  * Locking is a little subtle: try_to_free_buffers() will remove buffers
689  * from their controlling inode's queue when they are being freed.  But
690  * try_to_free_buffers() will be operating against the *blockdev* mapping
691  * at the time, not against the S_ISREG file which depends on those buffers.
692  * So the locking for private_list is via the private_lock in the address_space
693  * which backs the buffers.  Which is different from the address_space 
694  * against which the buffers are listed.  So for a particular address_space,
695  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
696  * mapping->private_list will always be protected by the backing blockdev's
697  * ->private_lock.
698  *
699  * Which introduces a requirement: all buffers on an address_space's
700  * ->private_list must be from the same address_space: the blockdev's.
701  *
702  * address_spaces which do not place buffers at ->private_list via these
703  * utility functions are free to use private_lock and private_list for
704  * whatever they want.  The only requirement is that list_empty(private_list)
705  * be true at clear_inode() time.
706  *
707  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
708  * filesystems should do that.  invalidate_inode_buffers() should just go
709  * BUG_ON(!list_empty).
710  *
711  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
712  * take an address_space, not an inode.  And it should be called
713  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
714  * queued up.
715  *
716  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
717  * list if it is already on a list.  Because if the buffer is on a list,
718  * it *must* already be on the right one.  If not, the filesystem is being
719  * silly.  This will save a ton of locking.  But first we have to ensure
720  * that buffers are taken *off* the old inode's list when they are freed
721  * (presumably in truncate).  That requires careful auditing of all
722  * filesystems (do it inside bforget()).  It could also be done by bringing
723  * b_inode back.
724  */
725
726 /*
727  * The buffer's backing address_space's private_lock must be held
728  */
729 static inline void __remove_assoc_queue(struct buffer_head *bh)
730 {
731         list_del_init(&bh->b_assoc_buffers);
732 }
733
734 int inode_has_buffers(struct inode *inode)
735 {
736         return !list_empty(&inode->i_data.private_list);
737 }
738
739 /*
740  * osync is designed to support O_SYNC io.  It waits synchronously for
741  * all already-submitted IO to complete, but does not queue any new
742  * writes to the disk.
743  *
744  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
745  * you dirty the buffers, and then use osync_inode_buffers to wait for
746  * completion.  Any other dirty buffers which are not yet queued for
747  * write will not be flushed to disk by the osync.
748  */
749 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
750 {
751         struct buffer_head *bh;
752         struct list_head *p;
753         int err = 0;
754
755         spin_lock(lock);
756 repeat:
757         list_for_each_prev(p, list) {
758                 bh = BH_ENTRY(p);
759                 if (buffer_locked(bh)) {
760                         get_bh(bh);
761                         spin_unlock(lock);
762                         wait_on_buffer(bh);
763                         if (!buffer_uptodate(bh))
764                                 err = -EIO;
765                         brelse(bh);
766                         spin_lock(lock);
767                         goto repeat;
768                 }
769         }
770         spin_unlock(lock);
771         return err;
772 }
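/*
 * Illustrative sketch, not part of this file: for a single buffer, the
 * O_SYNC pattern described above amounts to queueing the write as the
 * buffer is dirtied and waiting for it afterwards:
 *
 *	mark_buffer_dirty(bh);
 *	ll_rw_block(WRITE, 1, &bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 */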
773
774 /**
775  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
776  *                        buffers
777  * @mapping: the mapping which wants those buffers written
778  *
779  * Starts I/O against the buffers at mapping->private_list, and waits upon
780  * that I/O.
781  *
782  * Basically, this is a convenience function for fsync().
783  * @mapping is a file or directory which needs those buffers to be written for
784  * a successful fsync().
785  */
786 int sync_mapping_buffers(struct address_space *mapping)
787 {
788         struct address_space *buffer_mapping = mapping->assoc_mapping;
789
790         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
791                 return 0;
792
793         return fsync_buffers_list(&buffer_mapping->private_lock,
794                                         &mapping->private_list);
795 }
796 EXPORT_SYMBOL(sync_mapping_buffers);
797
798 /*
799  * Called when we've recently written block `bblock', and it is known that
800  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
801  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
802  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
803  */
804 void write_boundary_block(struct block_device *bdev,
805                         sector_t bblock, unsigned blocksize)
806 {
807         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
808         if (bh) {
809                 if (buffer_dirty(bh))
810                         ll_rw_block(WRITE, 1, &bh);
811                 put_bh(bh);
812         }
813 }
814
815 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
816 {
817         struct address_space *mapping = inode->i_mapping;
818         struct address_space *buffer_mapping = bh->b_page->mapping;
819
820         mark_buffer_dirty(bh);
821         if (!mapping->assoc_mapping) {
822                 mapping->assoc_mapping = buffer_mapping;
823         } else {
824                 if (mapping->assoc_mapping != buffer_mapping)
825                         BUG();
826         }
827         if (list_empty(&bh->b_assoc_buffers)) {
828                 spin_lock(&buffer_mapping->private_lock);
829                 list_move_tail(&bh->b_assoc_buffers,
830                                 &mapping->private_list);
831                 spin_unlock(&buffer_mapping->private_lock);
832         }
833 }
834 EXPORT_SYMBOL(mark_buffer_dirty_inode);
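/*
 * Illustrative sketch, not part of this file: this is how a filesystem
 * such as ext2 ties a blockdev buffer (say, a freshly allocated indirect
 * block) to the regular file which depends on it, and later flushes it at
 * fsync time (the identifiers below are hypothetical):
 *
 *	bh = sb_getblk(inode->i_sb, indirect_block);
 *	... fill in the indirect block ...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 *
 * and then, in the filesystem's ->fsync():
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 */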
835
836 /*
837  * Add a page to the dirty page list.
838  *
839  * It is a sad fact of life that this function is called from several places
840  * deeply under spinlocking.  It may not sleep.
841  *
842  * If the page has buffers, the uptodate buffers are set dirty, to preserve
843  * dirty-state coherency between the page and the buffers.  If the page does
844  * not have buffers then when they are later attached they will all be set
845  * dirty.
846  *
847  * The buffers are dirtied before the page is dirtied.  There's a small race
848  * window in which a writepage caller may see the page cleanness but not the
849  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
850  * before the buffers, a concurrent writepage caller could clear the page dirty
851  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
852  * page on the dirty page list.
853  *
854  * We use private_lock to lock against try_to_free_buffers while using the
855  * page's buffer list.  Also use this to protect against clean buffers being
856  * added to the page after it was set dirty.
857  *
858  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
859  * address_space though.
860  */
861 int __set_page_dirty_buffers(struct page *page)
862 {
863         struct address_space * const mapping = page->mapping;
864
865         spin_lock(&mapping->private_lock);
866         if (page_has_buffers(page)) {
867                 struct buffer_head *head = page_buffers(page);
868                 struct buffer_head *bh = head;
869
870                 do {
871                         set_buffer_dirty(bh);
872                         bh = bh->b_this_page;
873                 } while (bh != head);
874         }
875         spin_unlock(&mapping->private_lock);
876
877         if (!TestSetPageDirty(page)) {
878                 write_lock_irq(&mapping->tree_lock);
879                 if (page->mapping) {    /* Race with truncate? */
880                         if (mapping_cap_account_dirty(mapping))
881                                 inc_page_state(nr_dirty);
882                         radix_tree_tag_set(&mapping->page_tree,
883                                                 page_index(page),
884                                                 PAGECACHE_TAG_DIRTY);
885                 }
886                 write_unlock_irq(&mapping->tree_lock);
887                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
888         }
889         
890         return 0;
891 }
892 EXPORT_SYMBOL(__set_page_dirty_buffers);
893
894 /*
895  * Write out and wait upon a list of buffers.
896  *
897  * We have conflicting pressures: we want to make sure that all
898  * initially dirty buffers get waited on, but that any subsequently
899  * dirtied buffers don't.  After all, we don't want fsync to last
900  * forever if somebody is actively writing to the file.
901  *
902  * Do this in two main stages: first we copy dirty buffers to a
903  * temporary inode list, queueing the writes as we go.  Then we clean
904  * up, waiting for those writes to complete.
905  * 
906  * During this second stage, any subsequent updates to the file may end
907  * up refiling the buffer on the original inode's dirty list again, so
908  * there is a chance we will end up with a buffer queued for write but
909  * not yet completed on that list.  So, as a final cleanup we go through
910  * the osync code to catch these locked, dirty buffers without requeuing
911  * any newly dirty buffers for write.
912  */
913 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
914 {
915         struct buffer_head *bh;
916         struct list_head tmp;
917         int err = 0, err2;
918
919         INIT_LIST_HEAD(&tmp);
920
921         spin_lock(lock);
922         while (!list_empty(list)) {
923                 bh = BH_ENTRY(list->next);
924                 list_del_init(&bh->b_assoc_buffers);
925                 if (buffer_dirty(bh) || buffer_locked(bh)) {
926                         list_add(&bh->b_assoc_buffers, &tmp);
927                         if (buffer_dirty(bh)) {
928                                 get_bh(bh);
929                                 spin_unlock(lock);
930                                 /*
931                                  * Ensure any pending I/O completes so that
932                                  * ll_rw_block() actually writes the current
933                                  * contents - it is a noop if I/O is still in
934                                  * flight on potentially older contents.
935                                  */
936                                 wait_on_buffer(bh);
937                                 ll_rw_block(WRITE, 1, &bh);
938                                 brelse(bh);
939                                 spin_lock(lock);
940                         }
941                 }
942         }
943
944         while (!list_empty(&tmp)) {
945                 bh = BH_ENTRY(tmp.prev);
946                 __remove_assoc_queue(bh);
947                 get_bh(bh);
948                 spin_unlock(lock);
949                 wait_on_buffer(bh);
950                 if (!buffer_uptodate(bh))
951                         err = -EIO;
952                 brelse(bh);
953                 spin_lock(lock);
954         }
955         
956         spin_unlock(lock);
957         err2 = osync_buffers_list(lock, list);
958         if (err)
959                 return err;
960         else
961                 return err2;
962 }
963
964 /*
965  * Invalidate any and all dirty buffers on a given inode.  We are
966  * probably unmounting the fs, but that doesn't mean we have already
967  * done a sync().  Just drop the buffers from the inode list.
968  *
969  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
970  * assumes that all the buffers are against the blockdev.  Not true
971  * for reiserfs.
972  */
973 void invalidate_inode_buffers(struct inode *inode)
974 {
975         if (inode_has_buffers(inode)) {
976                 struct address_space *mapping = &inode->i_data;
977                 struct list_head *list = &mapping->private_list;
978                 struct address_space *buffer_mapping = mapping->assoc_mapping;
979
980                 spin_lock(&buffer_mapping->private_lock);
981                 while (!list_empty(list))
982                         __remove_assoc_queue(BH_ENTRY(list->next));
983                 spin_unlock(&buffer_mapping->private_lock);
984         }
985 }
986
987 /*
988  * Remove any clean buffers from the inode's buffer list.  This is called
989  * when we're trying to free the inode itself.  Those buffers can pin it.
990  *
991  * Returns true if all buffers were removed.
992  */
993 int remove_inode_buffers(struct inode *inode)
994 {
995         int ret = 1;
996
997         if (inode_has_buffers(inode)) {
998                 struct address_space *mapping = &inode->i_data;
999                 struct list_head *list = &mapping->private_list;
1000                 struct address_space *buffer_mapping = mapping->assoc_mapping;
1001
1002                 spin_lock(&buffer_mapping->private_lock);
1003                 while (!list_empty(list)) {
1004                         struct buffer_head *bh = BH_ENTRY(list->next);
1005                         if (buffer_dirty(bh)) {
1006                                 ret = 0;
1007                                 break;
1008                         }
1009                         __remove_assoc_queue(bh);
1010                 }
1011                 spin_unlock(&buffer_mapping->private_lock);
1012         }
1013         return ret;
1014 }
1015
1016 /*
1017  * Create the appropriate buffers when given a page for data area and
1018  * the size of each buffer.. Use the bh->b_this_page linked list to
1019  * follow the buffers created.  Return NULL if unable to create more
1020  * buffers.
1021  *
1022  * The retry flag is used to differentiate async IO (paging, swapping),
1023  * which may not fail, from ordinary buffer allocations.
1024  */
1025 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
1026                 int retry)
1027 {
1028         struct buffer_head *bh, *head;
1029         long offset;
1030
1031 try_again:
1032         head = NULL;
1033         offset = PAGE_SIZE;
1034         while ((offset -= size) >= 0) {
1035                 bh = alloc_buffer_head(GFP_NOFS);
1036                 if (!bh)
1037                         goto no_grow;
1038
1039                 bh->b_bdev = NULL;
1040                 bh->b_this_page = head;
1041                 bh->b_blocknr = -1;
1042                 head = bh;
1043
1044                 bh->b_state = 0;
1045                 atomic_set(&bh->b_count, 0);
1046                 bh->b_size = size;
1047
1048                 /* Link the buffer to its page */
1049                 set_bh_page(bh, page, offset);
1050
1051                 bh->b_end_io = NULL;
1052         }
1053         return head;
1054 /*
1055  * In case anything failed, we just free everything we got.
1056  */
1057 no_grow:
1058         if (head) {
1059                 do {
1060                         bh = head;
1061                         head = head->b_this_page;
1062                         free_buffer_head(bh);
1063                 } while (head);
1064         }
1065
1066         /*
1067          * Return failure for non-async IO requests.  Async IO requests
1068          * are not allowed to fail, so we have to wait until buffer heads
1069          * become available.  But we don't want tasks sleeping with 
1070          * partially complete buffers, so all were released above.
1071          */
1072         if (!retry)
1073                 return NULL;
1074
1075         /* We're _really_ low on memory. Now we just
1076          * wait for old buffer heads to become free due to
1077          * finishing IO.  Since this is an async request and
1078          * the reserve list is empty, we're sure there are 
1079          * async buffer heads in use.
1080          */
1081         free_more_memory();
1082         goto try_again;
1083 }
1084 EXPORT_SYMBOL_GPL(alloc_page_buffers);
1085
1086 static inline void
1087 link_dev_buffers(struct page *page, struct buffer_head *head)
1088 {
1089         struct buffer_head *bh, *tail;
1090
1091         bh = head;
1092         do {
1093                 tail = bh;
1094                 bh = bh->b_this_page;
1095         } while (bh);
1096         tail->b_this_page = head;
1097         attach_page_buffers(page, head);
1098 }
1099
1100 /*
1101  * Initialise the state of a blockdev page's buffers.
1102  */ 
1103 static void
1104 init_page_buffers(struct page *page, struct block_device *bdev,
1105                         sector_t block, int size)
1106 {
1107         struct buffer_head *head = page_buffers(page);
1108         struct buffer_head *bh = head;
1109         int uptodate = PageUptodate(page);
1110
1111         do {
1112                 if (!buffer_mapped(bh)) {
1113                         init_buffer(bh, NULL, NULL);
1114                         bh->b_bdev = bdev;
1115                         bh->b_blocknr = block;
1116                         if (uptodate)
1117                                 set_buffer_uptodate(bh);
1118                         set_buffer_mapped(bh);
1119                 }
1120                 block++;
1121                 bh = bh->b_this_page;
1122         } while (bh != head);
1123 }
1124
1125 /*
1126  * Create the page-cache page that contains the requested block.
1127  *
1128  * This is used purely for blockdev mappings.
1129  */
1130 static struct page *
1131 grow_dev_page(struct block_device *bdev, sector_t block,
1132                 pgoff_t index, int size)
1133 {
1134         struct inode *inode = bdev->bd_inode;
1135         struct page *page;
1136         struct buffer_head *bh;
1137
1138         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
1139         if (!page)
1140                 return NULL;
1141
1142         if (!PageLocked(page))
1143                 BUG();
1144
1145         if (page_has_buffers(page)) {
1146                 bh = page_buffers(page);
1147                 if (bh->b_size == size) {
1148                         init_page_buffers(page, bdev, block, size);
1149                         return page;
1150                 }
1151                 if (!try_to_free_buffers(page))
1152                         goto failed;
1153         }
1154
1155         /*
1156          * Allocate some buffers for this page
1157          */
1158         bh = alloc_page_buffers(page, size, 0);
1159         if (!bh)
1160                 goto failed;
1161
1162         /*
1163          * Link the page to the buffers and initialise them.  Take the
1164          * lock to be atomic wrt __find_get_block(), which does not
1165          * run under the page lock.
1166          */
1167         spin_lock(&inode->i_mapping->private_lock);
1168         link_dev_buffers(page, bh);
1169         init_page_buffers(page, bdev, block, size);
1170         spin_unlock(&inode->i_mapping->private_lock);
1171         return page;
1172
1173 failed:
1174         BUG();
1175         unlock_page(page);
1176         page_cache_release(page);
1177         return NULL;
1178 }
1179
1180 /*
1181  * Create buffers for the specified block device block's page.  If
1182  * that page was dirty, the buffers are set dirty also.
1183  *
1184  * Except that's a bug.  Attaching dirty buffers to a dirty
1185  * blockdev's page can result in filesystem corruption, because
1186  * some of those buffers may be aliases of filesystem data.
1187  * grow_dev_page() will go BUG() if this happens.
1188  */
1189 static inline int
1190 grow_buffers(struct block_device *bdev, sector_t block, int size)
1191 {
1192         struct page *page;
1193         pgoff_t index;
1194         int sizebits;
1195
1196         sizebits = -1;
1197         do {
1198                 sizebits++;
1199         } while ((size << sizebits) < PAGE_SIZE);
1200
1201         index = block >> sizebits;
1202         block = index << sizebits;
1203
1204         /* Create a page with the proper size buffers.. */
1205         page = grow_dev_page(bdev, block, index, size);
1206         if (!page)
1207                 return 0;
1208         unlock_page(page);
1209         page_cache_release(page);
1210         return 1;
1211 }
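/*
 * Worked example, not part of this file: with 1k buffers on a 4k page the
 * loop above yields sizebits == 2, so a request for block 1234 maps to
 * page index 308 (1234 >> 2) and the page is grown starting from block
 * 1232 (308 << 2), the first of the four blocks sharing that page.
 */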
1212
1213 struct buffer_head *
1214 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1215 {
1216         /* Size must be a multiple of the hard sector size */
1217         if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1218                         (size < 512 || size > PAGE_SIZE))) {
1219                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1220                                         size);
1221                 printk(KERN_ERR "hardsect size: %d\n",
1222                                         bdev_hardsect_size(bdev));
1223
1224                 dump_stack();
1225                 return NULL;
1226         }
1227
1228         for (;;) {
1229                 struct buffer_head * bh;
1230
1231                 bh = __find_get_block(bdev, block, size);
1232                 if (bh)
1233                         return bh;
1234
1235                 if (!grow_buffers(bdev, block, size))
1236                         free_more_memory();
1237         }
1238 }
1239
1240 /*
1241  * The relationship between dirty buffers and dirty pages:
1242  *
1243  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1244  * the page is tagged dirty in its radix tree.
1245  *
1246  * At all times, the dirtiness of the buffers represents the dirtiness of
1247  * subsections of the page.  If the page has buffers, the page dirty bit is
1248  * merely a hint about the true dirty state.
1249  *
1250  * When a page is set dirty in its entirety, all its buffers are marked dirty
1251  * (if the page has buffers).
1252  *
1253  * When a buffer is marked dirty, its page is dirtied, but the page's other
1254  * buffers are not.
1255  *
1256  * Also.  When blockdev buffers are explicitly read with bread(), they
1257  * individually become uptodate.  But their backing page remains not
1258  * uptodate - even if all of its buffers are uptodate.  A subsequent
1259  * block_read_full_page() against that page will discover all the uptodate
1260  * buffers, will set the page uptodate and will perform no I/O.
1261  */
1262
1263 /**
1264  * mark_buffer_dirty - mark a buffer_head as needing writeout
1265  * @bh: the buffer_head to mark dirty
1266  *
1267  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1268  * backing page dirty, then tag the page as dirty in its address_space's radix
1269  * tree and then attach the address_space's inode to its superblock's dirty
1270  * inode list.
1271  *
1272  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1273  * mapping->tree_lock and the global inode_lock.
1274  */
1275 void fastcall mark_buffer_dirty(struct buffer_head *bh)
1276 {
1277         if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1278                 __set_page_dirty_nobuffers(bh->b_page);
1279 }
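/*
 * Illustrative sketch, not part of this file: the usual pattern for
 * modifying metadata through the buffer cache is read-modify-dirty,
 * leaving the actual writeback to pdflush or a later sync:
 *
 *	bh = sb_bread(sb, block);
 *	if (bh) {
 *		memcpy(bh->b_data + offset, data, len);
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 */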
1280
1281 /*
1282  * Decrement a buffer_head's reference count.  If all buffers against a page
1283  * have zero reference count, are clean and unlocked, and if the page is clean
1284  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1285  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1286  * a page but it ends up not being freed, and buffers may later be reattached).
1287  */
1288 void __brelse(struct buffer_head * buf)
1289 {
1290         if (atomic_read(&buf->b_count)) {
1291                 put_bh(buf);
1292                 return;
1293         }
1294         printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1295         WARN_ON(1);
1296 }
1297
1298 /*
1299  * bforget() is like brelse(), except it discards any
1300  * potentially dirty data.
1301  */
1302 void __bforget(struct buffer_head *bh)
1303 {
1304         clear_buffer_dirty(bh);
1305         if (!list_empty(&bh->b_assoc_buffers)) {
1306                 struct address_space *buffer_mapping = bh->b_page->mapping;
1307
1308                 spin_lock(&buffer_mapping->private_lock);
1309                 list_del_init(&bh->b_assoc_buffers);
1310                 spin_unlock(&buffer_mapping->private_lock);
1311         }
1312         __brelse(bh);
1313 }
1314
1315 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1316 {
1317         lock_buffer(bh);
1318         if (buffer_uptodate(bh)) {
1319                 unlock_buffer(bh);
1320                 return bh;
1321         } else {
1322                 get_bh(bh);
1323                 bh->b_end_io = end_buffer_read_sync;
1324                 submit_bh(READ, bh);
1325                 wait_on_buffer(bh);
1326                 if (buffer_uptodate(bh))
1327                         return bh;
1328         }
1329         brelse(bh);
1330         return NULL;
1331 }
1332
1333 /*
1334  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1335  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1336  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1337  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1338  * CPU's LRUs at the same time.
1339  *
1340  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1341  * sb_find_get_block().
1342  *
1343  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1344  * a local interrupt disable for that.
1345  */
1346
1347 #define BH_LRU_SIZE     8
1348
1349 struct bh_lru {
1350         struct buffer_head *bhs[BH_LRU_SIZE];
1351 };
1352
1353 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1354
1355 #ifdef CONFIG_SMP
1356 #define bh_lru_lock()   local_irq_disable()
1357 #define bh_lru_unlock() local_irq_enable()
1358 #else
1359 #define bh_lru_lock()   preempt_disable()
1360 #define bh_lru_unlock() preempt_enable()
1361 #endif
1362
1363 static inline void check_irqs_on(void)
1364 {
1365 #ifdef irqs_disabled
1366         BUG_ON(irqs_disabled());
1367 #endif
1368 }
1369
1370 /*
1371  * The LRU management algorithm is dopey-but-simple.  Sorry.
1372  */
1373 static void bh_lru_install(struct buffer_head *bh)
1374 {
1375         struct buffer_head *evictee = NULL;
1376         struct bh_lru *lru;
1377
1378         check_irqs_on();
1379         bh_lru_lock();
1380         lru = &__get_cpu_var(bh_lrus);
1381         if (lru->bhs[0] != bh) {
1382                 struct buffer_head *bhs[BH_LRU_SIZE];
1383                 int in;
1384                 int out = 0;
1385
1386                 get_bh(bh);
1387                 bhs[out++] = bh;
1388                 for (in = 0; in < BH_LRU_SIZE; in++) {
1389                         struct buffer_head *bh2 = lru->bhs[in];
1390
1391                         if (bh2 == bh) {
1392                                 __brelse(bh2);
1393                         } else {
1394                                 if (out >= BH_LRU_SIZE) {
1395                                         BUG_ON(evictee != NULL);
1396                                         evictee = bh2;
1397                                 } else {
1398                                         bhs[out++] = bh2;
1399                                 }
1400                         }
1401                 }
1402                 while (out < BH_LRU_SIZE)
1403                         bhs[out++] = NULL;
1404                 memcpy(lru->bhs, bhs, sizeof(bhs));
1405         }
1406         bh_lru_unlock();
1407
1408         if (evictee)
1409                 __brelse(evictee);
1410 }
1411
1412 /*
1413  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1414  */
1415 static inline struct buffer_head *
1416 lookup_bh_lru(struct block_device *bdev, sector_t block, int size)
1417 {
1418         struct buffer_head *ret = NULL;
1419         struct bh_lru *lru;
1420         int i;
1421
1422         check_irqs_on();
1423         bh_lru_lock();
1424         lru = &__get_cpu_var(bh_lrus);
1425         for (i = 0; i < BH_LRU_SIZE; i++) {
1426                 struct buffer_head *bh = lru->bhs[i];
1427
1428                 if (bh && bh->b_bdev == bdev &&
1429                                 bh->b_blocknr == block && bh->b_size == size) {
1430                         if (i) {
1431                                 while (i) {
1432                                         lru->bhs[i] = lru->bhs[i - 1];
1433                                         i--;
1434                                 }
1435                                 lru->bhs[0] = bh;
1436                         }
1437                         get_bh(bh);
1438                         ret = bh;
1439                         break;
1440                 }
1441         }
1442         bh_lru_unlock();
1443         return ret;
1444 }
1445
1446 /*
1447  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1448  * it in the LRU and mark it as accessed.  If it is not present then return
1449  * NULL
1450  */
1451 struct buffer_head *
1452 __find_get_block(struct block_device *bdev, sector_t block, int size)
1453 {
1454         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1455
1456         if (bh == NULL) {
1457                 bh = __find_get_block_slow(bdev, block, size);
1458                 if (bh)
1459                         bh_lru_install(bh);
1460         }
1461         if (bh)
1462                 touch_buffer(bh);
1463         return bh;
1464 }
1465 EXPORT_SYMBOL(__find_get_block);
1466
1467 /*
1468  * __getblk will locate (and, if necessary, create) the buffer_head
1469  * which corresponds to the passed block_device, block and size. The
1470  * returned buffer has its reference count incremented.
1471  *
1472  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1473  * illegal block number, __getblk() will happily return a buffer_head
1474  * which represents the non-existent block.  Very weird.
1475  *
1476  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1477  * attempt is failing.  FIXME, perhaps?
1478  */
1479 struct buffer_head *
1480 __getblk(struct block_device *bdev, sector_t block, int size)
1481 {
1482         struct buffer_head *bh = __find_get_block(bdev, block, size);
1483
1484         might_sleep();
1485         if (bh == NULL)
1486                 bh = __getblk_slow(bdev, block, size);
1487         return bh;
1488 }
1489 EXPORT_SYMBOL(__getblk);
1490
1491 /*
1492  * Do async read-ahead on a buffer..
1493  */
1494 void __breadahead(struct block_device *bdev, sector_t block, int size)
1495 {
1496         struct buffer_head *bh = __getblk(bdev, block, size);
1497         ll_rw_block(READA, 1, &bh);
1498         brelse(bh);
1499 }
1500 EXPORT_SYMBOL(__breadahead);
1501
1502 /**
1503  *  __bread() - reads a specified block and returns the bh
1504  *  @bdev: the block_device to read from
1505  *  @block: number of block
1506  *  @size: size (in bytes) to read
1507  * 
1508  *  Reads a specified block, and returns buffer head that contains it.
1509  *  It returns NULL if the block was unreadable.
1510  */
1511 struct buffer_head *
1512 __bread(struct block_device *bdev, sector_t block, int size)
1513 {
1514         struct buffer_head *bh = __getblk(bdev, block, size);
1515
1516         if (!buffer_uptodate(bh))
1517                 bh = __bread_slow(bh);
1518         return bh;
1519 }
1520 EXPORT_SYMBOL(__bread);
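/*
 * Illustrative sketch, not part of this file: a reader will often kick off
 * readahead for a block it expects to need soon and then read the block it
 * needs now; the later __bread() finds the buffer already uptodate if the
 * READA I/O completed in time:
 *
 *	__breadahead(bdev, block + 1, size);
 *	bh = __bread(bdev, block, size);
 *	if (!bh)
 *		... the block was unreadable ...
 *	else
 *		brelse(bh);
 */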
1521
1522 /*
1523  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1524  * This doesn't race because it runs in each cpu either in irq
1525  * or with preempt disabled.
1526  */
1527 static void invalidate_bh_lru(void *arg)
1528 {
1529         struct bh_lru *b = &get_cpu_var(bh_lrus);
1530         int i;
1531
1532         for (i = 0; i < BH_LRU_SIZE; i++) {
1533                 brelse(b->bhs[i]);
1534                 b->bhs[i] = NULL;
1535         }
1536         put_cpu_var(bh_lrus);
1537 }
1538         
1539 static void invalidate_bh_lrus(void)
1540 {
1541         on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1542 }
1543
1544 void set_bh_page(struct buffer_head *bh,
1545                 struct page *page, unsigned long offset)
1546 {
1547         bh->b_page = page;
1548         if (offset >= PAGE_SIZE)
1549                 BUG();
1550         if (PageHighMem(page))
1551                 /*
1552                  * This catches illegal uses and preserves the offset:
1553                  */
1554                 bh->b_data = (char *)(0 + offset);
1555         else
1556                 bh->b_data = page_address(page) + offset;
1557 }
1558 EXPORT_SYMBOL(set_bh_page);
1559
1560 /*
1561  * Called when truncating a buffer on a page completely.
1562  */
1563 static inline void discard_buffer(struct buffer_head * bh)
1564 {
1565         lock_buffer(bh);
1566         clear_buffer_dirty(bh);
1567         bh->b_bdev = NULL;
1568         clear_buffer_mapped(bh);
1569         clear_buffer_req(bh);
1570         clear_buffer_new(bh);
1571         clear_buffer_delay(bh);
1572         unlock_buffer(bh);
1573 }
1574
1575 /**
1576  * try_to_release_page() - release old fs-specific metadata on a page
1577  *
1578  * @page: the page which the kernel is trying to free
1579  * @gfp_mask: memory allocation flags (and I/O mode)
1580  *
1581  * The address_space is asked to try to release any data held against the page
1582  * (presumably at page->private).  If the release was successful, return `1'.
1583  * Otherwise return zero.
1584  *
1585  * The @gfp_mask argument specifies whether I/O may be performed to release
1586  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
1587  *
1588  * NOTE: @gfp_mask may go away, and this function may become non-blocking.
1589  */
1590 int try_to_release_page(struct page *page, int gfp_mask)
1591 {
1592         struct address_space * const mapping = page->mapping;
1593
1594         BUG_ON(!PageLocked(page));
1595         if (PageWriteback(page))
1596                 return 0;
1597         
1598         if (mapping && mapping->a_ops->releasepage)
1599                 return mapping->a_ops->releasepage(page, gfp_mask);
1600         return try_to_free_buffers(page);
1601 }
1602 EXPORT_SYMBOL(try_to_release_page);
1603
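/*
 * Example (editorial, not part of fs/buffer.c): a sketch of a ->releasepage()
 * method for a hypothetical filesystem whose only per-page data is the
 * buffer ring itself, so it can simply defer to try_to_free_buffers().
 * Journalling filesystems perform extra checks of their own here first.
 */
static int examplefs_releasepage(struct page *page, int gfp_mask)
{
        /* no filesystem-private state beyond the buffers themselves */
        return try_to_free_buffers(page);
}
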
1604 /**
1605  * block_invalidatepage - invalidate part or all of a buffer-backed page
1606  *
1607  * @page: the page which is affected
1608  * @offset: the index of the truncation point
1609  *
1610  * block_invalidatepage() is called when all or part of the page has become
1611  * invalidated by a truncate operation.
1612  *
1613  * block_invalidatepage() does not have to release all buffers, but it must
1614  * ensure that no dirty buffer is left outside @offset and that no I/O
1615  * is underway against any of the blocks which are outside the truncation
1616  * point, because the caller is about to free (and possibly reuse) those
1617  * blocks on-disk.
1618  */
1619 int block_invalidatepage(struct page *page, unsigned long offset)
1620 {
1621         struct buffer_head *head, *bh, *next;
1622         unsigned int curr_off = 0;
1623         int ret = 1;
1624
1625         BUG_ON(!PageLocked(page));
1626         if (!page_has_buffers(page))
1627                 goto out;
1628
1629         head = page_buffers(page);
1630         bh = head;
1631         do {
1632                 unsigned int next_off = curr_off + bh->b_size;
1633                 next = bh->b_this_page;
1634
1635                 /*
1636                  * is this block fully invalidated?
1637                  */
1638                 if (offset <= curr_off)
1639                         discard_buffer(bh);
1640                 curr_off = next_off;
1641                 bh = next;
1642         } while (bh != head);
1643
1644         /*
1645          * We release buffers only if the entire page is being invalidated.
1646          * The get_block cached value has been unconditionally invalidated,
1647          * so real IO is not possible anymore.
1648          */
1649         if (offset == 0)
1650                 ret = try_to_release_page(page, 0);
1651 out:
1652         return ret;
1653 }
1654 EXPORT_SYMBOL(block_invalidatepage);
1655
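/*
 * Example (editorial, not part of fs/buffer.c): a filesystem with no private
 * per-page metadata can point its ->invalidatepage() straight at
 * block_invalidatepage().  "examplefs" is a hypothetical filesystem used
 * only for illustration.
 */
static int examplefs_invalidatepage(struct page *page, unsigned long offset)
{
        return block_invalidatepage(page, offset);
}
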
1656 /*
1657  * We attach and possibly dirty the buffers atomically wrt
1658  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1659  * is already excluded via the page lock.
1660  */
1661 void create_empty_buffers(struct page *page,
1662                         unsigned long blocksize, unsigned long b_state)
1663 {
1664         struct buffer_head *bh, *head, *tail;
1665
1666         head = alloc_page_buffers(page, blocksize, 1);
1667         bh = head;
1668         do {
1669                 bh->b_state |= b_state;
1670                 tail = bh;
1671                 bh = bh->b_this_page;
1672         } while (bh);
1673         tail->b_this_page = head;
1674
1675         spin_lock(&page->mapping->private_lock);
1676         if (PageUptodate(page) || PageDirty(page)) {
1677                 bh = head;
1678                 do {
1679                         if (PageDirty(page))
1680                                 set_buffer_dirty(bh);
1681                         if (PageUptodate(page))
1682                                 set_buffer_uptodate(bh);
1683                         bh = bh->b_this_page;
1684                 } while (bh != head);
1685         }
1686         attach_page_buffers(page, head);
1687         spin_unlock(&page->mapping->private_lock);
1688 }
1689 EXPORT_SYMBOL(create_empty_buffers);
1690
1691 /*
1692  * We are taking a block for data and we don't want any output from any
1693  * buffer-cache aliases from the moment this function returns until
1694  * something explicitly marks the buffer dirty (hopefully that will not
1695  * happen until we free that block ;-)
1696  * We don't even need to mark it not-uptodate - nobody can expect
1697  * anything from a newly allocated buffer anyway. We used to use
1698  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1699  * don't want to mark the alias unmapped, for example - it would confuse
1700  * anyone who might pick it with bread() afterwards...
1701  *
1702  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1703  * be writeout I/O going on against recently-freed buffers.  We don't
1704  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1705  * only if we really need to.  That happens here.
1706  */
1707 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1708 {
1709         struct buffer_head *old_bh;
1710
1711         might_sleep();
1712
1713         old_bh = __find_get_block_slow(bdev, block, 0);
1714         if (old_bh) {
1715                 clear_buffer_dirty(old_bh);
1716                 wait_on_buffer(old_bh);
1717                 clear_buffer_req(old_bh);
1718                 __brelse(old_bh);
1719         }
1720 }
1721 EXPORT_SYMBOL(unmap_underlying_metadata);
1722
1723 /*
1724  * NOTE! All mapped/uptodate combinations are valid:
1725  *
1726  *      Mapped  Uptodate        Meaning
1727  *
1728  *      No      No              "unknown" - must do get_block()
1729  *      No      Yes             "hole" - zero-filled
1730  *      Yes     No              "allocated" - allocated on disk, not read in
1731  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1732  *
1733  * "Dirty" is valid only with the last case (mapped+uptodate).
1734  */
1735
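/*
 * Illustration (editorial, not part of fs/buffer.c): how the table above
 * translates into buffer_head state tests.  Purely a sketch - nothing in the
 * kernel needs such a classifier.
 */
static const char *example_bh_state_name(struct buffer_head *bh)
{
        if (!buffer_mapped(bh))
                return buffer_uptodate(bh) ? "hole" : "unknown";
        return buffer_uptodate(bh) ? "valid" : "allocated";
}
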
1736 /*
1737  * While block_write_full_page is writing back the dirty buffers under
1738  * the page lock, whoever dirtied the buffers may decide to clean them
1739  * again at any time.  We handle that by only looking at the buffer
1740  * state inside lock_buffer().
1741  *
1742  * If block_write_full_page() is called for regular writeback
1743  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1744  * locked buffer.   This can only happen if someone has written the buffer
1745  * directly, with submit_bh().  At the address_space level PageWriteback
1746  * prevents this contention from occurring.
1747  */
1748 static int __block_write_full_page(struct inode *inode, struct page *page,
1749                         get_block_t *get_block, struct writeback_control *wbc)
1750 {
1751         int err;
1752         sector_t block;
1753         sector_t last_block;
1754         struct buffer_head *bh, *head, *last_bh = NULL;
1755         int nr_underway = 0;
1756
1757         BUG_ON(!PageLocked(page));
1758
1759         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1760
1761         if (!page_has_buffers(page)) {
1762                 create_empty_buffers(page, 1 << inode->i_blkbits,
1763                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1764         }
1765
1766         /*
1767          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1768          * here, and the (potentially unmapped) buffers may become dirty at
1769          * any time.  If a buffer becomes dirty here after we've inspected it
1770          * then we just miss that fact, and the page stays dirty.
1771          *
1772          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1773          * handle that here by just cleaning them.
1774          */
1775
1776         block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1777         head = page_buffers(page);
1778         bh = head;
1779
1780         /*
1781          * Get all the dirty buffers mapped to disk addresses and
1782          * handle any aliases from the underlying blockdev's mapping.
1783          */
1784         do {
1785                 if (block > last_block) {
1786                         /*
1787                          * mapped buffers outside i_size will occur, because
1788                          * this page can be outside i_size when there is a
1789                          * truncate in progress.
1790                          */
1791                         /*
1792                          * The buffer was zeroed by block_write_full_page()
1793                          */
1794                         clear_buffer_dirty(bh);
1795                         set_buffer_uptodate(bh);
1796                 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1797                         err = get_block(inode, block, bh, 1);
1798                         if (err)
1799                                 goto recover;
1800                         if (buffer_new(bh)) {
1801                                 /* blockdev mappings never come here */
1802                                 clear_buffer_new(bh);
1803                                 unmap_underlying_metadata(bh->b_bdev,
1804                                                         bh->b_blocknr);
1805                         }
1806                 }
1807                 bh = bh->b_this_page;
1808                 block++;
1809         } while (bh != head);
1810
1811         do {
1812                 if (!buffer_mapped(bh))
1813                         continue;
1814                 /*
1815                  * If it's a fully non-blocking write attempt and we cannot
1816                  * lock the buffer then redirty the page.  Note that this can
1817                  * potentially cause a busy-wait loop from pdflush and kswapd
1818                  * activity, but those code paths have their own higher-level
1819                  * throttling.
1820                  */
1821                 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1822                         lock_buffer(bh);
1823                 } else if (test_set_buffer_locked(bh)) {
1824                         redirty_page_for_writepage(wbc, page);
1825                         continue;
1826                 }
1827                 if (test_clear_buffer_dirty(bh)) {
1828                         mark_buffer_async_write(bh);
1829                         last_bh = bh;
1830                 } else {
1831                         unlock_buffer(bh);
1832                 }
1833         } while ((bh = bh->b_this_page) != head);
1834
1835         /*
1836          * The page and its buffers are protected by PageWriteback(), so we can
1837          * drop the bh refcounts early.
1838          */
1839         BUG_ON(PageWriteback(page));
1840         set_page_writeback(page);
1841
1842         do {
1843                 struct buffer_head *next = bh->b_this_page;
1844                 if (buffer_async_write(bh)) {
1845                         submit_bh(WRITE, bh);
1846                         nr_underway++;
1847                         if (bh == last_bh)
1848                                 break;
1849                 }
1850                 bh = next;
1851         } while (bh != head);
1852         bh = head;
1853         unlock_page(page);
1854
1855         err = 0;
1856 done:
1857         if (nr_underway == 0) {
1858                 /*
1859                  * The page was marked dirty, but the buffers were
1860                  * clean.  Someone wrote them back by hand with
1861                  * ll_rw_block/submit_bh.  A rare case.
1862                  */
1863                 int uptodate = 1;
1864                 do {
1865                         if (!buffer_uptodate(bh)) {
1866                                 uptodate = 0;
1867                                 break;
1868                         }
1869                         bh = bh->b_this_page;
1870                 } while (bh != head);
1871                 if (uptodate)
1872                         SetPageUptodate(page);
1873                 end_page_writeback(page);
1874                 /*
1875                  * The page and buffer_heads can be released at any time from
1876                  * here on.
1877                  */
1878                 wbc->pages_skipped++;   /* We didn't write this page */
1879         }
1880         return err;
1881
1882 recover:
1883         /*
1884          * ENOSPC, or some other error.  We may already have added some
1885          * blocks to the file, so we need to write these out to avoid
1886          * exposing stale data.
1887          * The page is currently locked and not marked for writeback
1888          */
1889         bh = head;
1890         /* Recovery: lock and submit the mapped buffers */
1891         do {
1892                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1893                         lock_buffer(bh);
1894                         mark_buffer_async_write(bh);
1895                         last_bh = bh;
1896                 } else {
1897                         /*
1898                          * The buffer may have been set dirty during
1899                          * attachment to a dirty page.
1900                          */
1901                         clear_buffer_dirty(bh);
1902                 }
1903         } while ((bh = bh->b_this_page) != head);
1904         SetPageError(page);
1905         BUG_ON(PageWriteback(page));
1906         set_page_writeback(page);
1907         unlock_page(page);
1908         do {
1909                 struct buffer_head *next = bh->b_this_page;
1910                 if (buffer_async_write(bh)) {
1911                         clear_buffer_dirty(bh);
1912                         submit_bh(WRITE, bh);
1913                         nr_underway++;
1914                         if (bh == last_bh)
1915                                 break;
1916                 }
1917                 bh = next;
1918         } while (bh != head);
1919         bh = head;
1920         goto done;
1921 }
1922
1923 static int __block_prepare_write(struct inode *inode, struct page *page,
1924                 unsigned from, unsigned to, get_block_t *get_block)
1925 {
1926         unsigned block_start, block_end;
1927         sector_t block;
1928         int err = 0;
1929         unsigned blocksize, bbits;
1930         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1931
1932         BUG_ON(!PageLocked(page));
1933         BUG_ON(from > PAGE_CACHE_SIZE);
1934         BUG_ON(to > PAGE_CACHE_SIZE);
1935         BUG_ON(from > to);
1936
1937         blocksize = 1 << inode->i_blkbits;
1938         if (!page_has_buffers(page))
1939                 create_empty_buffers(page, blocksize, 0);
1940         head = page_buffers(page);
1941
1942         bbits = inode->i_blkbits;
1943         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1944
1945         for(bh = head, block_start = 0; bh != head || !block_start;
1946             block++, block_start=block_end, bh = bh->b_this_page) {
1947                 block_end = block_start + blocksize;
1948                 if (block_end <= from || block_start >= to) {
1949                         if (PageUptodate(page)) {
1950                                 if (!buffer_uptodate(bh))
1951                                         set_buffer_uptodate(bh);
1952                         }
1953                         continue;
1954                 }
1955                 if (buffer_new(bh))
1956                         clear_buffer_new(bh);
1957                 if (!buffer_mapped(bh)) {
1958                         err = get_block(inode, block, bh, 1);
1959                         if (err)
1960                                 break;
1961                         if (buffer_new(bh)) {
1962                                 clear_buffer_new(bh);
1963                                 unmap_underlying_metadata(bh->b_bdev,
1964                                                         bh->b_blocknr);
1965                                 if (PageUptodate(page)) {
1966                                         set_buffer_uptodate(bh);
1967                                         continue;
1968                                 }
1969                                 if (block_end > to || block_start < from) {
1970                                         void *kaddr;
1971
1972                                         kaddr = kmap_atomic(page, KM_USER0);
1973                                         if (block_end > to)
1974                                                 memset(kaddr+to, 0,
1975                                                         block_end-to);
1976                                         if (block_start < from)
1977                                                 memset(kaddr+block_start,
1978                                                         0, from-block_start);
1979                                         flush_dcache_page(page);
1980                                         kunmap_atomic(kaddr, KM_USER0);
1981                                 }
1982                                 continue;
1983                         }
1984                 }
1985                 if (PageUptodate(page)) {
1986                         if (!buffer_uptodate(bh))
1987                                 set_buffer_uptodate(bh);
1988                         continue; 
1989                 }
1990                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1991                      (block_start < from || block_end > to)) {
1992                         ll_rw_block(READ, 1, &bh);
1993                         *wait_bh++=bh;
1994                 }
1995         }
1996         /*
1997          * If we issued read requests - let them complete.
1998          */
1999         while(wait_bh > wait) {
2000                 wait_on_buffer(*--wait_bh);
2001                 if (!buffer_uptodate(*wait_bh))
2002                         err = -EIO;
2003         }
2004         if (!err)
2005                 return err;
2006
2007         /* Error case: */
2008         /*
2009          * Zero out any newly allocated blocks to avoid exposing stale
2010          * data.  If BH_New is set, we know that the block was newly
2011          * allocated in the above loop.
2012          */
2013         bh = head;
2014         block_start = 0;
2015         do {
2016                 block_end = block_start+blocksize;
2017                 if (block_end <= from)
2018                         goto next_bh;
2019                 if (block_start >= to)
2020                         break;
2021                 if (buffer_new(bh)) {
2022                         void *kaddr;
2023
2024                         clear_buffer_new(bh);
2025                         kaddr = kmap_atomic(page, KM_USER0);
2026                         memset(kaddr+block_start, 0, bh->b_size);
2027                         kunmap_atomic(kaddr, KM_USER0);
2028                         set_buffer_uptodate(bh);
2029                         mark_buffer_dirty(bh);
2030                 }
2031 next_bh:
2032                 block_start = block_end;
2033                 bh = bh->b_this_page;
2034         } while (bh != head);
2035         return err;
2036 }
2037
2038 static int __block_commit_write(struct inode *inode, struct page *page,
2039                 unsigned from, unsigned to)
2040 {
2041         unsigned block_start, block_end;
2042         int partial = 0;
2043         unsigned blocksize;
2044         struct buffer_head *bh, *head;
2045
2046         blocksize = 1 << inode->i_blkbits;
2047
2048         for(bh = head = page_buffers(page), block_start = 0;
2049             bh != head || !block_start;
2050             block_start=block_end, bh = bh->b_this_page) {
2051                 block_end = block_start + blocksize;
2052                 if (block_end <= from || block_start >= to) {
2053                         if (!buffer_uptodate(bh))
2054                                 partial = 1;
2055                 } else {
2056                         set_buffer_uptodate(bh);
2057                         mark_buffer_dirty(bh);
2058                 }
2059         }
2060
2061         /*
2062          * If this is a partial write which happened to make all buffers
2063          * uptodate then we can optimize away a bogus readpage() for
2064          * the next read(). Here we 'discover' whether the page went
2065          * uptodate as a result of this (potentially partial) write.
2066          */
2067         if (!partial)
2068                 SetPageUptodate(page);
2069         return 0;
2070 }
2071
2072 /*
2073  * Generic "read page" function for block devices that have the normal
2074  * get_block functionality. This is most of the block device filesystems.
2075  * Reads the page asynchronously --- the unlock_buffer() and
2076  * set/clear_buffer_uptodate() functions propagate buffer state into the
2077  * page struct once IO has completed.
2078  */
2079 int block_read_full_page(struct page *page, get_block_t *get_block)
2080 {
2081         struct inode *inode = page->mapping->host;
2082         sector_t iblock, lblock;
2083         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2084         unsigned int blocksize;
2085         int nr, i;
2086         int fully_mapped = 1;
2087
2088         BUG_ON(!PageLocked(page));
2089         blocksize = 1 << inode->i_blkbits;
2090         if (!page_has_buffers(page))
2091                 create_empty_buffers(page, blocksize, 0);
2092         head = page_buffers(page);
2093
2094         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2095         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2096         bh = head;
2097         nr = 0;
2098         i = 0;
2099
2100         do {
2101                 if (buffer_uptodate(bh))
2102                         continue;
2103
2104                 if (!buffer_mapped(bh)) {
2105                         fully_mapped = 0;
2106                         if (iblock < lblock) {
2107                                 if (get_block(inode, iblock, bh, 0))
2108                                         SetPageError(page);
2109                         }
2110                         if (!buffer_mapped(bh)) {
2111                                 void *kaddr = kmap_atomic(page, KM_USER0);
2112                                 memset(kaddr + i * blocksize, 0, blocksize);
2113                                 flush_dcache_page(page);
2114                                 kunmap_atomic(kaddr, KM_USER0);
2115                                 set_buffer_uptodate(bh);
2116                                 continue;
2117                         }
2118                         /*
2119                          * get_block() might have updated the buffer
2120                          * synchronously
2121                          */
2122                         if (buffer_uptodate(bh))
2123                                 continue;
2124                 }
2125                 arr[nr++] = bh;
2126         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2127
2128         if (fully_mapped)
2129                 SetPageMappedToDisk(page);
2130
2131         if (!nr) {
2132                 /*
2133                  * All buffers are uptodate - we can set the page uptodate
2134                  * as well. But not if get_block() returned an error.
2135                  */
2136                 if (!PageError(page))
2137                         SetPageUptodate(page);
2138                 unlock_page(page);
2139                 return 0;
2140         }
2141
2142         /* Stage two: lock the buffers */
2143         for (i = 0; i < nr; i++) {
2144                 bh = arr[i];
2145                 lock_buffer(bh);
2146                 mark_buffer_async_read(bh);
2147         }
2148
2149         /*
2150          * Stage 3: start the IO.  Check for uptodateness
2151          * inside the buffer lock in case another process reading
2152          * the underlying blockdev brought it uptodate (the sct fix).
2153          */
2154         for (i = 0; i < nr; i++) {
2155                 bh = arr[i];
2156                 if (buffer_uptodate(bh))
2157                         end_buffer_async_read(bh, 1);
2158                 else
2159                         submit_bh(READ, bh);
2160         }
2161         return 0;
2162 }
2163
2164 /* utility function for filesystems that need to do work on expanding
2165  * truncates.  Uses prepare/commit_write to allow the filesystem to
2166  * deal with the hole.  
2167  */
2168 int generic_cont_expand(struct inode *inode, loff_t size)
2169 {
2170         struct address_space *mapping = inode->i_mapping;
2171         struct page *page;
2172         unsigned long index, offset, limit;
2173         int err;
2174
2175         err = -EFBIG;
2176         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2177         if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2178                 send_sig(SIGXFSZ, current, 0);
2179                 goto out;
2180         }
2181         if (size > inode->i_sb->s_maxbytes)
2182                 goto out;
2183
2184         offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
2185
2186         /* ugh.  in prepare/commit_write, if from==to==start of block, we 
2187         ** skip the prepare.  make sure we never send an offset for the start
2188         ** of a block
2189         */
2190         if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
2191                 offset++;
2192         }
2193         index = size >> PAGE_CACHE_SHIFT;
2194         err = -ENOMEM;
2195         page = grab_cache_page(mapping, index);
2196         if (!page)
2197                 goto out;
2198         err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
2199         if (!err) {
2200                 err = mapping->a_ops->commit_write(NULL, page, offset, offset);
2201         }
2202         unlock_page(page);
2203         page_cache_release(page);
2204         if (err > 0)
2205                 err = 0;
2206 out:
2207         return err;
2208 }
2209
2210 /*
2211  * For moronic filesystems that do not allow holes in files.
2212  * We may have to extend the file.
2213  */
2214
2215 int cont_prepare_write(struct page *page, unsigned offset,
2216                 unsigned to, get_block_t *get_block, loff_t *bytes)
2217 {
2218         struct address_space *mapping = page->mapping;
2219         struct inode *inode = mapping->host;
2220         struct page *new_page;
2221         pgoff_t pgpos;
2222         long status;
2223         unsigned zerofrom;
2224         unsigned blocksize = 1 << inode->i_blkbits;
2225         void *kaddr;
2226
2227         while(page->index > (pgpos = *bytes>>PAGE_CACHE_SHIFT)) {
2228                 status = -ENOMEM;
2229                 new_page = grab_cache_page(mapping, pgpos);
2230                 if (!new_page)
2231                         goto out;
2232                 /* we might sleep */
2233                 if (*bytes>>PAGE_CACHE_SHIFT != pgpos) {
2234                         unlock_page(new_page);
2235                         page_cache_release(new_page);
2236                         continue;
2237                 }
2238                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2239                 if (zerofrom & (blocksize-1)) {
2240                         *bytes |= (blocksize-1);
2241                         (*bytes)++;
2242                 }
2243                 status = __block_prepare_write(inode, new_page, zerofrom,
2244                                                 PAGE_CACHE_SIZE, get_block);
2245                 if (status)
2246                         goto out_unmap;
2247                 kaddr = kmap_atomic(new_page, KM_USER0);
2248                 memset(kaddr+zerofrom, 0, PAGE_CACHE_SIZE-zerofrom);
2249                 flush_dcache_page(new_page);
2250                 kunmap_atomic(kaddr, KM_USER0);
2251                 generic_commit_write(NULL, new_page, zerofrom, PAGE_CACHE_SIZE);
2252                 unlock_page(new_page);
2253                 page_cache_release(new_page);
2254         }
2255
2256         if (page->index < pgpos) {
2257                 /* completely inside the area */
2258                 zerofrom = offset;
2259         } else {
2260                 /* page covers the boundary, find the boundary offset */
2261                 zerofrom = *bytes & ~PAGE_CACHE_MASK;
2262
2263                 /* if we will expand the thing last block will be filled */
2264                 if (to > zerofrom && (zerofrom & (blocksize-1))) {
2265                         *bytes |= (blocksize-1);
2266                         (*bytes)++;
2267                 }
2268
2269                 /* starting below the boundary? Nothing to zero out */
2270                 if (offset <= zerofrom)
2271                         zerofrom = offset;
2272         }
2273         status = __block_prepare_write(inode, page, zerofrom, to, get_block);
2274         if (status)
2275                 goto out1;
2276         if (zerofrom < offset) {
2277                 kaddr = kmap_atomic(page, KM_USER0);
2278                 memset(kaddr+zerofrom, 0, offset-zerofrom);
2279                 flush_dcache_page(page);
2280                 kunmap_atomic(kaddr, KM_USER0);
2281                 __block_commit_write(inode, page, zerofrom, offset);
2282         }
2283         return 0;
2284 out1:
2285         ClearPageUptodate(page);
2286         return status;
2287
2288 out_unmap:
2289         ClearPageUptodate(new_page);
2290         unlock_page(new_page);
2291         page_cache_release(new_page);
2292 out:
2293         return status;
2294 }
2295
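/*
 * Example (editorial, not part of fs/buffer.c): how a no-holes filesystem
 * (FAT-style) would call cont_prepare_write() from its ->prepare_write().
 * The examplefs_* names, the get_block callback (definition not shown) and
 * the per-inode "mmu_private" field - the highest byte offset for which
 * blocks have been allocated - are hypothetical assumptions for this sketch.
 */
static int examplefs_get_block(struct inode *, sector_t,
                                struct buffer_head *, int);

struct examplefs_inode_info {
        loff_t          mmu_private;
        struct inode    vfs_inode;
};

static inline struct examplefs_inode_info *EXAMPLEFS_I(struct inode *inode)
{
        return container_of(inode, struct examplefs_inode_info, vfs_inode);
}

static int examplefs_cont_prepare_write(struct file *file, struct page *page,
                                        unsigned from, unsigned to)
{
        return cont_prepare_write(page, from, to, examplefs_get_block,
                        &EXAMPLEFS_I(page->mapping->host)->mmu_private);
}
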
2296 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2297                         get_block_t *get_block)
2298 {
2299         struct inode *inode = page->mapping->host;
2300         int err = __block_prepare_write(inode, page, from, to, get_block);
2301         if (err)
2302                 ClearPageUptodate(page);
2303         return err;
2304 }
2305
2306 int block_commit_write(struct page *page, unsigned from, unsigned to)
2307 {
2308         struct inode *inode = page->mapping->host;
2309         __block_commit_write(inode,page,from,to);
2310         return 0;
2311 }
2312
2313 int generic_commit_write(struct file *file, struct page *page,
2314                 unsigned from, unsigned to)
2315 {
2316         struct inode *inode = page->mapping->host;
2317         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2318         __block_commit_write(inode,page,from,to);
2319         /*
2320          * No need to use i_size_read() here, the i_size
2321          * cannot change under us because we hold i_sem.
2322          */
2323         if (pos > inode->i_size) {
2324                 i_size_write(inode, pos);
2325                 mark_inode_dirty(inode);
2326         }
2327         return 0;
2328 }
2329
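/*
 * Example (editorial, not part of fs/buffer.c): the usual way a simple
 * buffer-head based filesystem wires these helpers into its
 * address_space_operations (ext2-style).  All examplefs_* names are
 * hypothetical; examplefs_get_block is the callback declared in the
 * cont_prepare_write() sketch above, and the block_* helpers are all
 * declared in <linux/buffer_head.h> (block_write_full_page is defined
 * further down in this file).
 */
static int examplefs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, examplefs_get_block);
}

static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, examplefs_get_block, wbc);
}

static int examplefs_prepare_write(struct file *file, struct page *page,
                                        unsigned from, unsigned to)
{
        return block_prepare_write(page, from, to, examplefs_get_block);
}

static struct address_space_operations examplefs_aops = {
        .readpage       = examplefs_readpage,
        .writepage      = examplefs_writepage,
        .sync_page      = block_sync_page,
        .prepare_write  = examplefs_prepare_write,
        .commit_write   = generic_commit_write,
};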
2330
2331 /*
2332  * nobh_prepare_write()'s prereads are special: the buffer_heads are freed
2333  * immediately, while under the page lock.  So it needs a special end_io
2334  * handler which does not touch the bh after unlocking it.
2335  *
2336  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
2337  * a race there is benign: unlock_buffer() only uses the bh's address for
2338  * hashing after unlocking the buffer, so it doesn't actually touch the bh
2339  * itself.
2340  */
2341 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2342 {
2343         if (uptodate) {
2344                 set_buffer_uptodate(bh);
2345         } else {
2346                 /* This happens, due to failed READA attempts. */
2347                 clear_buffer_uptodate(bh);
2348         }
2349         unlock_buffer(bh);
2350 }
2351
2352 /*
2353  * On entry, the page is fully not uptodate.
2354  * On exit the page is fully uptodate in the areas outside (from,to)
2355  */
2356 int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
2357                         get_block_t *get_block)
2358 {
2359         struct inode *inode = page->mapping->host;
2360         const unsigned blkbits = inode->i_blkbits;
2361         const unsigned blocksize = 1 << blkbits;
2362         struct buffer_head map_bh;
2363         struct buffer_head *read_bh[MAX_BUF_PER_PAGE];
2364         unsigned block_in_page;
2365         unsigned block_start;
2366         sector_t block_in_file;
2367         char *kaddr;
2368         int nr_reads = 0;
2369         int i;
2370         int ret = 0;
2371         int is_mapped_to_disk = 1;
2372         int dirtied_it = 0;
2373
2374         if (PageMappedToDisk(page))
2375                 return 0;
2376
2377         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2378         map_bh.b_page = page;
2379
2380         /*
2381          * We loop across all blocks in the page, whether or not they are
2382          * part of the affected region.  This is so we can discover if the
2383          * page is fully mapped-to-disk.
2384          */
2385         for (block_start = 0, block_in_page = 0;
2386                   block_start < PAGE_CACHE_SIZE;
2387                   block_in_page++, block_start += blocksize) {
2388                 unsigned block_end = block_start + blocksize;
2389                 int create;
2390
2391                 map_bh.b_state = 0;
2392                 create = 1;
2393                 if (block_start >= to)
2394                         create = 0;
2395                 ret = get_block(inode, block_in_file + block_in_page,
2396                                         &map_bh, create);
2397                 if (ret)
2398                         goto failed;
2399                 if (!buffer_mapped(&map_bh))
2400                         is_mapped_to_disk = 0;
2401                 if (buffer_new(&map_bh))
2402                         unmap_underlying_metadata(map_bh.b_bdev,
2403                                                         map_bh.b_blocknr);
2404                 if (PageUptodate(page))
2405                         continue;
2406                 if (buffer_new(&map_bh) || !buffer_mapped(&map_bh)) {
2407                         kaddr = kmap_atomic(page, KM_USER0);
2408                         if (block_start < from) {
2409                                 memset(kaddr+block_start, 0, from-block_start);
2410                                 dirtied_it = 1;
2411                         }
2412                         if (block_end > to) {
2413                                 memset(kaddr + to, 0, block_end - to);
2414                                 dirtied_it = 1;
2415                         }
2416                         flush_dcache_page(page);
2417                         kunmap_atomic(kaddr, KM_USER0);
2418                         continue;
2419                 }
2420                 if (buffer_uptodate(&map_bh))
2421                         continue;       /* reiserfs does this */
2422                 if (block_start < from || block_end > to) {
2423                         struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
2424
2425                         if (!bh) {
2426                                 ret = -ENOMEM;
2427                                 goto failed;
2428                         }
2429                         bh->b_state = map_bh.b_state;
2430                         atomic_set(&bh->b_count, 0);
2431                         bh->b_this_page = NULL;
2432                         bh->b_page = page;
2433                         bh->b_blocknr = map_bh.b_blocknr;
2434                         bh->b_size = blocksize;
2435                         bh->b_data = (char *)(long)block_start;
2436                         bh->b_bdev = map_bh.b_bdev;
2437                         bh->b_private = NULL;
2438                         read_bh[nr_reads++] = bh;
2439                 }
2440         }
2441
2442         if (nr_reads) {
2443                 struct buffer_head *bh;
2444
2445                 /*
2446                  * The page is locked, so these buffers are protected from
2447                  * any VM or truncate activity.  Hence we don't need to care
2448                  * for the buffer_head refcounts.
2449                  */
2450                 for (i = 0; i < nr_reads; i++) {
2451                         bh = read_bh[i];
2452                         lock_buffer(bh);
2453                         bh->b_end_io = end_buffer_read_nobh;
2454                         submit_bh(READ, bh);
2455                 }
2456                 for (i = 0; i < nr_reads; i++) {
2457                         bh = read_bh[i];
2458                         wait_on_buffer(bh);
2459                         if (!buffer_uptodate(bh))
2460                                 ret = -EIO;
2461                         free_buffer_head(bh);
2462                         read_bh[i] = NULL;
2463                 }
2464                 if (ret)
2465                         goto failed;
2466         }
2467
2468         if (is_mapped_to_disk)
2469                 SetPageMappedToDisk(page);
2470         SetPageUptodate(page);
2471
2472         /*
2473          * Setting the page dirty here isn't necessary for the prepare_write
2474          * function - commit_write will do that.  But if/when this function is
2475          * used within the pagefault handler to ensure that all mmapped pages
2476          * have backing space in the filesystem, we will need to dirty the page
2477          * if its contents were altered.
2478          */
2479         if (dirtied_it)
2480                 set_page_dirty(page);
2481
2482         return 0;
2483
2484 failed:
2485         for (i = 0; i < nr_reads; i++) {
2486                 if (read_bh[i])
2487                         free_buffer_head(read_bh[i]);
2488         }
2489
2490         /*
2491          * Error recovery is pretty slack.  Clear the page and mark it dirty
2492          * so we'll later zero out any blocks which _were_ allocated.
2493          */
2494         kaddr = kmap_atomic(page, KM_USER0);
2495         memset(kaddr, 0, PAGE_CACHE_SIZE);
2496         kunmap_atomic(kaddr, KM_USER0);
2497         SetPageUptodate(page);
2498         set_page_dirty(page);
2499         return ret;
2500 }
2501 EXPORT_SYMBOL(nobh_prepare_write);
2502
2503 int nobh_commit_write(struct file *file, struct page *page,
2504                 unsigned from, unsigned to)
2505 {
2506         struct inode *inode = page->mapping->host;
2507         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2508
2509         set_page_dirty(page);
2510         if (pos > inode->i_size) {
2511                 i_size_write(inode, pos);
2512                 mark_inode_dirty(inode);
2513         }
2514         return 0;
2515 }
2516 EXPORT_SYMBOL(nobh_commit_write);
2517
2518 /*
2519  * nobh_writepage() - based on block_write_full_page() except
2520  * that it tries to operate without attaching bufferheads to
2521  * the page.
2522  */
2523 int nobh_writepage(struct page *page, get_block_t *get_block,
2524                         struct writeback_control *wbc)
2525 {
2526         struct inode * const inode = page->mapping->host;
2527         loff_t i_size = i_size_read(inode);
2528         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2529         unsigned offset;
2530         void *kaddr;
2531         int ret;
2532
2533         /* Is the page fully inside i_size? */
2534         if (page->index < end_index)
2535                 goto out;
2536
2537         /* Is the page fully outside i_size? (truncate in progress) */
2538         offset = i_size & (PAGE_CACHE_SIZE-1);
2539         if (page->index >= end_index+1 || !offset) {
2540                 /*
2541                  * The page may have dirty, unmapped buffers.  For example,
2542                  * they may have been added in ext3_writepage().  Make them
2543                  * freeable here, so the page does not leak.
2544                  */
2545 #if 0
2546                 /* Not really sure about this  - do we need this ? */
2547                 if (page->mapping->a_ops->invalidatepage)
2548                         page->mapping->a_ops->invalidatepage(page, offset);
2549 #endif
2550                 unlock_page(page);
2551                 return 0; /* don't care */
2552         }
2553
2554         /*
2555          * The page straddles i_size.  It must be zeroed out on each and every
2556          * writepage invocation because it may be mmapped.  "A file is mapped
2557          * in multiples of the page size.  For a file that is not a multiple of
2558          * the  page size, the remaining memory is zeroed when mapped, and
2559          * writes to that region are not written out to the file."
2560          */
2561         kaddr = kmap_atomic(page, KM_USER0);
2562         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2563         flush_dcache_page(page);
2564         kunmap_atomic(kaddr, KM_USER0);
2565 out:
2566         ret = mpage_writepage(page, get_block, wbc);
2567         if (ret == -EAGAIN)
2568                 ret = __block_write_full_page(inode, page, get_block, wbc);
2569         return ret;
2570 }
2571 EXPORT_SYMBOL(nobh_writepage);
2572
2573 /*
2574  * This function assumes that ->prepare_write() uses nobh_prepare_write().
2575  */
2576 int nobh_truncate_page(struct address_space *mapping, loff_t from)
2577 {
2578         struct inode *inode = mapping->host;
2579         unsigned blocksize = 1 << inode->i_blkbits;
2580         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2581         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2582         unsigned to;
2583         struct page *page;
2584         struct address_space_operations *a_ops = mapping->a_ops;
2585         char *kaddr;
2586         int ret = 0;
2587
2588         if ((offset & (blocksize - 1)) == 0)
2589                 goto out;
2590
2591         ret = -ENOMEM;
2592         page = grab_cache_page(mapping, index);
2593         if (!page)
2594                 goto out;
2595
2596         to = (offset + blocksize) & ~(blocksize - 1);
2597         ret = a_ops->prepare_write(NULL, page, offset, to);
2598         if (ret == 0) {
2599                 kaddr = kmap_atomic(page, KM_USER0);
2600                 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2601                 flush_dcache_page(page);
2602                 kunmap_atomic(kaddr, KM_USER0);
2603                 set_page_dirty(page);
2604         }
2605         unlock_page(page);
2606         page_cache_release(page);
2607 out:
2608         return ret;
2609 }
2610 EXPORT_SYMBOL(nobh_truncate_page);
2611
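/*
 * Example (editorial, not part of fs/buffer.c): a filesystem opting into the
 * "nobh" write path (as ext2's nobh mount option does) wires the helpers up
 * roughly like this, reusing the hypothetical examplefs_get_block and
 * examplefs_readpage from the sketches above; remaining methods omitted.
 */
static int examplefs_nobh_prepare_write(struct file *file, struct page *page,
                                        unsigned from, unsigned to)
{
        return nobh_prepare_write(page, from, to, examplefs_get_block);
}

static int examplefs_nobh_writepage(struct page *page,
                                        struct writeback_control *wbc)
{
        return nobh_writepage(page, examplefs_get_block, wbc);
}

static struct address_space_operations examplefs_nobh_aops = {
        .readpage       = examplefs_readpage,
        .writepage      = examplefs_nobh_writepage,
        .sync_page      = block_sync_page,
        .prepare_write  = examplefs_nobh_prepare_write,
        .commit_write   = nobh_commit_write,
};
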
2612 int block_truncate_page(struct address_space *mapping,
2613                         loff_t from, get_block_t *get_block)
2614 {
2615         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2616         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2617         unsigned blocksize;
2618         pgoff_t iblock;
2619         unsigned length, pos;
2620         struct inode *inode = mapping->host;
2621         struct page *page;
2622         struct buffer_head *bh;
2623         void *kaddr;
2624         int err;
2625
2626         blocksize = 1 << inode->i_blkbits;
2627         length = offset & (blocksize - 1);
2628
2629         /* Block boundary? Nothing to do */
2630         if (!length)
2631                 return 0;
2632
2633         length = blocksize - length;
2634         iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2635         
2636         page = grab_cache_page(mapping, index);
2637         err = -ENOMEM;
2638         if (!page)
2639                 goto out;
2640
2641         if (!page_has_buffers(page))
2642                 create_empty_buffers(page, blocksize, 0);
2643
2644         /* Find the buffer that contains "offset" */
2645         bh = page_buffers(page);
2646         pos = blocksize;
2647         while (offset >= pos) {
2648                 bh = bh->b_this_page;
2649                 iblock++;
2650                 pos += blocksize;
2651         }
2652
2653         err = 0;
2654         if (!buffer_mapped(bh)) {
2655                 err = get_block(inode, iblock, bh, 0);
2656                 if (err)
2657                         goto unlock;
2658                 /* unmapped? It's a hole - nothing to do */
2659                 if (!buffer_mapped(bh))
2660                         goto unlock;
2661         }
2662
2663         /* Ok, it's mapped. Make sure it's up-to-date */
2664         if (PageUptodate(page))
2665                 set_buffer_uptodate(bh);
2666
2667         if (!buffer_uptodate(bh) && !buffer_delay(bh)) {
2668                 err = -EIO;
2669                 ll_rw_block(READ, 1, &bh);
2670                 wait_on_buffer(bh);
2671                 /* Uhhuh. Read error. Complain and punt. */
2672                 if (!buffer_uptodate(bh))
2673                         goto unlock;
2674         }
2675
2676         kaddr = kmap_atomic(page, KM_USER0);
2677         memset(kaddr + offset, 0, length);
2678         flush_dcache_page(page);
2679         kunmap_atomic(kaddr, KM_USER0);
2680
2681         mark_buffer_dirty(bh);
2682         err = 0;
2683
2684 unlock:
2685         unlock_page(page);
2686         page_cache_release(page);
2687 out:
2688         return err;
2689 }
2690
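/*
 * Example (editorial, not part of fs/buffer.c): zeroing the partial block at
 * the new EOF from a hypothetical filesystem's truncate path, using the
 * examplefs_get_block callback from the sketches above.
 */
static void examplefs_truncate_tail(struct inode *inode)
{
        block_truncate_page(inode->i_mapping, inode->i_size,
                                examplefs_get_block);
}
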
2691 /*
2692  * The generic ->writepage function for buffer-backed address_spaces
2693  */
2694 int block_write_full_page(struct page *page, get_block_t *get_block,
2695                         struct writeback_control *wbc)
2696 {
2697         struct inode * const inode = page->mapping->host;
2698         loff_t i_size = i_size_read(inode);
2699         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2700         unsigned offset;
2701         void *kaddr;
2702
2703         /* Is the page fully inside i_size? */
2704         if (page->index < end_index)
2705                 return __block_write_full_page(inode, page, get_block, wbc);
2706
2707         /* Is the page fully outside i_size? (truncate in progress) */
2708         offset = i_size & (PAGE_CACHE_SIZE-1);
2709         if (page->index >= end_index+1 || !offset) {
2710                 /*
2711                  * The page may have dirty, unmapped buffers.  For example,
2712                  * they may have been added in ext3_writepage().  Make them
2713                  * freeable here, so the page does not leak.
2714                  */
2715                 block_invalidatepage(page, 0);
2716                 unlock_page(page);
2717                 return 0; /* don't care */
2718         }
2719
2720         /*
2721          * The page straddles i_size.  It must be zeroed out on each and every
2722  * writepage invocation because it may be mmapped.  "A file is mapped
2723          * in multiples of the page size.  For a file that is not a multiple of
2724          * the  page size, the remaining memory is zeroed when mapped, and
2725          * writes to that region are not written out to the file."
2726          */
2727         kaddr = kmap_atomic(page, KM_USER0);
2728         memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2729         flush_dcache_page(page);
2730         kunmap_atomic(kaddr, KM_USER0);
2731         return __block_write_full_page(inode, page, get_block, wbc);
2732 }
2733
2734 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2735                             get_block_t *get_block)
2736 {
2737         struct buffer_head tmp;
2738         struct inode *inode = mapping->host;
2739         tmp.b_state = 0;
2740         tmp.b_blocknr = 0;
2741         get_block(inode, block, &tmp, 0);
2742         return tmp.b_blocknr;
2743 }
2744
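/*
 * Example (editorial, not part of fs/buffer.c): exposing generic_block_bmap()
 * through the ->bmap() address_space operation (used by FIBMAP and swap
 * files), again with the hypothetical examplefs_get_block callback.
 */
static sector_t examplefs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, examplefs_get_block);
}
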
2745 static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2746 {
2747         struct buffer_head *bh = bio->bi_private;
2748
2749         if (bio->bi_size)
2750                 return 1;
2751
2752         if (err == -EOPNOTSUPP) {
2753                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2754                 set_bit(BH_Eopnotsupp, &bh->b_state);
2755         }
2756
2757         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2758         bio_put(bio);
2759         return 0;
2760 }
2761
2762 int submit_bh(int rw, struct buffer_head * bh)
2763 {
2764         struct bio *bio;
2765         int ret = 0;
2766
2767         BUG_ON(!buffer_locked(bh));
2768         BUG_ON(!buffer_mapped(bh));
2769         BUG_ON(!bh->b_end_io);
2770
2771         if (buffer_ordered(bh) && (rw == WRITE))
2772                 rw = WRITE_BARRIER;
2773
2774         /*
2775          * Only clear out a write error when rewriting, should this
2776          * include WRITE_SYNC as well?
2777          */
2778         if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2779                 clear_buffer_write_io_error(bh);
2780
2781         /*
2782          * from here on down, it's all bio -- do the initial mapping,
2783          * submit_bio -> generic_make_request may further map this bio around
2784          */
2785         bio = bio_alloc(GFP_NOIO, 1);
2786
2787         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2788         bio->bi_bdev = bh->b_bdev;
2789         bio->bi_io_vec[0].bv_page = bh->b_page;
2790         bio->bi_io_vec[0].bv_len = bh->b_size;
2791         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2792
2793         bio->bi_vcnt = 1;
2794         bio->bi_idx = 0;
2795         bio->bi_size = bh->b_size;
2796
2797         bio->bi_end_io = end_bio_bh_io_sync;
2798         bio->bi_private = bh;
2799
2800         bio_get(bio);
2801         submit_bio(rw, bio);
2802
2803         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2804                 ret = -EOPNOTSUPP;
2805
2806         bio_put(bio);
2807         return ret;
2808 }
2809
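/*
 * Example (editorial, not part of fs/buffer.c): a synchronous read of an
 * already-mapped buffer via submit_bh(), the same pattern __bread_slow()
 * uses.  The buffer must be locked, mapped and have b_end_io set before
 * submit_bh() is called; end_buffer_read_sync() drops the extra reference.
 */
static int example_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);      /* someone read it for us meanwhile */
                return 0;
        }
        get_bh(bh);                     /* reference for the completion handler */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
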
2810 /**
2811  * ll_rw_block: low-level access to block devices (DEPRECATED)
2812  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2813  * @nr: number of &struct buffer_heads in the array
2814  * @bhs: array of pointers to &struct buffer_head
2815  *
2816  * ll_rw_block() takes an array of pointers to &struct buffer_heads,
2817  * and requests an I/O operation on them, either a %READ or a %WRITE.
2818  * The third %READA option is described in the documentation for
2819  * generic_make_request() which ll_rw_block() calls.
2820  *
2821  * This function drops any buffer that it cannot get a lock on (with the
2822  * BH_Lock state bit), any buffer that appears to be clean when doing a
2823  * write request, and any buffer that appears to be up-to-date when doing
2824  * a read request.  Further it marks as clean buffers that are processed for
2825  * writing (the buffer cache won't assume that they are actually clean until
2826  * the buffer gets unlocked).
2827  *
2828  * ll_rw_block sets b_end_io to a simple completion handler that marks
2829  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2830  * any waiters. 
2831  *
2832  * All of the buffers must be for the same device, and must also be a
2833  * multiple of the current approved size for the device.
2834  */
2835 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2836 {
2837         int i;
2838
2839         for (i = 0; i < nr; i++) {
2840                 struct buffer_head *bh = bhs[i];
2841
2842                 if (test_set_buffer_locked(bh))
2843                         continue;
2844
2845                 get_bh(bh);
2846                 if (rw == WRITE) {
2847                         if (test_clear_buffer_dirty(bh)) {
2848                                 bh->b_end_io = end_buffer_write_sync;
2849                                 submit_bh(WRITE, bh);
2850                                 continue;
2851                         }
2852                 } else {
2853                         if (!buffer_uptodate(bh)) {
2854                                 bh->b_end_io = end_buffer_read_sync;
2855                                 submit_bh(rw, bh);
2856                                 continue;
2857                         }
2858                 }
2859                 unlock_buffer(bh);
2860                 put_bh(bh);
2861         }
2862 }
2863
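/*
 * Example (editorial, not part of fs/buffer.c): asynchronous read-ahead of a
 * few consecutive blocks - essentially __breadahead() in a loop.  The bdev,
 * starting block, count and size are hypothetical parameters.
 */
static void example_readahead_blocks(struct block_device *bdev,
                                sector_t first, int nr, int size)
{
        struct buffer_head *bh;
        int i;

        for (i = 0; i < nr; i++) {
                bh = __getblk(bdev, first + i, size);
                ll_rw_block(READA, 1, &bh);
                brelse(bh);     /* drop the reference from __getblk() */
        }
}
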
2864 /*
2865  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2866  * and then start new I/O and then wait upon it.  The caller must have a ref on
2867  * the buffer_head.
2868  */
2869 int sync_dirty_buffer(struct buffer_head *bh)
2870 {
2871         int ret = 0;
2872
2873         WARN_ON(atomic_read(&bh->b_count) < 1);
2874         lock_buffer(bh);
2875         if (test_clear_buffer_dirty(bh)) {
2876                 get_bh(bh);
2877                 bh->b_end_io = end_buffer_write_sync;
2878                 ret = submit_bh(WRITE, bh);
2879                 wait_on_buffer(bh);
2880                 if (buffer_eopnotsupp(bh)) {
2881                         clear_buffer_eopnotsupp(bh);
2882                         ret = -EOPNOTSUPP;
2883                 }
2884                 if (!ret && !buffer_uptodate(bh))
2885                         ret = -EIO;
2886         } else {
2887                 unlock_buffer(bh);
2888         }
2889         return ret;
2890 }
2891
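/*
 * Example (editorial, not part of fs/buffer.c): a data-integrity metadata
 * update - read the block, modify it in memory, then write and wait with
 * sync_dirty_buffer().  The block number and payload are hypothetical, and
 * the caller is assumed to guarantee len <= size.
 */
static int example_update_block(struct block_device *bdev, sector_t block,
                                int size, const void *data, size_t len)
{
        struct buffer_head *bh = __bread(bdev, block, size);
        int err;

        if (!bh)
                return -EIO;
        memcpy(bh->b_data, data, len);
        mark_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);    /* submits the WRITE and waits */
        brelse(bh);
        return err;
}
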
2892 /*
2893  * try_to_free_buffers() checks if all the buffers on this particular page
2894  * are unused, and releases them if so.
2895  *
2896  * Exclusion against try_to_free_buffers may be obtained by either
2897  * locking the page or by holding its mapping's private_lock.
2898  *
2899  * If the page is dirty but all the buffers are clean then we need to
2900  * be sure to mark the page clean as well.  This is because the page
2901  * may be against a block device, and a later reattachment of buffers
2902  * to a dirty page will set *all* buffers dirty.  Which would corrupt
2903  * filesystem data on the same device.
2904  *
2905  * The same applies to regular filesystem pages: if all the buffers are
2906  * clean then we set the page clean and proceed.  To do that, we require
2907  * total exclusion from __set_page_dirty_buffers().  That is obtained with
2908  * private_lock.
2909  *
2910  * try_to_free_buffers() is non-blocking.
2911  */
2912 static inline int buffer_busy(struct buffer_head *bh)
2913 {
2914         return atomic_read(&bh->b_count) |
2915                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2916 }
2917
2918 static int
2919 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
2920 {
2921         struct buffer_head *head = page_buffers(page);
2922         struct buffer_head *bh;
2923
2924         bh = head;
2925         do {
2926                 if (buffer_write_io_error(bh) && page->mapping)
2927                         set_bit(AS_EIO, &page->mapping->flags);
2928                 if (buffer_busy(bh))
2929                         goto failed;
2930                 bh = bh->b_this_page;
2931         } while (bh != head);
2932
2933         do {
2934                 struct buffer_head *next = bh->b_this_page;
2935
2936                 if (!list_empty(&bh->b_assoc_buffers))
2937                         __remove_assoc_queue(bh);
2938                 bh = next;
2939         } while (bh != head);
2940         *buffers_to_free = head;
2941         __clear_page_buffers(page);
2942         return 1;
2943 failed:
2944         return 0;
2945 }
2946
2947 int try_to_free_buffers(struct page *page)
2948 {
2949         struct address_space * const mapping = page->mapping;
2950         struct buffer_head *buffers_to_free = NULL;
2951         int ret = 0;
2952
2953         BUG_ON(!PageLocked(page));
2954         if (PageWriteback(page))
2955                 return 0;
2956
2957         if (mapping == NULL) {          /* can this still happen? */
2958                 ret = drop_buffers(page, &buffers_to_free);
2959                 goto out;
2960         }
2961
2962         spin_lock(&mapping->private_lock);
2963         ret = drop_buffers(page, &buffers_to_free);
2964         if (ret) {
2965                 /*
2966                  * If the filesystem writes its buffers by hand (eg ext3)
2967                  * then we can have clean buffers against a dirty page.  We
2968                  * clean the page here; otherwise later reattachment of buffers
2969                  * could encounter a non-uptodate page, which is unresolvable.
2970                  * This only applies in the rare case where try_to_free_buffers
2971                  * succeeds but the page is not freed.
2972                  */
2973                 clear_page_dirty(page);
2974         }
2975         spin_unlock(&mapping->private_lock);
2976 out:
2977         if (buffers_to_free) {
2978                 struct buffer_head *bh = buffers_to_free;
2979
2980                 do {
2981                         struct buffer_head *next = bh->b_this_page;
2982                         free_buffer_head(bh);
2983                         bh = next;
2984                 } while (bh != buffers_to_free);
2985         }
2986         return ret;
2987 }
2988 EXPORT_SYMBOL(try_to_free_buffers);
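/*
 * Illustrative sketch, not part of the original file: a minimal
 * ->releasepage() method that simply defers to try_to_free_buffers().
 * The function name is hypothetical, and the second argument is taken
 * to be this kernel's int gfp mask; real filesystems such as ext3 do
 * journal-specific checks before letting the buffers go.
 *
 *	static int example_releasepage(struct page *page, int gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */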
2989
2990 int block_sync_page(struct page *page)
2991 {
2992         struct address_space *mapping;
2993
2994         smp_mb();
2995         mapping = page_mapping(page);
2996         if (mapping)
2997                 blk_run_backing_dev(mapping->backing_dev_info, page);
2998         return 0;
2999 }
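/*
 * Illustrative sketch, not part of the original file: block_sync_page()
 * is normally installed as the ->sync_page() method of an address_space.
 * example_writepage, example_get_block and example_aops are hypothetical
 * names; the writepage wrapper is needed because block_write_full_page()
 * takes a get_block_t, which ->writepage() does not supply.
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, example_get_block, wbc);
 *	}
 *
 *	static struct address_space_operations example_aops = {
 *		.writepage	= example_writepage,
 *		.sync_page	= block_sync_page,
 *	};
 */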
3000
3001 /*
3002  * There are no bdflush tunables left.  But distributions are
3003  * still running obsolete flush daemons, so we terminate them here.
3004  *
3005  * Use of bdflush() is deprecated and will be removed in a future kernel.
3006  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3007  */
3008 asmlinkage long sys_bdflush(int func, long data)
3009 {
3010         static int msg_count;
3011
3012         if (!capable(CAP_SYS_ADMIN))
3013                 return -EPERM;
3014
3015         if (msg_count < 5) {
3016                 msg_count++;
3017                 printk(KERN_INFO
3018                         "warning: process `%s' used the obsolete bdflush"
3019                         " system call\n", current->comm);
3020                 printk(KERN_INFO "Fix your initscripts?\n");
3021         }
3022
3023         if (func == 1)
3024                 do_exit(0);
3025         return 0;
3026 }
3027
3028 /*
3029  * Buffer-head allocation
3030  */
3031 static kmem_cache_t *bh_cachep;
3032
3033 /*
3034  * Once the number of bh's in the machine exceeds this level, we start
3035  * stripping them in writeback.
3036  */
3037 static int max_buffer_heads;
3038
3039 int buffer_heads_over_limit;
3040
3041 struct bh_accounting {
3042         int nr;                 /* Number of live bh's */
3043         int ratelimit;          /* Limit cacheline bouncing */
3044 };
3045
3046 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3047
3048 static void recalc_bh_state(void)
3049 {
3050         int i;
3051         int tot = 0;
3052
3053         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3054                 return;
3055         __get_cpu_var(bh_accounting).ratelimit = 0;
3056         for_each_cpu(i)
3057                 tot += per_cpu(bh_accounting, i).nr;
3058         buffer_heads_over_limit = (tot > max_buffer_heads);
3059 }
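/*
 * Illustrative sketch, not part of the original file: page reclaim
 * consults buffer_heads_over_limit and, when it is set, tries to strip
 * clean buffers from pages it scans, roughly along these lines (the
 * surrounding locking is omitted; the page must be locked):
 *
 *	if (buffer_heads_over_limit && PagePrivate(page))
 *		try_to_release_page(page, 0);
 */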
3060
3061 struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
3062 {
3063         struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3064         if (ret) {
3065                 preempt_disable();
3066                 __get_cpu_var(bh_accounting).nr++;
3067                 recalc_bh_state();
3068                 preempt_enable();
3069         }
3070         return ret;
3071 }
3072 EXPORT_SYMBOL(alloc_buffer_head);
3073
3074 void free_buffer_head(struct buffer_head *bh)
3075 {
3076         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3077         kmem_cache_free(bh_cachep, bh);
3078         preempt_disable();
3079         __get_cpu_var(bh_accounting).nr--;
3080         recalc_bh_state();
3081         preempt_enable();
3082 }
3083 EXPORT_SYMBOL(free_buffer_head);
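/*
 * Illustrative sketch, not part of the original file: every buffer_head
 * obtained from alloc_buffer_head() must eventually go back through
 * free_buffer_head(), otherwise the per-cpu accounting above drifts.
 * A hypothetical caller:
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *
 *	if (!bh)
 *		return NULL;
 *	...
 *	free_buffer_head(bh);
 */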
3084
3085 static void
3086 init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
3087 {
3088         if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
3089                             SLAB_CTOR_CONSTRUCTOR) {
3090                 struct buffer_head * bh = (struct buffer_head *)data;
3091
3092                 memset(bh, 0, sizeof(*bh));
3093                 INIT_LIST_HEAD(&bh->b_assoc_buffers);
3094         }
3095 }
3096
3097 #ifdef CONFIG_HOTPLUG_CPU
3098 static void buffer_exit_cpu(int cpu)
3099 {
3100         int i;
3101         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3102
3103         for (i = 0; i < BH_LRU_SIZE; i++) {
3104                 brelse(b->bhs[i]);
3105                 b->bhs[i] = NULL;
3106         }
3107 }
3108
3109 static int buffer_cpu_notify(struct notifier_block *self,
3110                               unsigned long action, void *hcpu)
3111 {
3112         if (action == CPU_DEAD)
3113                 buffer_exit_cpu((unsigned long)hcpu);
3114         return NOTIFY_OK;
3115 }
3116 #endif /* CONFIG_HOTPLUG_CPU */
3117
3118 void __init buffer_init(void)
3119 {
3120         int nrpages;
3121
3122         bh_cachep = kmem_cache_create("buffer_head",
3123                         sizeof(struct buffer_head), 0,
3124                         SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
3125
3126         /*
3127          * Limit the bh occupancy to 10% of ZONE_NORMAL
3128          */
3129         nrpages = (nr_free_buffer_pages() * 10) / 100;
3130         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3131         hotcpu_notifier(buffer_cpu_notify, 0);
3132 }
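/*
 * Illustrative arithmetic, not part of the original file: with 4KB pages
 * and a buffer_head of roughly 64 bytes, each accounted page corresponds
 * to about 64 buffer heads.  A machine whose ZONE_NORMAL offers 200,000
 * buffer pages would get nrpages = 20,000 and therefore a
 * max_buffer_heads of roughly 1,280,000 before buffer_heads_over_limit
 * is set.
 */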
3133
3134 EXPORT_SYMBOL(__bforget);
3135 EXPORT_SYMBOL(__brelse);
3136 EXPORT_SYMBOL(__wait_on_buffer);
3137 EXPORT_SYMBOL(block_commit_write);
3138 EXPORT_SYMBOL(block_prepare_write);
3139 EXPORT_SYMBOL(block_read_full_page);
3140 EXPORT_SYMBOL(block_sync_page);
3141 EXPORT_SYMBOL(block_truncate_page);
3142 EXPORT_SYMBOL(block_write_full_page);
3143 EXPORT_SYMBOL(cont_prepare_write);
3144 EXPORT_SYMBOL(end_buffer_async_write);
3145 EXPORT_SYMBOL(end_buffer_read_sync);
3146 EXPORT_SYMBOL(end_buffer_write_sync);
3147 EXPORT_SYMBOL(file_fsync);
3148 EXPORT_SYMBOL(fsync_bdev);
3149 EXPORT_SYMBOL(generic_block_bmap);
3150 EXPORT_SYMBOL(generic_commit_write);
3151 EXPORT_SYMBOL(generic_cont_expand);
3152 EXPORT_SYMBOL(init_buffer);
3153 EXPORT_SYMBOL(invalidate_bdev);
3154 EXPORT_SYMBOL(ll_rw_block);
3155 EXPORT_SYMBOL(mark_buffer_dirty);
3156 EXPORT_SYMBOL(submit_bh);
3157 EXPORT_SYMBOL(sync_dirty_buffer);
3158 EXPORT_SYMBOL(unlock_buffer);