make fs/buffer.c:cont_expand_zero() static
diff --git a/fs/buffer.c b/fs/buffer.c
index 86e58b1..189efa4 100644
@@ -67,14 +67,14 @@ static int sync_buffer(void *word)
        return 0;
 }
 
-void fastcall __lock_buffer(struct buffer_head *bh)
+void __lock_buffer(struct buffer_head *bh)
 {
        wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
-void fastcall unlock_buffer(struct buffer_head *bh)
+void unlock_buffer(struct buffer_head *bh)
 {
        smp_mb__before_clear_bit();
        clear_buffer_locked(bh);
@@ -360,16 +360,19 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-       struct zone **zones;
-       pg_data_t *pgdat;
+       struct zone *zone;
+       int nid;
 
        wakeup_pdflush(1024);
        yield();
 
-       for_each_online_pgdat(pgdat) {
-               zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-               if (*zones)
-                       try_to_free_pages(zones, 0, GFP_NOFS);
+       for_each_online_node(nid) {
+               (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+                                               gfp_zone(GFP_NOFS), NULL,
+                                               &zone);
+               if (zone)
+                       try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+                                               GFP_NOFS);
        }
 }
 
@@ -627,8 +630,7 @@ repeat:
 }
 
 /**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- *                        buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
@@ -678,7 +680,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        } else {
                BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
-       if (list_empty(&bh->b_assoc_buffers)) {
+       if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
@@ -794,6 +796,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
+       struct address_space *mapping;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -801,9 +804,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
+               mapping = bh->b_assoc_map;
                __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
+                       bh->b_assoc_map = mapping;
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(lock);
@@ -822,8 +830,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
+               mapping = bh->b_assoc_map;
+               __remove_assoc_queue(bh);
+               /* Avoid race with mark_buffer_dirty_inode() which does
+                * a lockless check and we rely on seeing the dirty bit */
+               smp_mb();
+               if (buffer_dirty(bh)) {
+                       list_add(&bh->b_assoc_buffers,
+                                &mapping->private_list);
+                       bh->b_assoc_map = mapping;
+               }
                spin_unlock(lock);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
@@ -1164,10 +1181,23 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->tree_lock and the global inode_lock.
  */
-void fastcall mark_buffer_dirty(struct buffer_head *bh)
+void mark_buffer_dirty(struct buffer_head *bh)
 {
        WARN_ON_ONCE(!buffer_uptodate(bh));
-       if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
+
+       /*
+        * Very *carefully* optimize the it-is-already-dirty case.
+        *
+        * Don't let the final "is it dirty" escape to before we
+        * perhaps modified the buffer.
+        */
+       if (buffer_dirty(bh)) {
+               smp_mb();
+               if (buffer_dirty(bh))
+                       return;
+       }
+
+       if (!test_set_buffer_dirty(bh))
                __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
@@ -1195,7 +1225,7 @@ void __brelse(struct buffer_head * buf)
 void __bforget(struct buffer_head *bh)
 {
        clear_buffer_dirty(bh);
-       if (!list_empty(&bh->b_assoc_buffers)) {
+       if (bh->b_assoc_map) {
                struct address_space *buffer_mapping = bh->b_page->mapping;
 
                spin_lock(&buffer_mapping->private_lock);
@@ -1436,6 +1466,7 @@ void invalidate_bh_lrus(void)
 {
        on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
+EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
 void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset)
@@ -1730,7 +1761,6 @@ done:
                 * The page and buffer_heads can be released at any time from
                 * here on.
                 */
-               wbc->pages_skipped++;   /* We didn't write this page */
        }
        return err;
 
@@ -1799,7 +1829,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
                                        start = max(from, block_start);
                                        size = min(to, block_end) - start;
 
-                                       zero_user_page(page, start, size, KM_USER0);
+                                       zero_user(page, start, size);
                                        set_buffer_uptodate(bh);
                                }
 
@@ -1862,19 +1892,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                                        mark_buffer_dirty(bh);
                                        continue;
                                }
-                               if (block_end > to || block_start < from) {
-                                       void *kaddr;
-
-                                       kaddr = kmap_atomic(page, KM_USER0);
-                                       if (block_end > to)
-                                               memset(kaddr+to, 0,
-                                                       block_end-to);
-                                       if (block_start < from)
-                                               memset(kaddr+block_start,
-                                                       0, from-block_start);
-                                       flush_dcache_page(page);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                               }
+                               if (block_end > to || block_start < from)
+                                       zero_user_segments(page,
+                                               to, block_end,
+                                               block_start, from);
                                continue;
                        }
                }
@@ -2105,8 +2126,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
-                               zero_user_page(page, i * blocksize, blocksize,
-                                               KM_USER0);
+                               zero_user(page, i * blocksize, blocksize);
                                if (!err)
                                        set_buffer_uptodate(bh);
                                continue;
@@ -2191,8 +2211,8 @@ out:
        return err;
 }
 
-int cont_expand_zero(struct file *file, struct address_space *mapping,
-                       loff_t pos, loff_t *bytes)
+static int cont_expand_zero(struct file *file, struct address_space *mapping,
+                           loff_t pos, loff_t *bytes)
 {
        struct inode *inode = mapping->host;
        unsigned blocksize = 1 << inode->i_blkbits;
@@ -2219,13 +2239,15 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
                                                &page, &fsdata);
                if (err)
                        goto out;
-               zero_user_page(page, zerofrom, len, KM_USER0);
+               zero_user(page, zerofrom, len);
                err = pagecache_write_end(file, mapping, curpos, len, len,
                                                page, fsdata);
                if (err < 0)
                        goto out;
                BUG_ON(err != len);
                err = 0;
+
+               balance_dirty_pages_ratelimited(mapping);
        }
 
        /* page covers the boundary, find the boundary offset */
@@ -2246,7 +2268,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
                                                &page, &fsdata);
                if (err)
                        goto out;
-               zero_user_page(page, zerofrom, len, KM_USER0);
+               zero_user(page, zerofrom, len);
                err = pagecache_write_end(file, mapping, curpos, len, len,
                                                page, fsdata);
                if (err < 0)
@@ -2306,23 +2328,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
        return 0;
 }
 
-int generic_commit_write(struct file *file, struct page *page,
-               unsigned from, unsigned to)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-       __block_commit_write(inode,page,from,to);
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_mutex.
-        */
-       if (pos > inode->i_size) {
-               i_size_write(inode, pos);
-               mark_inode_dirty(inode);
-       }
-       return 0;
-}
-
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
  * called from a page fault handler when a page is first dirtied. Hence we must
@@ -2423,7 +2428,6 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
        unsigned block_in_page;
        unsigned block_start, block_end;
        sector_t block_in_file;
-       char *kaddr;
        int nr_reads = 0;
        int ret = 0;
        int is_mapped_to_disk = 1;
@@ -2494,13 +2498,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
                        continue;
                }
                if (buffer_new(bh) || !buffer_mapped(bh)) {
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       if (block_start < from)
-                               memset(kaddr+block_start, 0, from-block_start);
-                       if (block_end > to)
-                               memset(kaddr + to, 0, block_end - to);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
+                       zero_user_segments(page, block_start, from,
+                                                       to, block_end);
                        continue;
                }
                if (buffer_uptodate(bh))
@@ -2564,16 +2563,15 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
                        struct page *page, void *fsdata)
 {
        struct inode *inode = page->mapping->host;
-       struct buffer_head *head = NULL;
+       struct buffer_head *head = fsdata;
        struct buffer_head *bh;
+       BUG_ON(fsdata != NULL && page_has_buffers(page));
 
-       if (!PageMappedToDisk(page)) {
-               if (unlikely(copied < len) && !page_has_buffers(page))
-                       attach_nobh_buffers(page, head);
-               if (page_has_buffers(page))
-                       return generic_write_end(file, mapping, pos, len,
-                                               copied, page, fsdata);
-       }
+       if (unlikely(copied < len) && !page_has_buffers(page))
+               attach_nobh_buffers(page, head);
+       if (page_has_buffers(page))
+               return generic_write_end(file, mapping, pos, len,
+                                       copied, page, fsdata);
 
        SetPageUptodate(page);
        set_page_dirty(page);
@@ -2585,7 +2583,6 @@ int nobh_write_end(struct file *file, struct address_space *mapping,
        unlock_page(page);
        page_cache_release(page);
 
-       head = fsdata;
        while (head) {
                bh = head;
                head = head->b_this_page;
@@ -2638,7 +2635,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
@@ -2711,7 +2708,7 @@ has_buffers:
                if (page_has_buffers(page))
                        goto has_buffers;
        }
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        set_page_dirty(page);
        err = 0;
 
@@ -2787,7 +2784,7 @@ int block_truncate_page(struct address_space *mapping,
                        goto unlock;
        }
 
-       zero_user_page(page, offset, length, KM_USER0);
+       zero_user(page, offset, length);
        mark_buffer_dirty(bh);
        err = 0;
 
@@ -2833,7 +2830,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
-       zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        return __block_write_full_page(inode, page, get_block, wbc);
 }
 
@@ -3039,7 +3036,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
        do {
                struct buffer_head *next = bh->b_this_page;
 
-               if (!list_empty(&bh->b_assoc_buffers))
+               if (bh->b_assoc_map)
                        __remove_assoc_queue(bh);
                bh = next;
        } while (bh != head);
@@ -3171,8 +3168,7 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
-                               set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
@@ -3215,12 +3211,68 @@ static int buffer_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
+/**
+ * bh_uptodate_or_lock - Test whether the buffer is uptodate
+ * @bh: struct buffer_head
+ *
+ * Return true if the buffer is up-to-date and false,
+ * with the buffer locked, if not.
+ */
+int bh_uptodate_or_lock(struct buffer_head *bh)
+{
+       if (!buffer_uptodate(bh)) {
+               lock_buffer(bh);
+               if (!buffer_uptodate(bh))
+                       return 0;
+               unlock_buffer(bh);
+       }
+       return 1;
+}
+EXPORT_SYMBOL(bh_uptodate_or_lock);
+
+/**
+ * bh_submit_read - Submit a locked buffer for reading
+ * @bh: struct buffer_head
+ *
+ * Returns zero on success and -EIO on error.
+ */
+int bh_submit_read(struct buffer_head *bh)
+{
+       BUG_ON(!buffer_locked(bh));
+
+       if (buffer_uptodate(bh)) {
+               unlock_buffer(bh);
+               return 0;
+       }
+
+       get_bh(bh);
+       bh->b_end_io = end_buffer_read_sync;
+       submit_bh(READ, bh);
+       wait_on_buffer(bh);
+       if (buffer_uptodate(bh))
+               return 0;
+       return -EIO;
+}
+EXPORT_SYMBOL(bh_submit_read);
+
+static void
+init_buffer_head(struct kmem_cache *cachep, void *data)
+{
+       struct buffer_head *bh = data;
+
+       memset(bh, 0, sizeof(*bh));
+       INIT_LIST_HEAD(&bh->b_assoc_buffers);
+}
+
 void __init buffer_init(void)
 {
        int nrpages;
 
-       bh_cachep = KMEM_CACHE(buffer_head,
-                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
+       bh_cachep = kmem_cache_create("buffer_head",
+                       sizeof(struct buffer_head), 0,
+                               (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+                               SLAB_MEM_SPREAD),
+                               init_buffer_head);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
@@ -3246,7 +3298,6 @@ EXPORT_SYMBOL(end_buffer_write_sync);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
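
For reference, the two helpers added near the bottom of this patch, bh_uptodate_or_lock() and bh_submit_read(), are meant to be used together by filesystem code that only wants to issue a read when a metadata buffer is not already up to date. The sketch below is illustrative only; the wrapper function name, the sb_getblk() call and the error handling are assumptions for the example, not part of the patch:

	#include <linux/buffer_head.h>	/* sb_getblk(), bh_uptodate_or_lock(), bh_submit_read() */

	/* Illustrative sketch only, not part of this patch. */
	static int example_read_metadata(struct super_block *sb, sector_t block)
	{
		struct buffer_head *bh;

		bh = sb_getblk(sb, block);
		if (!bh)
			return -ENOMEM;

		/*
		 * bh_uptodate_or_lock() returns 1 if the buffer is already
		 * uptodate; otherwise it returns 0 with the buffer locked,
		 * which is exactly the state bh_submit_read() expects.
		 */
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				brelse(bh);
				return -EIO;
			}
		}

		/* ... use bh->b_data ... */
		brelse(bh);
		return 0;
	}

The extra reference that bh_submit_read() takes with get_bh() is dropped by end_buffer_read_sync() on I/O completion, so the caller's own reference from sb_getblk() is the one released by brelse() here.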