WorkStruct: make allyesconfig
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index e998009..8e6b56f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,7 +21,6 @@
 #include "xfs_inum.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir.h"
 #include "xfs_dir2.h"
 #include "xfs_trans.h"
 #include "xfs_dmapi.h"
@@ -29,7 +28,6 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
 #include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
+STATIC void
+xfs_count_page_state(
+       struct page             *page,
+       int                     *delalloc,
+       int                     *unmapped,
+       int                     *unwritten)
+{
+       struct buffer_head      *bh, *head;
+
+       *delalloc = *unmapped = *unwritten = 0;
+
+       bh = head = page_buffers(page);
+       do {
+               if (buffer_uptodate(bh) && !buffer_mapped(bh))
+                       (*unmapped) = 1;
+               else if (buffer_unwritten(bh) && !buffer_delay(bh))
+                       clear_buffer_unwritten(bh);
+               else if (buffer_unwritten(bh))
+                       (*unwritten) = 1;
+               else if (buffer_delay(bh))
+                       (*delalloc) = 1;
+       } while ((bh = bh->b_this_page) != head);
+}
 
 #if defined(XFS_RW_TRACE)
 void
@@ -51,11 +71,10 @@ xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
-       int             mask)
+       unsigned long   pgoff)
 {
        xfs_inode_t     *ip;
-       bhv_desc_t      *bdp;
-       vnode_t         *vp = LINVFS_GET_VP(inode);
+       bhv_vnode_t     *vp = vn_from_inode(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;
@@ -63,8 +82,7 @@ xfs_page_trace(
        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-       bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-       ip = XFS_BHVTOI(bdp);
+       ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;
 
@@ -73,7 +91,7 @@ xfs_page_trace(
                (void *)ip,
                (void *)inode,
                (void *)page,
-               (void *)((unsigned long)mask),
+               (void *)pgoff,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
@@ -83,11 +101,11 @@ xfs_page_trace(
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
-               (void *)NULL,
+               (void *)((unsigned long)current_pid()),
                (void *)NULL);
 }
 #else
-#define xfs_page_trace(tag, inode, page, mask)
+#define xfs_page_trace(tag, inode, page, pgoff)
 #endif
 
 /*
@@ -116,9 +134,10 @@ xfs_destroy_ioend(
 
        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
-               bh->b_end_io(bh, ioend->io_uptodate);
+               bh->b_end_io(bh, !ioend->io_error);
        }
-
+       if (unlikely(ioend->io_error))
+               vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
 }
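
A note on the error plumbing here: ioend->io_error now records a negative errno (or zero) in place of the old io_uptodate boolean, but the buffer_head completion callback still takes an "uptodate" flag, so the errno collapses back to 0/1 at the call site. The callback contract, as declared in include/linux/buffer_head.h of this kernel generation:

	/* completion callbacks take a boolean, not an errno */
	typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

	/* hence the call above: any recorded error maps to uptodate == 0 */
	bh->b_end_io(bh, !ioend->io_error);
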
@@ -130,9 +149,10 @@ xfs_destroy_ioend(
  */
 STATIC void
 xfs_end_bio_delalloc(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
        xfs_destroy_ioend(ioend);
 }
@@ -142,9 +162,10 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
        xfs_destroy_ioend(ioend);
 }
@@ -157,16 +178,16 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
-       vnode_t                 *vp = ioend->io_vnode;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
+       bhv_vnode_t             *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
-       int                     error;
 
-       if (ioend->io_uptodate)
-               VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+       if (likely(!ioend->io_error))
+               bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
        xfs_destroy_ioend(ioend);
 }
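
The bhv_vop_* conversion visible in this and the surrounding hunks also changes the calling convention: the old VOP_* macros of the behaviour layer returned the error through a trailing output argument, while the bhv_vop_* wrappers return it directly, which is why the local error variable disappears above. Side by side, taken from the hunks themselves:

	/* before: error returned through the last macro argument */
	VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);

	/* after: an ordinary function call; xfs_map_blocks() below keeps
	 * the result, the unwritten completion above ignores it */
	error = bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
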
 
@@ -191,10 +212,10 @@ xfs_alloc_ioend(
         * all the I/O from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
-       ioend->io_uptodate = 1; /* cleared if any I/O fails */
+       ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
-       ioend->io_vnode = LINVFS_GET_VP(inode);
+       ioend->io_vnode = vn_from_inode(inode);
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
@@ -202,11 +223,11 @@ xfs_alloc_ioend(
        ioend->io_size = 0;
 
        if (type == IOMAP_UNWRITTEN)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else
-               INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
        return ioend;
 }
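
The INIT_WORK changes above are the heart of the WorkStruct series: as of the 2.6.20 workqueue rework, INIT_WORK() no longer takes a separate data pointer, and a handler instead receives the work_struct itself and recovers its container via container_of(), exactly as the xfs_end_bio_* handlers now do. A minimal sketch of the pattern, using hypothetical names (my_ctx, my_handler):

	struct my_ctx {
		struct work_struct	work;		/* embedded, not pointed at */
		int			payload;
	};

	static void my_handler(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
		/* ... use ctx->payload ... */
	}

	/* old API: INIT_WORK(&ctx->work, my_handler, ctx); */
	INIT_WORK(&ctx->work, my_handler);
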
@@ -219,38 +240,22 @@ xfs_map_blocks(
        xfs_iomap_t             *mapp,
        int                     flags)
 {
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       bhv_vnode_t             *vp = vn_from_inode(inode);
        int                     error, nmaps = 1;
 
-       VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
+       error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
                VMODIFY(vp);
        return -error;
 }
 
-/*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
- */
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
-       struct page             *page,
+STATIC inline int
+xfs_iomap_valid(
        xfs_iomap_t             *iomapp,
-       unsigned long           offset)
+       loff_t                  offset)
 {
-       xfs_off_t               full_offset;    /* offset from start of file */
-
-       ASSERT(offset < PAGE_CACHE_SIZE);
-
-       full_offset = page->index;              /* NB: using 64bit number */
-       full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
-       full_offset += offset;                  /* offset from page start */
-
-       if (full_offset < iomapp->iomap_offset)
-               return NULL;
-       if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
-               return iomapp;
-       return NULL;
+       return offset >= iomapp->iomap_offset &&
+               offset < iomapp->iomap_offset + iomapp->iomap_bsize;
 }
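
xfs_iomap_valid() replaces the removed xfs_offset_to_map(), which rebuilt the 64-bit file offset from page->index by hand on every call. Callers now compute that offset once, via page_offset(), and validity reduces to the half-open range check above. For reference, page_offset() in include/linux/pagemap.h of this era is simply:

	static inline loff_t page_offset(struct page *page)
	{
		return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
	}
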
 
 /*
@@ -267,16 +272,14 @@ xfs_end_bio(
        if (bio->bi_size)
                return 1;
 
-       ASSERT(ioend);
        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+       ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 
        /* Toss bio and pass work off to an xfsdatad thread */
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               ioend->io_uptodate = 0;
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
-
        bio_put(bio);
+
        xfs_finish_ioend(ioend);
        return 0;
 }
@@ -354,24 +357,47 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
 }
 
 /*
- * Submit all of the bios for all of the ioends we have saved up,
- * covering the initial writepage page and also any probed pages.
+ * Submit all of the bios for all of the ioends we have saved up, covering the
+ * initial writepage page and also any probed pages.
+ *
+ * Because we may have multiple ioends spanning a page, we need to start
+ * writeback on all the buffers before we submit them for I/O. If we mark the
+ * buffers as we go, then we can end up with a page that only has some
+ * buffers marked async write, and I/O completion can occur before we mark
+ * the other buffers async write.
+ *
+ * The end result of this is that we trip a bug in end_page_writeback() because
+ * we call it twice for the same page, as the code in end_buffer_async_write()
+ * assumes that all buffers on the page are started at the same time.
+ *
+ * The fix is two passes across the ioend list - one to start writeback on the
+ * buffer_heads, and then submit them for I/O on the second pass.
  */
 STATIC void
 xfs_submit_ioend(
        xfs_ioend_t             *ioend)
 {
+       xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;
 
+       /* Pass 1 - start writeback */
+       do {
+               next = ioend->io_list;
+               for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+                       xfs_start_buffer_writeback(bh);
+               }
+       } while ((ioend = next) != NULL);
+
+       /* Pass 2 - submit I/O */
+       ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;
 
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
-                       xfs_start_buffer_writeback(bh);
 
                        if (!bio) {
  retry:
@@ -430,7 +456,7 @@ STATIC void
 xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
-       unsigned int            p_offset,
+       xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
@@ -439,10 +465,7 @@ xfs_add_to_ioend(
 
        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;
-               xfs_off_t       offset;
 
-               offset = (xfs_off_t)bh->b_page->index << PAGE_CACHE_SHIFT;
-               offset += p_offset;
                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
@@ -460,36 +483,37 @@ xfs_add_to_ioend(
 }
 
 STATIC void
+xfs_map_buffer(
+       struct buffer_head      *bh,
+       xfs_iomap_t             *mp,
+       xfs_off_t               offset,
+       uint                    block_bits)
+{
+       sector_t                bn;
+
+       ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
+
+       bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
+             ((offset - mp->iomap_offset) >> block_bits);
+
+       ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
+
+       bh->b_blocknr = bn;
+       set_buffer_mapped(bh);
+}
+
+STATIC void
 xfs_map_at_offset(
-       struct page             *page,
        struct buffer_head      *bh,
-       unsigned long           offset,
+       loff_t                  offset,
        int                     block_bits,
-       xfs_iomap_t             *iomapp,
-       xfs_ioend_t             *ioend)
+       xfs_iomap_t             *iomapp)
 {
-       xfs_daddr_t             bn;
-       xfs_off_t               delta;
-       int                     sector_shift;
-
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
-       ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
-
-       delta = page->index;
-       delta <<= PAGE_CACHE_SHIFT;
-       delta += offset;
-       delta -= iomapp->iomap_offset;
-       delta >>= block_bits;
-
-       sector_shift = block_bits - BBSHIFT;
-       bn = iomapp->iomap_bn >> sector_shift;
-       bn += delta;
-       BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
-       ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
        lock_buffer(bh);
-       bh->b_blocknr = bn;
+       xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
@@ -497,13 +521,13 @@ xfs_map_at_offset(
 }
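
The arithmetic hoisted into xfs_map_buffer() converts the extent's start (iomap_bn, in 512-byte basic blocks) into filesystem-block units and adds this buffer's block offset within the extent. A worked instance under assumed geometry (4 KiB blocks, so block_bits = 12, with BBSHIFT = 9), written as stand-alone user-space C:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long iomap_bn = 800;	/* extent start, 512B basic blocks */
		unsigned long long iomap_offset = 0;	/* extent start, bytes */
		unsigned long long offset = 8192;	/* buffer's file offset, bytes */
		int block_bits = 12, bbshift = 9;	/* assumed geometry */

		unsigned long long bn =
			(iomap_bn >> (block_bits - bbshift)) +
			((offset - iomap_offset) >> block_bits);

		printf("b_blocknr = %llu\n", bn);	/* (800 >> 3) + 2 == 102 */
		return 0;
	}
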
 
 /*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
        struct page             *page,
-       unsigned int            pg_offset)
+       unsigned int            pg_offset,
+       int                     mapped)
 {
        int                     ret = 0;
 
@@ -516,25 +540,28 @@ xfs_probe_unmapped_page(
 
                        bh = head = page_buffers(page);
                        do {
-                               if (buffer_mapped(bh) || !buffer_uptodate(bh))
+                               if (!buffer_uptodate(bh))
+                                       break;
+                               if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
-                       ret = PAGE_CACHE_SIZE;
+                       ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }
 
        return ret;
 }
 
 STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
-       struct buffer_head      *head)
+       struct buffer_head      *head,
+       int                     mapped)
 {
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
@@ -543,7 +570,7 @@ xfs_probe_unmapped_cluster(
 
        /* First sum forwards in this page */
        do {
-               if (buffer_mapped(bh))
+               if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
@@ -569,13 +596,15 @@ xfs_probe_unmapped_cluster(
                        if (tindex == tlast) {
                                pg_offset =
                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-                               if (!pg_offset)
+                               if (!pg_offset) {
+                                       done = 1;
                                        break;
+                               }
                        } else
                                pg_offset = PAGE_CACHE_SIZE;
 
                        if (page->index == tindex && !TestSetPageLocked(page)) {
-                               len = xfs_probe_unmapped_page(page, pg_offset);
+                               len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }
 
@@ -585,6 +614,7 @@ xfs_probe_unmapped_cluster(
                        }
 
                        total += len;
+                       tindex++;
                }
 
                pagevec_release(&pvec);
@@ -616,6 +646,8 @@ xfs_is_delayed_page(
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
+                       else if (buffer_dirty(bh) && buffer_mapped(bh))
+                               acceptable = (type == 0);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);
@@ -638,19 +670,20 @@ xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        loff_t                  tindex,
-       xfs_iomap_t             *iomapp,
+       xfs_iomap_t             *mp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh)
 {
        struct buffer_head      *bh, *head;
-       xfs_iomap_t             *mp = iomapp, *tmp;
-       unsigned long           p_offset, end_offset;
+       xfs_off_t               end_offset;
+       unsigned long           p_offset;
        unsigned int            type;
        int                     bbits = inode->i_blkbits;
        int                     len, page_dirty;
        int                     count = 0, done = 0, uptodate = 1;
+       xfs_off_t               offset = page_offset(page);
 
        if (page->index != tindex)
                goto fail;
@@ -663,21 +696,32 @@ xfs_convert_page(
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;
 
-       end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
-
        /*
         * page_dirty is initially a count of buffers on the page before
-        * EOF and is decrememted as we move each into a cleanable state.
+        * EOF and is decremented as we move each into a cleanable state.
+        *
+        * Derivation:
+        *
+        * End offset is the highest offset that this page should represent.
+        * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+        * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+        * hence give us the correct page_dirty count. On any other page,
+        * it will be zero and in that case we need page_dirty to be the
+        * count of buffers on the page.
         */
+       end_offset = min_t(unsigned long long,
+                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+                       i_size_read(inode));
+
        len = 1 << inode->i_blkbits;
-       end_offset = max(end_offset, PAGE_CACHE_SIZE);
-       end_offset = roundup(end_offset, len);
-       page_dirty = end_offset / len;
+       p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+                                       PAGE_CACHE_SIZE);
+       p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+       page_dirty = p_offset / len;
 
-       p_offset = 0;
        bh = head = page_buffers(page);
        do {
-               if (p_offset >= end_offset)
+               if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
@@ -686,50 +730,61 @@ xfs_convert_page(
                        continue;
                }
 
-               if (buffer_unwritten(bh))
-                       type = IOMAP_UNWRITTEN;
-               else if (buffer_delay(bh))
-                       type = IOMAP_DELAY;
-               else {
-                       type = 0;
-                       if (!(buffer_mapped(bh) && all_bh && startio)) {
+               if (buffer_unwritten(bh) || buffer_delay(bh)) {
+                       if (buffer_unwritten(bh))
+                               type = IOMAP_UNWRITTEN;
+                       else
+                               type = IOMAP_DELAY;
+
+                       if (!xfs_iomap_valid(mp, offset)) {
                                done = 1;
-                       } else if (startio) {
+                               continue;
+                       }
+
+                       ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+                       ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+
+                       xfs_map_at_offset(bh, offset, bbits, mp);
+                       if (startio) {
+                               xfs_add_to_ioend(inode, bh, offset,
+                                               type, ioendp, done);
+                       } else {
+                               set_buffer_dirty(bh);
+                               unlock_buffer(bh);
+                               mark_buffer_dirty(bh);
+                       }
+                       page_dirty--;
+                       count++;
+               } else {
+                       type = 0;
+                       if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
-                               xfs_add_to_ioend(inode, bh, p_offset,
+                               xfs_add_to_ioend(inode, bh, offset,
                                                type, ioendp, done);
                                count++;
                                page_dirty--;
+                       } else {
+                               done = 1;
                        }
-                       continue;
-               }
-               tmp = xfs_offset_to_map(page, mp, p_offset);
-               if (!tmp) {
-                       done = 1;
-                       continue;
                }
-               ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
-               ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
-
-               xfs_map_at_offset(page, bh, p_offset, bbits, tmp, *ioendp);
-               if (startio) {
-                       xfs_add_to_ioend(inode, bh, p_offset,
-                                       type, ioendp, done);
-                       count++;
-               } else {
-                       set_buffer_dirty(bh);
-                       unlock_buffer(bh);
-                       mark_buffer_dirty(bh);
-               }
-               page_dirty--;
-       } while (p_offset += len, (bh = bh->b_this_page) != head);
+       } while (offset += len, (bh = bh->b_this_page) != head);
 
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
        if (startio) {
-               if (count)
+               if (count) {
+                       struct backing_dev_info *bdi;
+
+                       bdi = inode->i_mapping->backing_dev_info;
                        wbc->nr_to_write--;
+                       if (bdi_write_congested(bdi)) {
+                               wbc->encountered_congestion = 1;
+                               done = 1;
+                       } else if (wbc->nr_to_write <= 0) {
+                               done = 1;
+                       }
+               }
                xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }
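
To make the page_dirty derivation above concrete: assume 4 KiB pages, 512-byte blocks, and an i_size ending 1300 bytes into the last page. Then p_offset rounds up to 1536 and page_dirty counts the three buffers ahead of EOF; on any earlier page the masked offset is zero and the whole page counts. A self-contained sketch of the same arithmetic:

	#include <stdio.h>

	#define PAGE_CACHE_SIZE	4096UL
	#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		unsigned long len = 512;	/* 1 << inode->i_blkbits */
		unsigned long end_offset = 13 * PAGE_CACHE_SIZE + 1300;
		unsigned long p_offset = end_offset & (PAGE_CACHE_SIZE - 1);

		/* EOF mid-page: 1300 rounds up to 1536; otherwise a full page */
		p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
		printf("page_dirty = %lu\n", p_offset / len);	/* prints 3 */
		return 0;
	}
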
 
@@ -786,7 +841,7 @@ xfs_cluster_write(
  * page if possible.
  * The bh->b_state's cannot know if any of the blocks or which block for
  * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate.  Some layers
+ * only valid if the page itself isn't completely uptodate.  Some layers
  * may clear the page dirty flag prior to calling write page, under the
  * assumption the entire page will be written out; by not writing out the
  * whole page the page can be reused before all valid dirty data is
@@ -805,20 +860,23 @@ xfs_page_state_convert(
        int             unmapped) /* also implies page uptodate */
 {
        struct buffer_head      *bh, *head;
-       xfs_iomap_t             *iomp, iomap;
+       xfs_iomap_t             iomap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
-       int                     flags, len, err, done = 1;
-       int                     uptodate = 1;
-       int                     page_dirty, count = 0, trylock_flag = 0;
+       ssize_t                 size, len;
+       int                     flags, err, iomap_valid = 0, uptodate = 1;
+       int                     page_dirty, count = 0;
+       int                     trylock = 0;
+       int                     all_bh = unmapped;
 
-       /* wait for other IO threads? */
-       if (startio && wbc->sync_mode != WB_SYNC_NONE)
-               trylock_flag |= BMAPI_TRYLOCK;
+       if (startio) {
+               if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+                       trylock |= BMAPI_TRYLOCK;
+       }
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -835,7 +893,7 @@ xfs_page_state_convert(
 
        /*
         * page_dirty is initially a count of buffers on the page before
-        * EOF and is decrememted as we move each into a cleanable state.
+        * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
@@ -854,11 +912,11 @@ xfs_page_state_convert(
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;
 
-       iomp = NULL;
        bh = head = page_buffers(page);
        offset = page_offset(page);
+       flags = -1;
+       type = 0;
 
-       /* TODO: fix up "done" variable and iomap pointer (boolean) */
        /* TODO: cleanup count and page_dirty */
 
        do {
@@ -867,14 +925,16 @@ xfs_page_state_convert(
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
-                       done = 1;
+                       /*
+                        * The iomap is actually still valid, but the ioend
+                        * isn't.  This shouldn't happen too often.
+                        */
+                       iomap_valid = 0;
                        continue;
                }
 
-               if (iomp) {
-                       iomp = xfs_offset_to_map(page, &iomap, p_offset);
-                       done = (iomp == NULL);
-               }
+               if (iomap_valid)
+                       iomap_valid = xfs_iomap_valid(&iomap, offset);
 
                /*
                 * First case, map an unwritten extent and prepare for
@@ -882,34 +942,51 @@ xfs_page_state_convert(
                 *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
-                */
-               if (buffer_unwritten(bh) || buffer_delay(bh)) {
+                *
+                * Third case, an unmapped buffer was found, and we are
+                * in a path where we need to write the whole page out.
+                */
+               if (buffer_unwritten(bh) || buffer_delay(bh) ||
+                   ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                    !buffer_mapped(bh) && (unmapped || startio))) {
+                       /*
+                        * Make sure we don't use a read-only iomap
+                        */
+                       if (flags == BMAPI_READ)
+                               iomap_valid = 0;
+
                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
-                               flags = BMAPI_WRITE|BMAPI_IGNSTATE;
-                       } else {
+                               flags = BMAPI_WRITE | BMAPI_IGNSTATE;
+                       } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
-                               flags = BMAPI_ALLOCATE;
-                               if (!startio)
-                                       flags |= trylock_flag;
+                               flags = BMAPI_ALLOCATE | trylock;
+                       } else {
+                               type = IOMAP_NEW;
+                               flags = BMAPI_WRITE | BMAPI_MMAP;
                        }
 
-                       if (!iomp) {
-                               done = 1;
-                               err = xfs_map_blocks(inode, offset, len, &iomap,
-                                               flags);
+                       if (!iomap_valid) {
+                               if (type == IOMAP_NEW) {
+                                       size = xfs_probe_cluster(inode,
+                                                       page, bh, head, 0);
+                               } else {
+                                       size = len;
+                               }
+
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
                                if (err)
                                        goto error;
-                               iomp = xfs_offset_to_map(page, &iomap,
-                                                               p_offset);
-                               done = (iomp == NULL);
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
-                       if (iomp) {
-                               xfs_map_at_offset(page, bh, p_offset,
-                                               inode->i_blkbits, iomp, ioend);
+                       if (iomap_valid) {
+                               xfs_map_at_offset(bh, offset,
+                                               inode->i_blkbits, &iomap);
                                if (startio) {
-                                       xfs_add_to_ioend(inode, bh, p_offset,
-                                               type, &ioend, done);
+                                       xfs_add_to_ioend(inode, bh, offset,
+                                                       type, &ioend,
+                                                       !iomap_valid);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
@@ -917,68 +994,39 @@ xfs_page_state_convert(
                                }
                                page_dirty--;
                                count++;
-                       } else {
-                               done = 1;
                        }
-               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
-                          (unmapped || startio)) {
+               } else if (buffer_uptodate(bh) && startio) {
+                       /*
+                        * we got here because the buffer is already mapped.
+                        * That means it must already have extents allocated
+                        * underneath it. Map the extent by reading it.
+                        */
+                       if (!iomap_valid || type != 0) {
+                               flags = BMAPI_READ;
+                               size = xfs_probe_cluster(inode, page, bh,
+                                                               head, 1);
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
+                               if (err)
+                                       goto error;
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
+                       }
 
                        type = 0;
-                       if (!buffer_mapped(bh)) {
-
-                               /*
-                                * Getting here implies an unmapped buffer
-                                * was found, and we are in a path where we
-                                * need to write the whole page out.
-                                */
-                               if (!iomp) {
-                                       int     size;
-
-                                       size = xfs_probe_unmapped_cluster(
-                                                       inode, page, bh, head);
-                                       err = xfs_map_blocks(inode, offset,
-                                                       size, &iomap,
-                                                       BMAPI_WRITE|BMAPI_MMAP);
-                                       if (err) {
-                                               goto error;
-                                       }
-                                       iomp = xfs_offset_to_map(page, &iomap,
-                                                                    p_offset);
-                                       done = (iomp == NULL);
-                               }
-                               if (iomp) {
-                                       xfs_map_at_offset(page, bh, p_offset,
-                                                       inode->i_blkbits, iomp,
-                                                       ioend);
-                                       if (startio) {
-                                               xfs_add_to_ioend(inode,
-                                                       bh, p_offset, type,
-                                                       &ioend, done);
-                                       } else {
-                                               set_buffer_dirty(bh);
-                                               unlock_buffer(bh);
-                                               mark_buffer_dirty(bh);
-                                       }
-                                       page_dirty--;
-                                       count++;
-                               } else {
-                                       done = 1;
-                               }
-                       } else if (startio) {
-                               if (buffer_uptodate(bh) &&
-                                   !test_and_set_bit(BH_Lock, &bh->b_state)) {
-                                       ASSERT(buffer_mapped(bh));
-                                       xfs_add_to_ioend(inode,
-                                                       bh, p_offset, type,
-                                                       &ioend, done);
-                                       page_dirty--;
-                                       count++;
-                               } else {
-                                       done = 1;
-                               }
+                       if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+                               ASSERT(buffer_mapped(bh));
+                               if (iomap_valid)
+                                       all_bh = 1;
+                               xfs_add_to_ioend(inode, bh, offset, type,
+                                               &ioend, !iomap_valid);
+                               page_dirty--;
+                               count++;
                        } else {
-                               done = 1;
+                               iomap_valid = 0;
                        }
+               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                          (unmapped || startio)) {
+                       iomap_valid = 0;
                }
 
                if (!iohead)
@@ -992,12 +1040,12 @@ xfs_page_state_convert(
        if (startio)
                xfs_start_page_writeback(page, wbc, 1, count);
 
-       if (ioend && iomp && !done) {
-               offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+       if (ioend && iomap_valid) {
+               offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
-               xfs_cluster_write(inode, page->index + 1, iomp, &ioend,
-                                       wbc, startio, unmapped, tlast);
+               xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
+                                       wbc, startio, all_bh, tlast);
        }
 
        if (iohead)
@@ -1022,54 +1070,203 @@ error:
        return err;
 }
 
+/*
+ * writepage: Called from one of two places:
+ *
+ * 1. we are flushing a delalloc buffer head.
+ *
+ * 2. we are writing out a dirty page. Typically the page dirty
+ *    state is cleared before we get here. In this case it is
+ *    conceivable we have no buffer heads.
+ *
+ * For delalloc space on the page we need to allocate space and
+ * flush it. For unmapped buffer heads on the page we should
+ * allocate space if the page is uptodate. For any other dirty
+ * buffer heads on the page we should flush them.
+ *
+ * If we detect that a transaction would be required to flush
+ * the page, we have to check the process flags first, if we
+ * are already in a transaction or disk I/O during allocations
+ * is off, we need to fail the writepage and redirty the page.
+ */
+
+STATIC int
+xfs_vm_writepage(
+       struct page             *page,
+       struct writeback_control *wbc)
+{
+       int                     error;
+       int                     need_trans;
+       int                     delalloc, unmapped, unwritten;
+       struct inode            *inode = page->mapping->host;
+
+       xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+
+       /*
+        * We need a transaction if:
+        *  1. There are delalloc buffers on the page
+        *  2. The page is uptodate and we have unmapped buffers
+        *  3. The page is uptodate and we have no buffers
+        *  4. There are unwritten buffers on the page
+        */
+
+       if (!page_has_buffers(page)) {
+               unmapped = 1;
+               need_trans = 1;
+       } else {
+               xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+               if (!PageUptodate(page))
+                       unmapped = 0;
+               need_trans = delalloc + unmapped + unwritten;
+       }
+
+       /*
+        * If we need a transaction and the process flags say
+        * we are already in a transaction, or no IO is allowed
+        * then mark the page dirty again and leave the page
+        * as is.
+        */
+       if (current_test_flags(PF_FSTRANS) && need_trans)
+               goto out_fail;
+
+       /*
+        * Delay hooking up buffer heads until we have
+        * made our go/no-go decision.
+        */
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+       /*
+        * Convert delayed allocate, unwritten or unmapped space
+        * to real space and flush out to disk.
+        */
+       error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
+       if (error == -EAGAIN)
+               goto out_fail;
+       if (unlikely(error < 0))
+               goto out_unlock;
+
+       return 0;
+
+out_fail:
+       redirty_page_for_writepage(wbc, page);
+       unlock_page(page);
+       return 0;
+out_unlock:
+       unlock_page(page);
+       return error;
+}
+
+STATIC int
+xfs_vm_writepages(
+       struct address_space    *mapping,
+       struct writeback_control *wbc)
+{
+       struct bhv_vnode        *vp = vn_from_inode(mapping->host);
+
+       if (VN_TRUNC(vp))
+               VUNTRUNCATE(vp);
+       return generic_writepages(mapping, wbc);
+}
+
+/*
+ * Called to move a page into cleanable state - and from there
+ * to be released. Possibly the page is already clean. We always
+ * have buffer heads in this call.
+ *
+ * Returns non-zero if the page is ok to release, 0 otherwise.
+ *
+ * Possible scenarios are:
+ *
+ * 1. We are being called to release a page which has been written
+ *    to via regular I/O. buffer heads will be dirty and possibly
+ *    delalloc. If no delalloc buffer heads in this case then we
+ *    can just return zero.
+ *
+ * 2. We are called to release a page which has been written via
+ *    mmap, all we need to do is ensure there is no delalloc
+ *    state in the buffer heads, if not we can let the caller
+ *    free them and we should come back later via writepage.
+ */
+STATIC int
+xfs_vm_releasepage(
+       struct page             *page,
+       gfp_t                   gfp_mask)
+{
+       struct inode            *inode = page->mapping->host;
+       int                     dirty, delalloc, unmapped, unwritten;
+       struct writeback_control wbc = {
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = 1,
+       };
+
+       xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+
+       if (!page_has_buffers(page))
+               return 0;
+
+       xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+       if (!delalloc && !unwritten)
+               goto free_buffers;
+
+       if (!(gfp_mask & __GFP_FS))
+               return 0;
+
+       /* If we are already inside a transaction or the thread cannot
+        * do I/O, we cannot release this page.
+        */
+       if (current_test_flags(PF_FSTRANS))
+               return 0;
+
+       /*
+        * Convert delalloc space to real space, do not flush the
+        * data out to disk, that will be done by the caller.
+        * Never need to allocate space here - we will always
+        * come back to writepage in that case.
+        */
+       dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
+       if (dirty == 0 && !unwritten)
+               goto free_buffers;
+       return 0;
+
+free_buffers:
+       return try_to_free_buffers(page);
+}
+
 STATIC int
-__linvfs_get_block(
+__xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
-       unsigned long           blocks,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
 {
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       bhv_vnode_t             *vp = vn_from_inode(inode);
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
-       int                     retpbbm = 1;
+       int                     niomap = 1;
        int                     error;
 
        offset = (xfs_off_t)iblock << inode->i_blkbits;
-       if (blocks)
-               size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
-                                       (xfs_off_t)blocks << inode->i_blkbits);
-       else
-               size = 1 << inode->i_blkbits;
-
-       VOP_BMAP(vp, offset, size,
-               create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+       ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+       size = bh_result->b_size;
+       error = bhv_vop_bmap(vp, offset, size,
+                            create ? flags : BMAPI_READ, &iomap, &niomap);
        if (error)
                return -error;
-
-       if (retpbbm == 0)
+       if (niomap == 0)
                return 0;
 
        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
-               xfs_daddr_t     bn;
-               xfs_off_t       delta;
-
-               /* For unwritten extents do not report a disk address on
+               /*
+                * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
-                       delta = offset - iomap.iomap_offset;
-                       delta >>= inode->i_blkbits;
-
-                       bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
-                       bn += delta;
-                       BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
-                       bh_result->b_blocknr = bn;
-                       set_buffer_mapped(bh_result);
+                       xfs_map_buffer(bh_result, &iomap, offset,
+                                      inode->i_blkbits);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
@@ -1079,12 +1276,16 @@ __linvfs_get_block(
                }
        }
 
-       /* If this is a realtime file, data might be on a new device */
+       /*
+        * If this is a realtime file, data may be on a different device
+        * to that pointed to from the buffer_head b_bdev currently.
+        */
        bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
-       /* If we previously allocated a block out beyond eof and
-        * we are now coming back to use it then we will need to
-        * flag it as new even if it has a disk address.
+       /*
+        * If we previously allocated a block out beyond eof and we are
+        * now coming back to use it then we will need to flag it as new
+        * even if it has a disk address.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
@@ -1100,42 +1301,40 @@ __linvfs_get_block(
                }
        }
 
-       if (blocks) {
+       if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
-                               iomap.iomap_bsize - iomap.iomap_delta,
-                               (xfs_off_t)blocks << inode->i_blkbits);
-               bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
+                               iomap.iomap_bsize - iomap.iomap_delta, size);
+               bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
        }
 
        return 0;
 }
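
With the extra blocks argument gone, __xfs_get_blocks() reads the caller's maximum mapping length out of bh_result->b_size and writes the length it actually mapped back into the same field. A sketch of the caller's side of that contract, where map_bh stands in for what fs/direct-io.c sets up and inode, iblock and max_blocks are assumed from the surrounding context:

	struct buffer_head	map_bh = { .b_state = 0 };
	int			err;

	/* in: how much mapping is wanted, encoded in b_size */
	map_bh.b_size = max_blocks << inode->i_blkbits;

	err = xfs_get_blocks(inode, iblock, &map_bh, 1 /* create */);

	/* out: on success b_blocknr/b_bdev are set and b_size is
	 * trimmed to the length actually mapped */
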
 
 int
-linvfs_get_block(
+xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __linvfs_get_block(inode, iblock, 0, bh_result,
-                                       create, 0, BMAPI_WRITE);
+       return __xfs_get_blocks(inode, iblock,
+                               bh_result, create, 0, BMAPI_WRITE);
 }
 
 STATIC int
-linvfs_get_blocks_direct(
+xfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
-       unsigned long           max_blocks,
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
-                                       create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+       return __xfs_get_blocks(inode, iblock,
+                               bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
 STATIC void
-linvfs_end_io_direct(
+xfs_end_io_direct(
        struct kiocb    *iocb,
        loff_t          offset,
        ssize_t         size,
@@ -1146,9 +1345,9 @@ linvfs_end_io_direct(
        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
-        * to happen from process contect but aio+dio I/O completion
+        * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
-        * This is not nessecary for synchronous direct I/O, but we do
+        * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
@@ -1160,12 +1359,11 @@ linvfs_end_io_direct(
                ioend->io_size = size;
                xfs_finish_ioend(ioend);
        } else {
-               ASSERT(size >= 0);
                xfs_destroy_ioend(ioend);
        }
 
        /*
-        * blockdev_direct_IO can return an error even afer the I/O
+        * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
@@ -1173,7 +1371,7 @@ linvfs_end_io_direct(
 }
 
 STATIC ssize_t
-linvfs_direct_IO(
+xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
@@ -1182,267 +1380,101 @@ linvfs_direct_IO(
 {
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
-       vnode_t         *vp = LINVFS_GET_VP(inode);
+       bhv_vnode_t     *vp = vn_from_inode(inode);
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;
        ssize_t         ret;
 
-       VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
+       error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
        if (error)
                return -error;
 
        iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 
-       ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-               iomap.iomap_target->bt_bdev,
-               iov, offset, nr_segs,
-               linvfs_get_blocks_direct,
-               linvfs_end_io_direct);
+       if (rw == WRITE) {
+               ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
+                       iomap.iomap_target->bt_bdev,
+                       iov, offset, nr_segs,
+                       xfs_get_blocks_direct,
+                       xfs_end_io_direct);
+       } else {
+               ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+                       iomap.iomap_target->bt_bdev,
+                       iov, offset, nr_segs,
+                       xfs_get_blocks_direct,
+                       xfs_end_io_direct);
+       }
 
        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);
        return ret;
 }
 
+STATIC int
+xfs_vm_prepare_write(
+       struct file             *file,
+       struct page             *page,
+       unsigned int            from,
+       unsigned int            to)
+{
+       return block_prepare_write(page, from, to, xfs_get_blocks);
+}
 
 STATIC sector_t
-linvfs_bmap(
+xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
 {
        struct inode            *inode = (struct inode *)mapping->host;
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
-       int                     error;
-
-       vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
+       bhv_vnode_t             *vp = vn_from_inode(inode);
 
-       VOP_RWLOCK(vp, VRWLOCK_READ);
-       VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
-       VOP_RWUNLOCK(vp, VRWLOCK_READ);
-       return generic_block_bmap(mapping, block, linvfs_get_block);
+       vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+       bhv_vop_rwlock(vp, VRWLOCK_READ);
+       bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
+       bhv_vop_rwunlock(vp, VRWLOCK_READ);
+       return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
 STATIC int
-linvfs_readpage(
+xfs_vm_readpage(
        struct file             *unused,
        struct page             *page)
 {
-       return mpage_readpage(page, linvfs_get_block);
+       return mpage_readpage(page, xfs_get_blocks);
 }
 
 STATIC int
-linvfs_readpages(
+xfs_vm_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
 {
-       return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
+       return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
 STATIC void
-xfs_count_page_state(
-       struct page             *page,
-       int                     *delalloc,
-       int                     *unmapped,
-       int                     *unwritten)
-{
-       struct buffer_head      *bh, *head;
-
-       *delalloc = *unmapped = *unwritten = 0;
-
-       bh = head = page_buffers(page);
-       do {
-               if (buffer_uptodate(bh) && !buffer_mapped(bh))
-                       (*unmapped) = 1;
-               else if (buffer_unwritten(bh) && !buffer_delay(bh))
-                       clear_buffer_unwritten(bh);
-               else if (buffer_unwritten(bh))
-                       (*unwritten) = 1;
-               else if (buffer_delay(bh))
-                       (*delalloc) = 1;
-       } while ((bh = bh->b_this_page) != head);
-}
-
-
-/*
- * writepage: Called from one of two places:
- *
- * 1. we are flushing a delalloc buffer head.
- *
- * 2. we are writing out a dirty page. Typically the page dirty
- *    state is cleared before we get here. In this case is it
- *    conceivable we have no buffer heads.
- *
- * For delalloc space on the page we need to allocate space and
- * flush it. For unmapped buffer heads on the page we should
- * allocate space if the page is uptodate. For any other dirty
- * buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush
- * the page, we have to check the process flags first, if we
- * are already in a transaction or disk I/O during allocations
- * is off, we need to fail the writepage and redirty the page.
- */
-
-STATIC int
-linvfs_writepage(
-       struct page             *page,
-       struct writeback_control *wbc)
-{
-       int                     error;
-       int                     need_trans;
-       int                     delalloc, unmapped, unwritten;
-       struct inode            *inode = page->mapping->host;
-
-       xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
-
-       /*
-        * We need a transaction if:
-        *  1. There are delalloc buffers on the page
-        *  2. The page is uptodate and we have unmapped buffers
-        *  3. The page is uptodate and we have no buffers
-        *  4. There are unwritten buffers on the page
-        */
-
-       if (!page_has_buffers(page)) {
-               unmapped = 1;
-               need_trans = 1;
-       } else {
-               xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-               if (!PageUptodate(page))
-                       unmapped = 0;
-               need_trans = delalloc + unmapped + unwritten;
-       }
-
-       /*
-        * If we need a transaction and the process flags say
-        * we are already in a transaction, or no IO is allowed
-        * then mark the page dirty again and leave the page
-        * as is.
-        */
-       if (PFLAGS_TEST_FSTRANS() && need_trans)
-               goto out_fail;
-
-       /*
-        * Delay hooking up buffer heads until we have
-        * made our go/no-go decision.
-        */
-       if (!page_has_buffers(page))
-               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-
-       /*
-        * Convert delayed allocate, unwritten or unmapped space
-        * to real space and flush out to disk.
-        */
-       error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
-       if (error == -EAGAIN)
-               goto out_fail;
-       if (unlikely(error < 0))
-               goto out_unlock;
-
-       return 0;
-
-out_fail:
-       redirty_page_for_writepage(wbc, page);
-       unlock_page(page);
-       return 0;
-out_unlock:
-       unlock_page(page);
-       return error;
-}
-
-STATIC int
-linvfs_invalidate_page(
+xfs_vm_invalidatepage(
        struct page             *page,
        unsigned long           offset)
 {
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
-       return block_invalidatepage(page, offset);
-}
-
-/*
- * Called to move a page into cleanable state - and from there
- * to be released. Possibly the page is already clean. We always
- * have buffer heads in this call.
- *
- * Returns 0 if the page is ok to release, 1 otherwise.
- *
- * Possible scenarios are:
- *
- * 1. We are being called to release a page which has been written
- *    to via regular I/O. buffer heads will be dirty and possibly
- *    delalloc. If no delalloc buffer heads in this case then we
- *    can just return zero.
- *
- * 2. We are called to release a page which has been written via
- *    mmap, all we need to do is ensure there is no delalloc
- *    state in the buffer heads, if not we can let the caller
- *    free them and we should come back later via writepage.
- */
-STATIC int
-linvfs_release_page(
-       struct page             *page,
-       gfp_t                   gfp_mask)
-{
-       struct inode            *inode = page->mapping->host;
-       int                     dirty, delalloc, unmapped, unwritten;
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_ALL,
-               .nr_to_write = 1,
-       };
-
-       xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
-
-       xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-       if (!delalloc && !unwritten)
-               goto free_buffers;
-
-       if (!(gfp_mask & __GFP_FS))
-               return 0;
-
-       /* If we are already inside a transaction or the thread cannot
-        * do I/O, we cannot release this page.
-        */
-       if (PFLAGS_TEST_FSTRANS())
-               return 0;
-
-       /*
-        * Convert delalloc space to real space, do not flush the
-        * data out to disk, that will be done by the caller.
-        * Never need to allocate space here - we will always
-        * come back to writepage in that case.
-        */
-       dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
-       if (dirty == 0 && !unwritten)
-               goto free_buffers;
-       return 0;
-
-free_buffers:
-       return try_to_free_buffers(page);
-}
-
-STATIC int
-linvfs_prepare_write(
-       struct file             *file,
-       struct page             *page,
-       unsigned int            from,
-       unsigned int            to)
-{
-       return block_prepare_write(page, from, to, linvfs_get_block);
+       block_invalidatepage(page, offset);
 }
 
-struct address_space_operations linvfs_aops = {
-       .readpage               = linvfs_readpage,
-       .readpages              = linvfs_readpages,
-       .writepage              = linvfs_writepage,
+const struct address_space_operations xfs_address_space_operations = {
+       .readpage               = xfs_vm_readpage,
+       .readpages              = xfs_vm_readpages,
+       .writepage              = xfs_vm_writepage,
+       .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
-       .releasepage            = linvfs_release_page,
-       .invalidatepage         = linvfs_invalidate_page,
-       .prepare_write          = linvfs_prepare_write,
+       .releasepage            = xfs_vm_releasepage,
+       .invalidatepage         = xfs_vm_invalidatepage,
+       .prepare_write          = xfs_vm_prepare_write,
        .commit_write           = generic_commit_write,
-       .bmap                   = linvfs_bmap,
-       .direct_IO              = linvfs_direct_IO,
+       .bmap                   = xfs_vm_bmap,
+       .direct_IO              = xfs_vm_direct_IO,
+       .migratepage            = buffer_migrate_page,
 };
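
Finally, the direct I/O path now distinguishes reads from writes: writes keep the own-locking variant (XFS manages i_mutex itself), while reads switch to the no-locking variant, since XFS already serialises direct I/O with its own iolock. The blockdev_direct_IO* wrappers of this era differ only in the DIO_* locking mode they forward to __blockdev_direct_IO; a from-memory sketch of the no-locking one (treat the exact signature as an assumption):

	static inline ssize_t blockdev_direct_IO_no_locking(int rw,
		struct kiocb *iocb, struct inode *inode,
		struct block_device *bdev, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs,
		get_block_t get_block, dio_iodone_t end_io)
	{
		return __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
				offset, nr_segs, get_block, end_io,
				DIO_NO_LOCKING);	/* core takes no locks */
	}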