mm: rename page trylock
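
The subject refers to the page-lock helper rename: TestSetPageLocked() becomes
trylock_page(), which returns true when the lock was actually acquired, so call
sites invert their tests (see xfs_probe_cluster() and xfs_convert_page() in the
diff below). A minimal sketch of the conversion pattern, for illustration only
(example_probe_locked() is a made-up name, not part of this commit):

#include <linux/pagemap.h>	/* trylock_page(), unlock_page() */

/*
 * trylock_page() reports success, the inverse sense of the old
 * TestSetPageLocked(), which reported that the page was already locked.
 */
static int example_probe_locked(struct page *page)
{
	if (!trylock_page(page))	/* was: if (TestSetPageLocked(page)) */
		return 0;		/* someone else holds the lock; back off */

	/* ... examine buffers while holding the page lock ... */

	unlock_page(page);
	return 1;
}
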
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 9892268..fa73179 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,7 +21,6 @@
 #include "xfs_inum.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir.h"
 #include "xfs_dir2.h"
 #include "xfs_trans.h"
 #include "xfs_dmapi.h"
@@ -29,7 +28,6 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
 #include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_error.h"
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
+#include "xfs_vnodeops.h"
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
+STATIC void
+xfs_count_page_state(
+       struct page             *page,
+       int                     *delalloc,
+       int                     *unmapped,
+       int                     *unwritten)
+{
+       struct buffer_head      *bh, *head;
+
+       *delalloc = *unmapped = *unwritten = 0;
+
+       bh = head = page_buffers(page);
+       do {
+               if (buffer_uptodate(bh) && !buffer_mapped(bh))
+                       (*unmapped) = 1;
+               else if (buffer_unwritten(bh))
+                       (*unwritten) = 1;
+               else if (buffer_delay(bh))
+                       (*delalloc) = 1;
+       } while ((bh = bh->b_this_page) != head);
+}
 
 #if defined(XFS_RW_TRACE)
 void
@@ -51,10 +70,10 @@ xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
-       int             mask)
+       unsigned long   pgoff)
 {
        xfs_inode_t     *ip;
-       vnode_t         *vp = LINVFS_GET_VP(inode);
+       bhv_vnode_t     *vp = vn_from_inode(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;
@@ -71,7 +90,7 @@ xfs_page_trace(
                (void *)ip,
                (void *)inode,
                (void *)page,
-               (void *)((unsigned long)mask),
+               (void *)pgoff,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
@@ -81,23 +100,40 @@ xfs_page_trace(
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
-               (void *)NULL,
+               (void *)((unsigned long)current_pid()),
                (void *)NULL);
 }
 #else
-#define xfs_page_trace(tag, inode, page, mask)
+#define xfs_page_trace(tag, inode, page, pgoff)
 #endif
 
+STATIC struct block_device *
+xfs_find_bdev_for_inode(
+       struct xfs_inode        *ip)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       if (XFS_IS_REALTIME_INODE(ip))
+               return mp->m_rtdev_targp->bt_bdev;
+       else
+               return mp->m_ddev_targp->bt_bdev;
+}
+
 /*
  * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend.
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
  */
 STATIC void
 xfs_finish_ioend(
-       xfs_ioend_t             *ioend)
+       xfs_ioend_t     *ioend,
+       int             wait)
 {
-       if (atomic_dec_and_test(&ioend->io_remaining))
+       if (atomic_dec_and_test(&ioend->io_remaining)) {
                queue_work(xfsdatad_workqueue, &ioend->io_work);
+               if (wait)
+                       flush_workqueue(xfsdatad_workqueue);
+       }
 }
 
 /*
@@ -114,24 +150,65 @@ xfs_destroy_ioend(
 
        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
-               bh->b_end_io(bh, ioend->io_uptodate);
+               bh->b_end_io(bh, !ioend->io_error);
        }
-
-       vn_iowake(ioend->io_vnode);
+       if (unlikely(ioend->io_error)) {
+               vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error,
+                               __FILE__,__LINE__);
+       }
+       vn_iowake(XFS_I(ioend->io_inode));
        mempool_free(ioend, xfs_ioend_pool);
 }
 
 /*
+ * Update on-disk file size now that data has been written to disk.
+ * The current in-memory file size is i_size.  If a write is beyond
+ * eof i_new_size will be the intended file size until i_size is
+ * updated.  If this write does not extend all the way to the valid
+ * file size then restrict this update to the end of the write.
+ */
+STATIC void
+xfs_setfilesize(
+       xfs_ioend_t             *ioend)
+{
+       xfs_inode_t             *ip = XFS_I(ioend->io_inode);
+       xfs_fsize_t             isize;
+       xfs_fsize_t             bsize;
+
+       ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+       ASSERT(ioend->io_type != IOMAP_READ);
+
+       if (unlikely(ioend->io_error))
+               return;
+
+       bsize = ioend->io_offset + ioend->io_size;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+       isize = MAX(ip->i_size, ip->i_new_size);
+       isize = MIN(isize, bsize);
+
+       if (ip->i_d.di_size < isize) {
+               ip->i_d.di_size = isize;
+               ip->i_update_core = 1;
+               ip->i_update_size = 1;
+               mark_inode_dirty_sync(ioend->io_inode);
+       }
+
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
+/*
  * Buffered IO write completion for delayed allocate extents.
- * TODO: Update ondisk isize now that we know the file data
- * has been flushed (i.e. the notorious "NULL file" problem).
  */
 STATIC void
 xfs_end_bio_delalloc(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
+       xfs_setfilesize(ioend);
        xfs_destroy_ioend(ioend);
 }
 
@@ -140,10 +217,12 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
+       xfs_setfilesize(ioend);
        xfs_destroy_ioend(ioend);
 }
 
@@ -155,16 +234,36 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
-       vnode_t                 *vp = ioend->io_vnode;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
+       struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
-       int                     error;
 
-       if (ioend->io_uptodate)
-               VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+       if (likely(!ioend->io_error)) {
+               if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+                       int error;
+                       error = xfs_iomap_write_unwritten(ip, offset, size);
+                       if (error)
+                               ioend->io_error = error;
+               }
+               xfs_setfilesize(ioend);
+       }
+       xfs_destroy_ioend(ioend);
+}
+
+/*
+ * IO read completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_read(
+       struct work_struct      *work)
+{
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
+
        xfs_destroy_ioend(ioend);
 }
 
@@ -189,22 +288,24 @@ xfs_alloc_ioend(
         * all the I/O from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
-       ioend->io_uptodate = 1; /* cleared if any I/O fails */
+       ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
-       ioend->io_vnode = LINVFS_GET_VP(inode);
+       ioend->io_inode = inode;
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
-       atomic_inc(&ioend->io_vnode->v_iocount);
+       atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;
 
        if (type == IOMAP_UNWRITTEN)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
+       else if (type == IOMAP_READ)
+               INIT_WORK(&ioend->io_work, xfs_end_bio_read);
        else
-               INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
        return ioend;
 }
@@ -217,16 +318,17 @@ xfs_map_blocks(
        xfs_iomap_t             *mapp,
        int                     flags)
 {
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       xfs_inode_t             *ip = XFS_I(inode);
        int                     error, nmaps = 1;
 
-       VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
+       error = xfs_iomap(ip, offset, count,
+                               flags, mapp, &nmaps);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
-               VMODIFY(vp);
+               xfs_iflags_set(ip, XFS_IMODIFIED);
        return -error;
 }
 
-STATIC inline int
+STATIC_INLINE int
 xfs_iomap_valid(
        xfs_iomap_t             *iomapp,
        loff_t                  offset)
@@ -238,29 +340,22 @@ xfs_iomap_valid(
 /*
  * BIO completion handler for buffered IO.
  */
-STATIC int
+STATIC void
 xfs_end_bio(
        struct bio              *bio,
-       unsigned int            bytes_done,
        int                     error)
 {
        xfs_ioend_t             *ioend = bio->bi_private;
 
-       if (bio->bi_size)
-               return 1;
-
-       ASSERT(ioend);
        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+       ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 
        /* Toss bio and pass work off to an xfsdatad thread */
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               ioend->io_uptodate = 0;
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
-
        bio_put(bio);
-       xfs_finish_ioend(ioend);
-       return 0;
+
+       xfs_finish_ioend(ioend, 0);
 }
 
 STATIC void
@@ -314,20 +409,18 @@ xfs_start_buffer_writeback(
 STATIC void
 xfs_start_page_writeback(
        struct page             *page,
-       struct writeback_control *wbc,
        int                     clear_dirty,
        int                     buffers)
 {
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
-       set_page_writeback(page);
        if (clear_dirty)
-               clear_page_dirty(page);
+               clear_page_dirty_for_io(page);
+       set_page_writeback(page);
        unlock_page(page);
-       if (!buffers) {
+       /* If no buffers on the page are to be written, finish it here */
+       if (!buffers)
                end_page_writeback(page);
-               wbc->pages_skipped++;   /* We didn't write this page */
-       }
 }
 
 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
@@ -350,7 +443,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  * assumes that all buffers on the page are started at the same time.
  *
  * The fix is two passes across the ioend list - one to start writeback on the
- * bufferheads, and then the second one submit them for I/O.
+ * buffer_heads, and then submit them for I/O on the second pass.
  */
 STATIC void
 xfs_submit_ioend(
@@ -395,7 +488,7 @@ xfs_submit_ioend(
                }
                if (bio)
                        xfs_submit_ioend_bio(ioend, bio);
-               xfs_finish_ioend(ioend);
+               xfs_finish_ioend(ioend, 0);
        } while ((ioend = next) != NULL);
 }
 
@@ -420,7 +513,7 @@ xfs_cancel_ioend(
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);
 
-               vn_iowake(ioend->io_vnode);
+               vn_iowake(XFS_I(ioend->io_inode));
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
 }
@@ -462,28 +555,37 @@ xfs_add_to_ioend(
 }
 
 STATIC void
+xfs_map_buffer(
+       struct buffer_head      *bh,
+       xfs_iomap_t             *mp,
+       xfs_off_t               offset,
+       uint                    block_bits)
+{
+       sector_t                bn;
+
+       ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);
+
+       bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
+             ((offset - mp->iomap_offset) >> block_bits);
+
+       ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));
+
+       bh->b_blocknr = bn;
+       set_buffer_mapped(bh);
+}
+
+STATIC void
 xfs_map_at_offset(
        struct buffer_head      *bh,
        loff_t                  offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
 {
-       xfs_daddr_t             bn;
-       int                     sector_shift;
-
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
-       ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
-
-       sector_shift = block_bits - BBSHIFT;
-       bn = (iomapp->iomap_bn >> sector_shift) +
-             ((offset - iomapp->iomap_offset) >> block_bits);
-
-       ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
-       ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
        lock_buffer(bh);
-       bh->b_blocknr = bn;
+       xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
@@ -540,7 +642,7 @@ xfs_probe_cluster(
 
        /* First sum forwards in this page */
        do {
-               if (mapped != buffer_mapped(bh))
+               if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
@@ -561,7 +663,7 @@ xfs_probe_cluster(
 
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
-                       size_t pg_offset, len = 0;
+                       size_t pg_offset, pg_len = 0;
 
                        if (tindex == tlast) {
                                pg_offset =
@@ -573,17 +675,17 @@ xfs_probe_cluster(
                        } else
                                pg_offset = PAGE_CACHE_SIZE;
 
-                       if (page->index == tindex && !TestSetPageLocked(page)) {
-                               len = xfs_probe_page(page, pg_offset, mapped);
+                       if (page->index == tindex && trylock_page(page)) {
+                               pg_len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }
 
-                       if (!len) {
+                       if (!pg_len) {
                                done = 1;
                                break;
                        }
 
-                       total += len;
+                       total += pg_len;
                        tindex++;
                }
 
@@ -616,8 +718,8 @@ xfs_is_delayed_page(
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
-                       else if (buffer_mapped(bh))
-                               acceptable = (type == 0);
+                       else if (buffer_dirty(bh) && buffer_mapped(bh))
+                               acceptable = (type == IOMAP_NEW);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);
@@ -657,7 +759,7 @@ xfs_convert_page(
 
        if (page->index != tindex)
                goto fail;
-       if (TestSetPageLocked(page))
+       if (!trylock_page(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
@@ -668,7 +770,7 @@ xfs_convert_page(
 
        /*
         * page_dirty is initially a count of buffers on the page before
-        * EOF and is decrememted as we move each into a cleanable state.
+        * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
@@ -726,7 +828,7 @@ xfs_convert_page(
                        page_dirty--;
                        count++;
                } else {
-                       type = 0;
+                       type = IOMAP_NEW;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
@@ -747,14 +849,15 @@ xfs_convert_page(
                        struct backing_dev_info *bdi;
 
                        bdi = inode->i_mapping->backing_dev_info;
+                       wbc->nr_to_write--;
                        if (bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
-                       } else if (--wbc->nr_to_write <= 0) {
+                       } else if (wbc->nr_to_write <= 0) {
                                done = 1;
                        }
                }
-               xfs_start_page_writeback(page, wbc, !page_dirty, count);
+               xfs_start_page_writeback(page, !page_dirty, count);
        }
 
        return done;
@@ -810,7 +913,7 @@ xfs_cluster_write(
  * page if possible.
  * The bh->b_state's cannot know if any of the blocks or which block for
  * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate.  Some layers
+ * only valid if the page itself isn't completely uptodate.  Some layers
  * may clear the page dirty flag prior to calling write page, under the
  * assumption the entire page will be written out; by not writing out the
  * whole page the page can be reused before all valid dirty data is
@@ -838,12 +941,14 @@ xfs_page_state_convert(
        pgoff_t                 end_index, last_index, tlast;
        ssize_t                 size, len;
        int                     flags, err, iomap_valid = 0, uptodate = 1;
-       int                     page_dirty, count = 0, trylock_flag = 0;
+       int                     page_dirty, count = 0;
+       int                     trylock = 0;
        int                     all_bh = unmapped;
 
-       /* wait for other IO threads? */
-       if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
-               trylock_flag |= BMAPI_TRYLOCK;
+       if (startio) {
+               if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+                       trylock |= BMAPI_TRYLOCK;
+       }
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -860,7 +965,7 @@ xfs_page_state_convert(
 
        /*
         * page_dirty is initially a count of buffers on the page before
-        * EOF and is decrememted as we move each into a cleanable state.
+        * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
@@ -881,8 +986,8 @@ xfs_page_state_convert(
 
        bh = head = page_buffers(page);
        offset = page_offset(page);
-       flags = -1;
-       type = 0;
+       flags = BMAPI_READ;
+       type = IOMAP_NEW;
 
        /* TODO: cleanup count and page_dirty */
 
@@ -912,30 +1017,39 @@ xfs_page_state_convert(
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
-                */
+                */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
-                       /*
+                       int new_ioend = 0;
+
+                       /*
                         * Make sure we don't use a read-only iomap
                         */
-                       if (flags == BMAPI_READ)
+                       if (flags == BMAPI_READ)
                                iomap_valid = 0;
 
                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
-                               flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+                               flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
-                               flags = BMAPI_ALLOCATE;
-                               if (!startio)
-                                       flags |= trylock_flag;
+                               flags = BMAPI_ALLOCATE | trylock;
                        } else {
                                type = IOMAP_NEW;
-                               flags = BMAPI_WRITE|BMAPI_MMAP;
+                               flags = BMAPI_WRITE | BMAPI_MMAP;
                        }
 
                        if (!iomap_valid) {
+                               /*
+                                * if we didn't have a valid mapping then we
+                                * need to ensure that we put the new mapping
+                                * in a new ioend structure. This needs to be
+                                * done to ensure that the ioends correctly
+                                * reflect the block mappings at io completion
+                                * for unwritten extent conversion.
+                                */
+                               new_ioend = 1;
                                if (type == IOMAP_NEW) {
                                        size = xfs_probe_cluster(inode,
                                                        page, bh, head, 0);
@@ -955,7 +1069,7 @@ xfs_page_state_convert(
                                if (startio) {
                                        xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
-                                                       !iomap_valid);
+                                                       new_ioend);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
@@ -970,7 +1084,7 @@ xfs_page_state_convert(
                         * That means it must already have extents allocated
                         * underneath it. Map the extent by reading it.
                         */
-                       if (!iomap_valid || type != 0) {
+                       if (!iomap_valid || flags != BMAPI_READ) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh,
                                                                head, 1);
@@ -981,7 +1095,15 @@ xfs_page_state_convert(
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
 
-                       type = 0;
+                       /*
+                        * We set the type to IOMAP_NEW in case we are doing a
+                        * small write at EOF that is extending the file but
+                        * without needing an allocation. We need to update the
+                        * file size on I/O completion in this case so it is
+                        * the same case as having just allocated a new extent
+                        * that we are writing into for the first time.
+                        */
+                       type = IOMAP_NEW;
                        if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
                                ASSERT(buffer_mapped(bh));
                                if (iomap_valid)
@@ -1007,7 +1129,7 @@ xfs_page_state_convert(
                SetPageUptodate(page);
 
        if (startio)
-               xfs_start_page_writeback(page, wbc, 1, count);
+               xfs_start_page_writeback(page, 1, count);
 
        if (ioend && iomap_valid) {
                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
@@ -1039,250 +1161,6 @@ error:
        return err;
 }
 
-STATIC int
-__linvfs_get_block(
-       struct inode            *inode,
-       sector_t                iblock,
-       unsigned long           blocks,
-       struct buffer_head      *bh_result,
-       int                     create,
-       int                     direct,
-       bmapi_flags_t           flags)
-{
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
-       xfs_iomap_t             iomap;
-       xfs_off_t               offset;
-       ssize_t                 size;
-       int                     retpbbm = 1;
-       int                     error;
-
-       offset = (xfs_off_t)iblock << inode->i_blkbits;
-       if (blocks)
-               size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
-                                       (xfs_off_t)blocks << inode->i_blkbits);
-       else
-               size = 1 << inode->i_blkbits;
-
-       VOP_BMAP(vp, offset, size,
-               create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
-       if (error)
-               return -error;
-
-       if (retpbbm == 0)
-               return 0;
-
-       if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
-               xfs_daddr_t     bn;
-               xfs_off_t       delta;
-
-               /* For unwritten extents do not report a disk address on
-                * the read case (treat as if we're reading into a hole).
-                */
-               if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
-                       delta = offset - iomap.iomap_offset;
-                       delta >>= inode->i_blkbits;
-
-                       bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
-                       bn += delta;
-                       BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
-                       bh_result->b_blocknr = bn;
-                       set_buffer_mapped(bh_result);
-               }
-               if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
-                       if (direct)
-                               bh_result->b_private = inode;
-                       set_buffer_unwritten(bh_result);
-                       set_buffer_delay(bh_result);
-               }
-       }
-
-       /* If this is a realtime file, data might be on a new device */
-       bh_result->b_bdev = iomap.iomap_target->bt_bdev;
-
-       /* If we previously allocated a block out beyond eof and
-        * we are now coming back to use it then we will need to
-        * flag it as new even if it has a disk address.
-        */
-       if (create &&
-           ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
-            (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
-               set_buffer_new(bh_result);
-
-       if (iomap.iomap_flags & IOMAP_DELAY) {
-               BUG_ON(direct);
-               if (create) {
-                       set_buffer_uptodate(bh_result);
-                       set_buffer_mapped(bh_result);
-                       set_buffer_delay(bh_result);
-               }
-       }
-
-       if (blocks) {
-               ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
-               offset = min_t(xfs_off_t,
-                               iomap.iomap_bsize - iomap.iomap_delta,
-                               (xfs_off_t)blocks << inode->i_blkbits);
-               bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
-       }
-
-       return 0;
-}
-
-int
-linvfs_get_block(
-       struct inode            *inode,
-       sector_t                iblock,
-       struct buffer_head      *bh_result,
-       int                     create)
-{
-       return __linvfs_get_block(inode, iblock, 0, bh_result,
-                                       create, 0, BMAPI_WRITE);
-}
-
-STATIC int
-linvfs_get_blocks_direct(
-       struct inode            *inode,
-       sector_t                iblock,
-       unsigned long           max_blocks,
-       struct buffer_head      *bh_result,
-       int                     create)
-{
-       return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
-                                       create, 1, BMAPI_WRITE|BMAPI_DIRECT);
-}
-
-STATIC void
-linvfs_end_io_direct(
-       struct kiocb    *iocb,
-       loff_t          offset,
-       ssize_t         size,
-       void            *private)
-{
-       xfs_ioend_t     *ioend = iocb->private;
-
-       /*
-        * Non-NULL private data means we need to issue a transaction to
-        * convert a range from unwritten to written extents.  This needs
-        * to happen from process contect but aio+dio I/O completion
-        * happens from irq context so we need to defer it to a workqueue.
-        * This is not nessecary for synchronous direct I/O, but we do
-        * it anyway to keep the code uniform and simpler.
-        *
-        * The core direct I/O code might be changed to always call the
-        * completion handler in the future, in which case all this can
-        * go away.
-        */
-       if (private && size > 0) {
-               ioend->io_offset = offset;
-               ioend->io_size = size;
-               xfs_finish_ioend(ioend);
-       } else {
-               ASSERT(size >= 0);
-               xfs_destroy_ioend(ioend);
-       }
-
-       /*
-        * blockdev_direct_IO can return an error even afer the I/O
-        * completion handler was called.  Thus we need to protect
-        * against double-freeing.
-        */
-       iocb->private = NULL;
-}
-
-STATIC ssize_t
-linvfs_direct_IO(
-       int                     rw,
-       struct kiocb            *iocb,
-       const struct iovec      *iov,
-       loff_t                  offset,
-       unsigned long           nr_segs)
-{
-       struct file     *file = iocb->ki_filp;
-       struct inode    *inode = file->f_mapping->host;
-       vnode_t         *vp = LINVFS_GET_VP(inode);
-       xfs_iomap_t     iomap;
-       int             maps = 1;
-       int             error;
-       ssize_t         ret;
-
-       VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
-       if (error)
-               return -error;
-
-       iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
-
-       ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-               iomap.iomap_target->bt_bdev,
-               iov, offset, nr_segs,
-               linvfs_get_blocks_direct,
-               linvfs_end_io_direct);
-
-       if (unlikely(ret <= 0 && iocb->private))
-               xfs_destroy_ioend(iocb->private);
-       return ret;
-}
-
-
-STATIC sector_t
-linvfs_bmap(
-       struct address_space    *mapping,
-       sector_t                block)
-{
-       struct inode            *inode = (struct inode *)mapping->host;
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
-       int                     error;
-
-       vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
-
-       VOP_RWLOCK(vp, VRWLOCK_READ);
-       VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
-       VOP_RWUNLOCK(vp, VRWLOCK_READ);
-       return generic_block_bmap(mapping, block, linvfs_get_block);
-}
-
-STATIC int
-linvfs_readpage(
-       struct file             *unused,
-       struct page             *page)
-{
-       return mpage_readpage(page, linvfs_get_block);
-}
-
-STATIC int
-linvfs_readpages(
-       struct file             *unused,
-       struct address_space    *mapping,
-       struct list_head        *pages,
-       unsigned                nr_pages)
-{
-       return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
-}
-
-STATIC void
-xfs_count_page_state(
-       struct page             *page,
-       int                     *delalloc,
-       int                     *unmapped,
-       int                     *unwritten)
-{
-       struct buffer_head      *bh, *head;
-
-       *delalloc = *unmapped = *unwritten = 0;
-
-       bh = head = page_buffers(page);
-       do {
-               if (buffer_uptodate(bh) && !buffer_mapped(bh))
-                       (*unmapped) = 1;
-               else if (buffer_unwritten(bh) && !buffer_delay(bh))
-                       clear_buffer_unwritten(bh);
-               else if (buffer_unwritten(bh))
-                       (*unwritten) = 1;
-               else if (buffer_delay(bh))
-                       (*delalloc) = 1;
-       } while ((bh = bh->b_this_page) != head);
-}
-
-
 /*
  * writepage: Called from one of two places:
  *
@@ -1304,7 +1182,7 @@ xfs_count_page_state(
  */
 
 STATIC int
-linvfs_writepage(
+xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
 {
@@ -1339,7 +1217,7 @@ linvfs_writepage(
         * then mark the page dirty again and leave the page
         * as is.
         */
-       if (PFLAGS_TEST_FSTRANS() && need_trans)
+       if (current_test_flags(PF_FSTRANS) && need_trans)
                goto out_fail;
 
        /*
@@ -1371,13 +1249,12 @@ out_unlock:
 }
 
 STATIC int
-linvfs_invalidate_page(
-       struct page             *page,
-       unsigned long           offset)
+xfs_vm_writepages(
+       struct address_space    *mapping,
+       struct writeback_control *wbc)
 {
-       xfs_page_trace(XFS_INVALIDPAGE_ENTER,
-                       page->mapping->host, page, offset);
-       return block_invalidatepage(page, offset);
+       xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+       return generic_writepages(mapping, wbc);
 }
 
 /*
@@ -1400,7 +1277,7 @@ linvfs_invalidate_page(
  *    free them and we should come back later via writepage.
  */
 STATIC int
-linvfs_release_page(
+xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
 {
@@ -1411,7 +1288,10 @@ linvfs_release_page(
                .nr_to_write = 1,
        };
 
-       xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
+       xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+
+       if (!page_has_buffers(page))
+               return 0;
 
        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
@@ -1423,7 +1303,7 @@ linvfs_release_page(
        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
-       if (PFLAGS_TEST_FSTRANS())
+       if (current_test_flags(PF_FSTRANS))
                return 0;
 
        /*
@@ -1442,25 +1322,264 @@ free_buffers:
 }
 
 STATIC int
-linvfs_prepare_write(
+__xfs_get_blocks(
+       struct inode            *inode,
+       sector_t                iblock,
+       struct buffer_head      *bh_result,
+       int                     create,
+       int                     direct,
+       bmapi_flags_t           flags)
+{
+       xfs_iomap_t             iomap;
+       xfs_off_t               offset;
+       ssize_t                 size;
+       int                     niomap = 1;
+       int                     error;
+
+       offset = (xfs_off_t)iblock << inode->i_blkbits;
+       ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+       size = bh_result->b_size;
+       error = xfs_iomap(XFS_I(inode), offset, size,
+                            create ? flags : BMAPI_READ, &iomap, &niomap);
+       if (error)
+               return -error;
+       if (niomap == 0)
+               return 0;
+
+       if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
+               /*
+                * For unwritten extents do not report a disk address on
+                * the read case (treat as if we're reading into a hole).
+                */
+               if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+                       xfs_map_buffer(bh_result, &iomap, offset,
+                                      inode->i_blkbits);
+               }
+               if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+                       if (direct)
+                               bh_result->b_private = inode;
+                       set_buffer_unwritten(bh_result);
+               }
+       }
+
+       /*
+        * If this is a realtime file, data may be on a different device
+        * to that pointed to from the buffer_head b_bdev currently.
+        */
+       bh_result->b_bdev = iomap.iomap_target->bt_bdev;
+
+       /*
+        * If we previously allocated a block out beyond eof and we are now
+        * coming back to use it then we will need to flag it as new even if it
+        * has a disk address.
+        *
+        * With sub-block writes into unwritten extents we also need to mark
+        * the buffer as new so that the unwritten parts of the buffer get
+        * correctly zeroed.
+        */
+       if (create &&
+           ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
+            (offset >= i_size_read(inode)) ||
+            (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
+               set_buffer_new(bh_result);
+
+       if (iomap.iomap_flags & IOMAP_DELAY) {
+               BUG_ON(direct);
+               if (create) {
+                       set_buffer_uptodate(bh_result);
+                       set_buffer_mapped(bh_result);
+                       set_buffer_delay(bh_result);
+               }
+       }
+
+       if (direct || size > (1 << inode->i_blkbits)) {
+               ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
+               offset = min_t(xfs_off_t,
+                               iomap.iomap_bsize - iomap.iomap_delta, size);
+               bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
+       }
+
+       return 0;
+}
+
+int
+xfs_get_blocks(
+       struct inode            *inode,
+       sector_t                iblock,
+       struct buffer_head      *bh_result,
+       int                     create)
+{
+       return __xfs_get_blocks(inode, iblock,
+                               bh_result, create, 0, BMAPI_WRITE);
+}
+
+STATIC int
+xfs_get_blocks_direct(
+       struct inode            *inode,
+       sector_t                iblock,
+       struct buffer_head      *bh_result,
+       int                     create)
+{
+       return __xfs_get_blocks(inode, iblock,
+                               bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+}
+
+STATIC void
+xfs_end_io_direct(
+       struct kiocb    *iocb,
+       loff_t          offset,
+       ssize_t         size,
+       void            *private)
+{
+       xfs_ioend_t     *ioend = iocb->private;
+
+       /*
+        * Non-NULL private data means we need to issue a transaction to
+        * convert a range from unwritten to written extents.  This needs
+        * to happen from process context but aio+dio I/O completion
+        * happens from irq context so we need to defer it to a workqueue.
+        * This is not necessary for synchronous direct I/O, but we do
+        * it anyway to keep the code uniform and simpler.
+        *
+        * Well, if only it were that simple. Because synchronous direct I/O
+        * requires extent conversion to occur *before* we return to userspace,
+        * we have to wait for extent conversion to complete. Look at the
+        * iocb that has been passed to us to determine if this is AIO or
+        * not. If it is synchronous, tell xfs_finish_ioend() to kick the
+        * workqueue and wait for it to complete.
+        *
+        * The core direct I/O code might be changed to always call the
+        * completion handler in the future, in which case all this can
+        * go away.
+        */
+       ioend->io_offset = offset;
+       ioend->io_size = size;
+       if (ioend->io_type == IOMAP_READ) {
+               xfs_finish_ioend(ioend, 0);
+       } else if (private && size > 0) {
+               xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
+       } else {
+               /*
+                * A direct I/O write ioend starts its life in unwritten
+                * state in case it maps an unwritten extent.  This write
+                * didn't map an unwritten extent so switch its completion
+                * handler.
+                */
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+               xfs_finish_ioend(ioend, 0);
+       }
+
+       /*
+        * blockdev_direct_IO can return an error even after the I/O
+        * completion handler was called.  Thus we need to protect
+        * against double-freeing.
+        */
+       iocb->private = NULL;
+}
+
+STATIC ssize_t
+xfs_vm_direct_IO(
+       int                     rw,
+       struct kiocb            *iocb,
+       const struct iovec      *iov,
+       loff_t                  offset,
+       unsigned long           nr_segs)
+{
+       struct file     *file = iocb->ki_filp;
+       struct inode    *inode = file->f_mapping->host;
+       struct block_device *bdev;
+       ssize_t         ret;
+
+       bdev = xfs_find_bdev_for_inode(XFS_I(inode));
+
+       if (rw == WRITE) {
+               iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
+               ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
+                       bdev, iov, offset, nr_segs,
+                       xfs_get_blocks_direct,
+                       xfs_end_io_direct);
+       } else {
+               iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
+               ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+                       bdev, iov, offset, nr_segs,
+                       xfs_get_blocks_direct,
+                       xfs_end_io_direct);
+       }
+
+       if (unlikely(ret != -EIOCBQUEUED && iocb->private))
+               xfs_destroy_ioend(iocb->private);
+       return ret;
+}
+
+STATIC int
+xfs_vm_write_begin(
        struct file             *file,
+       struct address_space    *mapping,
+       loff_t                  pos,
+       unsigned                len,
+       unsigned                flags,
+       struct page             **pagep,
+       void                    **fsdata)
+{
+       *pagep = NULL;
+       return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+                                                               xfs_get_blocks);
+}
+
+STATIC sector_t
+xfs_vm_bmap(
+       struct address_space    *mapping,
+       sector_t                block)
+{
+       struct inode            *inode = (struct inode *)mapping->host;
+       struct xfs_inode        *ip = XFS_I(inode);
+
+       xfs_itrace_entry(XFS_I(inode));
+       xfs_ilock(ip, XFS_IOLOCK_SHARED);
+       xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
+       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+       return generic_block_bmap(mapping, block, xfs_get_blocks);
+}
+
+STATIC int
+xfs_vm_readpage(
+       struct file             *unused,
+       struct page             *page)
+{
+       return mpage_readpage(page, xfs_get_blocks);
+}
+
+STATIC int
+xfs_vm_readpages(
+       struct file             *unused,
+       struct address_space    *mapping,
+       struct list_head        *pages,
+       unsigned                nr_pages)
+{
+       return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
+}
+
+STATIC void
+xfs_vm_invalidatepage(
        struct page             *page,
-       unsigned int            from,
-       unsigned int            to)
+       unsigned long           offset)
 {
-       return block_prepare_write(page, from, to, linvfs_get_block);
+       xfs_page_trace(XFS_INVALIDPAGE_ENTER,
+                       page->mapping->host, page, offset);
+       block_invalidatepage(page, offset);
 }
 
-struct address_space_operations linvfs_aops = {
-       .readpage               = linvfs_readpage,
-       .readpages              = linvfs_readpages,
-       .writepage              = linvfs_writepage,
+const struct address_space_operations xfs_address_space_operations = {
+       .readpage               = xfs_vm_readpage,
+       .readpages              = xfs_vm_readpages,
+       .writepage              = xfs_vm_writepage,
+       .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
-       .releasepage            = linvfs_release_page,
-       .invalidatepage         = linvfs_invalidate_page,
-       .prepare_write          = linvfs_prepare_write,
-       .commit_write           = generic_commit_write,
-       .bmap                   = linvfs_bmap,
-       .direct_IO              = linvfs_direct_IO,
+       .releasepage            = xfs_vm_releasepage,
+       .invalidatepage         = xfs_vm_invalidatepage,
+       .write_begin            = xfs_vm_write_begin,
+       .write_end              = generic_write_end,
+       .bmap                   = xfs_vm_bmap,
+       .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
 };
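
For context on the reworked completion path: xfs_finish_ioend() now takes a wait
argument so that synchronous direct I/O (detected via is_sync_kiocb() in
xfs_end_io_direct()) can flush the completion workqueue before the write returns
to userspace. A stripped-down sketch of that pattern with invented demo_* names,
not the XFS code itself:

#include <linux/workqueue.h>
#include <asm/atomic.h>

struct demo_ioend {
	atomic_t		io_remaining;	/* outstanding bio references */
	struct work_struct	io_work;	/* deferred completion processing */
};

static struct workqueue_struct *demo_wq;	/* assume created with create_workqueue() */

/*
 * Drop one reference; the final put queues the completion work.  When
 * @wait is set (the synchronous direct I/O case), flush the workqueue so
 * the completion handler has finished before the caller returns.
 */
static void demo_finish_ioend(struct demo_ioend *ioend, int wait)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		queue_work(demo_wq, &ioend->io_work);
		if (wait)
			flush_workqueue(demo_wq);
	}
}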