X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fxfs%2Flinux-2.6%2Fxfs_aops.c;h=c2e30eea74dc2cd6344dd6710806465b029db422;hb=a372bf8b6a12f23f68e716113ccaea4bf646dd0f;hp=34dcb43a7837d8d9b602ba23199c8f6bbe429400;hpb=721259bce2851893155c6cb88a3f8ecb106b348c;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 34dcb43..c2e30ee 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -37,10 +37,45 @@
 #include "xfs_error.h"
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
+#include "xfs_vnodeops.h"
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
+
+/*
+ * Prime number of hash buckets since address is used as the key.
+ */
+#define NVSYNC		37
+#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
+static wait_queue_head_t xfs_ioend_wq[NVSYNC];
+
+void __init
+xfs_ioend_init(void)
+{
+	int i;
+
+	for (i = 0; i < NVSYNC; i++)
+		init_waitqueue_head(&xfs_ioend_wq[i]);
+}
+
+void
+xfs_ioend_wait(
+	xfs_inode_t	*ip)
+{
+	wait_queue_head_t *wq = to_ioend_wq(ip);
+
+	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
+}
+
+STATIC void
+xfs_ioend_wake(
+	xfs_inode_t	*ip)
+{
+	if (atomic_dec_and_test(&ip->i_iocount))
+		wake_up(to_ioend_wq(ip));
+}
+
 STATIC void
 xfs_count_page_state(
 	struct page		*page,
@@ -56,8 +91,6 @@ xfs_count_page_state(
 	do {
 		if (buffer_uptodate(bh) && !buffer_mapped(bh))
 			(*unmapped) = 1;
-		else if (buffer_unwritten(bh) && !buffer_delay(bh))
-			clear_buffer_unwritten(bh);
 		else if (buffer_unwritten(bh))
 			(*unwritten) = 1;
 		else if (buffer_delay(bh))
@@ -71,10 +104,9 @@ xfs_page_trace(
 	int		tag,
 	struct inode	*inode,
 	struct page	*page,
-	int		mask)
+	unsigned long	pgoff)
 {
 	xfs_inode_t	*ip;
-	bhv_vnode_t	*vp = vn_from_inode(inode);
 	loff_t		isize = i_size_read(inode);
 	loff_t		offset = page_offset(page);
 	int		delalloc = -1, unmapped = -1, unwritten = -1;
@@ -82,7 +114,7 @@ xfs_page_trace(
 	if (page_has_buffers(page))
 		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-	ip = xfs_vtoi(vp);
+	ip = XFS_I(inode);
 	if (!ip->i_rwtrace)
 		return;
 
@@ -91,7 +123,7 @@ xfs_page_trace(
 		(void *)ip,
 		(void *)inode,
 		(void *)page,
-		(void *)((unsigned long)mask),
+		(void *)pgoff,
 		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
 		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
 		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
@@ -105,19 +137,19 @@ xfs_page_trace(
 		(void *)NULL);
 }
 #else
-#define xfs_page_trace(tag, inode, page, mask)
+#define xfs_page_trace(tag, inode, page, pgoff)
 #endif
 
-/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend.
- */
-STATIC void
-xfs_finish_ioend(
-	xfs_ioend_t	*ioend)
+STATIC struct block_device *
+xfs_find_bdev_for_inode(
+	struct xfs_inode	*ip)
 {
-	if (atomic_dec_and_test(&ioend->io_remaining))
-		queue_work(xfsdatad_workqueue, &ioend->io_work);
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (XFS_IS_REALTIME_INODE(ip))
+		return mp->m_rtdev_targp->bt_bdev;
+	else
+		return mp->m_ddev_targp->bt_bdev;
 }
 
 /*
@@ -131,28 +163,88 @@ xfs_destroy_ioend(
 	xfs_ioend_t		*ioend)
 {
 	struct buffer_head	*bh, *next;
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 
 	for (bh = ioend->io_buffer_head; bh; bh = next) {
 		next = bh->b_private;
 		bh->b_end_io(bh, !ioend->io_error);
 	}
-	if (unlikely(ioend->io_error))
-		vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
-	vn_iowake(ioend->io_vnode);
+
+	/*
+	 * Volume managers supporting multiple paths can send back ENODEV
+	 * when the final path disappears.  In this case continuing to fill
+	 * the page cache with dirty data which cannot be written out is
+	 * evil, so prevent that.
+	 */
+	if (unlikely(ioend->io_error == -ENODEV)) {
+		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
+				      __FILE__, __LINE__);
+	}
+
+	xfs_ioend_wake(ip);
 	mempool_free(ioend, xfs_ioend_pool);
 }
 
 /*
+ * If the end of the current ioend is beyond the current EOF,
+ * return the new EOF value, otherwise zero.
+ */
+STATIC xfs_fsize_t
+xfs_ioend_new_eof(
+	xfs_ioend_t		*ioend)
+{
+	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
+	xfs_fsize_t		isize;
+	xfs_fsize_t		bsize;
+
+	bsize = ioend->io_offset + ioend->io_size;
+	isize = MAX(ip->i_size, ip->i_new_size);
+	isize = MIN(isize, bsize);
+	return isize > ip->i_d.di_size ? isize : 0;
+}
+
+/*
+ * Update on-disk file size now that data has been written to disk.
+ * The current in-memory file size is i_size. If a write is beyond
+ * eof i_new_size will be the intended file size until i_size is
+ * updated. If this write does not extend all the way to the valid
+ * file size then restrict this update to the end of the write.
+ */
+
+STATIC void
+xfs_setfilesize(
+	xfs_ioend_t		*ioend)
+{
+	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
+	xfs_fsize_t		isize;
+
+	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+	ASSERT(ioend->io_type != IOMAP_READ);
+
+	if (unlikely(ioend->io_error))
+		return;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	isize = xfs_ioend_new_eof(ioend);
+	if (isize) {
+		ip->i_d.di_size = isize;
+		xfs_mark_inode_dirty_sync(ip);
+	}
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
+/*
  * Buffered IO write completion for delayed allocate extents.
- * TODO: Update ondisk isize now that we know the file data
- * has been flushed (i.e. the notorious "NULL file" problem).
  */
 STATIC void
 xfs_end_bio_delalloc(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
+	xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
@@ -161,10 +253,12 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
 
+	xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
@@ -176,19 +270,61 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-	void			*data)
+	struct work_struct	*work)
 {
-	xfs_ioend_t		*ioend = data;
-	bhv_vnode_t		*vp = ioend->io_vnode;
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
 
-	if (likely(!ioend->io_error))
-		bhv_vop_bmap(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL);
+	if (likely(!ioend->io_error)) {
+		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+			int error;
+			error = xfs_iomap_write_unwritten(ip, offset, size);
+			if (error)
+				ioend->io_error = error;
+		}
+		xfs_setfilesize(ioend);
+	}
+	xfs_destroy_ioend(ioend);
+}
+
+/*
+ * IO read completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_read(
+	struct work_struct	*work)
+{
+	xfs_ioend_t		*ioend =
+		container_of(work, xfs_ioend_t, io_work);
+
 	xfs_destroy_ioend(ioend);
 }
 
 /*
+ * Schedule IO completion handling on an xfsdatad if this was
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
+ */
+STATIC void
+xfs_finish_ioend(
+	xfs_ioend_t	*ioend,
+	int		wait)
+{
+	if (atomic_dec_and_test(&ioend->io_remaining)) {
+		struct workqueue_struct *wq = xfsdatad_workqueue;
+		if (ioend->io_work.func == xfs_end_bio_unwritten)
+			wq = xfsconvertd_workqueue;
+
+		queue_work(wq, &ioend->io_work);
+		if (wait)
+			flush_workqueue(wq);
+	}
+}
+
+/*
  * Allocate and initialise an IO completion structure.
  * We need to track unwritten extent write completion here initially.
  * We'll need to extend this for updating the ondisk inode size later
@@ -212,19 +348,21 @@ xfs_alloc_ioend(
 	ioend->io_error = 0;
 	ioend->io_list = NULL;
 	ioend->io_type = type;
-	ioend->io_vnode = vn_from_inode(inode);
+	ioend->io_inode = inode;
 	ioend->io_buffer_head = NULL;
 	ioend->io_buffer_tail = NULL;
-	atomic_inc(&ioend->io_vnode->v_iocount);
+	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
 	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
 	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
+	else if (type == IOMAP_READ)
+		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
 	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
 	return ioend;
 }
 
@@ -237,16 +375,12 @@ xfs_map_blocks(
 	xfs_iomap_t		*mapp,
 	int			flags)
 {
-	bhv_vnode_t		*vp = vn_from_inode(inode);
-	int			error, nmaps = 1;
+	int			nmaps = 1;
 
-	error = bhv_vop_bmap(vp, offset, count, flags, mapp, &nmaps);
-	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
-		VMODIFY(vp);
-	return -error;
+	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC inline int
+STATIC_INLINE int
 xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
 	loff_t			offset)
@@ -258,17 +392,13 @@ xfs_iomap_valid(
 /*
  * BIO completion handler for buffered IO.
  */
-STATIC int
+STATIC void
 xfs_end_bio(
 	struct bio		*bio,
-	unsigned int		bytes_done,
 	int			error)
 {
 	xfs_ioend_t		*ioend = bio->bi_private;
 
-	if (bio->bi_size)
-		return 1;
-
 	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
 	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 
@@ -277,8 +407,7 @@ xfs_end_bio(
 	bio->bi_end_io = NULL;
 	bio_put(bio);
 
-	xfs_finish_ioend(ioend);
-	return 0;
+	xfs_finish_ioend(ioend, 0);
 }
 
 STATIC void
@@ -287,10 +416,16 @@ xfs_submit_ioend_bio(
 	struct bio	*bio)
 {
 	atomic_inc(&ioend->io_remaining);
-
 	bio->bi_private = ioend;
 	bio->bi_end_io = xfs_end_bio;
 
+	/*
+	 * If the I/O is beyond EOF we mark the inode dirty immediately
+	 * but don't update the inode size until I/O completion.
+	 */
+	if (xfs_ioend_new_eof(ioend))
+		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
+
 	submit_bio(WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
@@ -332,20 +467,18 @@ xfs_start_buffer_writeback(
 STATIC void
 xfs_start_page_writeback(
 	struct page		*page,
-	struct writeback_control *wbc,
 	int			clear_dirty,
 	int			buffers)
 {
 	ASSERT(PageLocked(page));
 	ASSERT(!PageWriteback(page));
-	set_page_writeback(page);
 	if (clear_dirty)
-		clear_page_dirty(page);
+		clear_page_dirty_for_io(page);
+	set_page_writeback(page);
 	unlock_page(page);
-	if (!buffers) {
+	/* If no buffers on the page are to be written, finish it here */
+	if (!buffers)
 		end_page_writeback(page);
-		wbc->pages_skipped++;	/* We didn't write this page */
-	}
 }
 
 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
@@ -413,7 +546,7 @@ xfs_submit_ioend(
 		}
 		if (bio)
 			xfs_submit_ioend_bio(ioend, bio);
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
 
@@ -438,7 +571,7 @@ xfs_cancel_ioend(
 			unlock_buffer(bh);
 		} while ((bh = next_bh) != NULL);
 
-		vn_iowake(ioend->io_vnode);
+		xfs_ioend_wake(XFS_I(ioend->io_inode));
 		mempool_free(ioend, xfs_ioend_pool);
 	} while ((ioend = next) != NULL);
 }
 
@@ -588,7 +721,7 @@ xfs_probe_cluster(
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page	*page = pvec.pages[i];
-			size_t		pg_offset, len = 0;
+			size_t		pg_offset, pg_len = 0;
 
 			if (tindex == tlast) {
 				pg_offset =
					i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
@@ -600,17 +733,17 @@
 			} else
 				pg_offset = PAGE_CACHE_SIZE;
 
-			if (page->index == tindex && !TestSetPageLocked(page)) {
-				len = xfs_probe_page(page, pg_offset, mapped);
+			if (page->index == tindex && trylock_page(page)) {
+				pg_len = xfs_probe_page(page, pg_offset, mapped);
 				unlock_page(page);
 			}
 
-			if (!len) {
+			if (!pg_len) {
 				done = 1;
 				break;
 			}
 
-			total += len;
+			total += pg_len;
 			tindex++;
 		}
 
@@ -644,7 +777,7 @@ xfs_is_delayed_page(
 			else if (buffer_delay(bh))
 				acceptable = (type == IOMAP_DELAY);
 			else if (buffer_dirty(bh) && buffer_mapped(bh))
-				acceptable = (type == 0);
+				acceptable = (type == IOMAP_NEW);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
 
@@ -684,7 +817,7 @@ xfs_convert_page(
 
 	if (page->index != tindex)
 		goto fail;
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		goto fail;
 	if (PageWriteback(page))
 		goto fail_unlock_page;
 
@@ -753,7 +886,7 @@ xfs_convert_page(
 				page_dirty--;
 				count++;
 			} else {
-				type = 0;
+				type = IOMAP_NEW;
 				if (buffer_mapped(bh) && all_bh && startio) {
 					lock_buffer(bh);
 					xfs_add_to_ioend(inode, bh, offset,
@@ -782,7 +915,7 @@ xfs_convert_page(
 				done = 1;
 			}
 		}
-		xfs_start_page_writeback(page, wbc, !page_dirty, count);
+		xfs_start_page_writeback(page, !page_dirty, count);
 	}
 
 	return done;
 
@@ -911,8 +1044,8 @@ xfs_page_state_convert(
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	flags = -1;
-	type = 0;
+	flags = BMAPI_READ;
+	type = IOMAP_NEW;
 
 	/* TODO: cleanup count and page_dirty */
 
@@ -942,14 +1075,16 @@ xfs_page_state_convert(
 		 *
 		 * Third case, an unmapped buffer was found, and we are
 		 * in a path where we need to write the whole page out.
-		 */
+		 */
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
-			/*
+			int new_ioend = 0;
+
+			/*
 			 * Make sure we don't use a read-only iomap
 			 */
-			if (flags == BMAPI_READ)
+			if (flags == BMAPI_READ)
 				iomap_valid = 0;
 
 			if (buffer_unwritten(bh)) {
@@ -964,6 +1099,15 @@ xfs_page_state_convert(
 			}
 
 			if (!iomap_valid) {
+				/*
+				 * if we didn't have a valid mapping then we
+				 * need to ensure that we put the new mapping
+				 * in a new ioend structure. This needs to be
+				 * done to ensure that the ioends correctly
+				 * reflect the block mappings at io completion
+				 * for unwritten extent conversion.
+				 */
+				new_ioend = 1;
 				if (type == IOMAP_NEW) {
 					size = xfs_probe_cluster(inode,
 							page, bh, head, 0);
@@ -983,7 +1127,7 @@ xfs_page_state_convert(
 				if (startio) {
 					xfs_add_to_ioend(inode, bh, offset,
 							type, &ioend,
-							!iomap_valid);
+							new_ioend);
 				} else {
 					set_buffer_dirty(bh);
 					unlock_buffer(bh);
@@ -998,7 +1142,7 @@ xfs_page_state_convert(
 			 * That means it must already have extents allocated
 			 * underneath it. Map the extent by reading it.
 			 */
-			if (!iomap_valid || type != 0) {
+			if (!iomap_valid || flags != BMAPI_READ) {
 				flags = BMAPI_READ;
 				size = xfs_probe_cluster(inode, page, bh,
 								head, 1);
@@ -1009,8 +1153,16 @@ xfs_page_state_convert(
 				iomap_valid = xfs_iomap_valid(&iomap, offset);
 			}
 
-			type = 0;
-			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+			/*
+			 * We set the type to IOMAP_NEW in case we are doing a
+			 * small write at EOF that is extending the file but
+			 * without needing an allocation. We need to update the
+			 * file size on I/O completion in this case so it is
+			 * the same case as having just allocated a new extent
+			 * that we are writing into for the first time.
+			 */
+			type = IOMAP_NEW;
+			if (trylock_buffer(bh)) {
 				ASSERT(buffer_mapped(bh));
 				if (iomap_valid)
 					all_bh = 1;
@@ -1035,7 +1187,7 @@ xfs_page_state_convert(
 		SetPageUptodate(page);
 
 	if (startio)
-		xfs_start_page_writeback(page, wbc, 1, count);
+		xfs_start_page_writeback(page, 1, count);
 
 	if (ioend && iomap_valid) {
 		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
@@ -1133,6 +1285,14 @@ xfs_vm_writepage(
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+	/*
+	 * VM calculation for nr_to_write seems off. Bump it way
+	 * up, this gets simple streaming writes zippy again.
+	 * To be reviewed again after Jens' writeback changes.
+	 */
+	wbc->nr_to_write *= 4;
+
 	/*
 	 * Convert delayed allocate, unwritten or unmapped space
 	 * to real space and flush out to disk.
@@ -1159,10 +1319,7 @@ xfs_vm_writepages(
 	struct address_space	*mapping,
 	struct writeback_control *wbc)
 {
-	struct bhv_vnode *vp = vn_from_inode(mapping->host);
-
-	if (VN_TRUNC(vp))
-		VUNTRUNCATE(vp);
+	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
 	return generic_writepages(mapping, wbc);
 }
 
@@ -1197,7 +1354,7 @@ xfs_vm_releasepage(
 		.nr_to_write = 1,
 	};
 
-	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
+	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
 
 	if (!page_has_buffers(page))
 		return 0;
 
@@ -1239,7 +1396,6 @@ __xfs_get_blocks(
 	int			direct,
 	bmapi_flags_t		flags)
 {
-	bhv_vnode_t		*vp = vn_from_inode(inode);
 	xfs_iomap_t		iomap;
 	xfs_off_t		offset;
 	ssize_t			size;
@@ -1249,7 +1405,11 @@ __xfs_get_blocks(
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
 	size = bh_result->b_size;
-	error = bhv_vop_bmap(vp, offset, size,
+
+	if (!create && direct && offset >= i_size_read(inode))
+		return 0;
+
+	error = xfs_iomap(XFS_I(inode), offset, size,
 			     create ? flags : BMAPI_READ, &iomap, &niomap);
 	if (error)
 		return -error;
@@ -1269,7 +1429,6 @@ __xfs_get_blocks(
 			if (direct)
 				bh_result->b_private = inode;
 			set_buffer_unwritten(bh_result);
-			set_buffer_delay(bh_result);
 		}
 	}
 
@@ -1280,13 +1439,18 @@ __xfs_get_blocks(
 		bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
 	/*
-	 * If we previously allocated a block out beyond eof and we are
-	 * now coming back to use it then we will need to flag it as new
-	 * even if it has a disk address.
+	 * If we previously allocated a block out beyond eof and we are now
+	 * coming back to use it then we will need to flag it as new even if it
+	 * has a disk address.
+	 *
+	 * With sub-block writes into unwritten extents we also need to mark
+	 * the buffer as new so that the unwritten parts of the buffer gets
+	 * correctly zeroed.
 	 */
 	if (create &&
 	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
-	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
+	     (offset >= i_size_read(inode)) ||
+	     (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
 		set_buffer_new(bh_result);
 
 	if (iomap.iomap_flags & IOMAP_DELAY) {
@@ -1347,17 +1511,32 @@ xfs_end_io_direct(
 	 * This is not necessary for synchronous direct I/O, but we do
 	 * it anyway to keep the code uniform and simpler.
 	 *
+	 * Well, if only it were that simple. Because synchronous direct I/O
+	 * requires extent conversion to occur *before* we return to userspace,
+	 * we have to wait for extent conversion to complete. Look at the
+	 * iocb that has been passed to us to determine if this is AIO or
+	 * not. If it is synchronous, tell xfs_finish_ioend() to kick the
+	 * workqueue and wait for it to complete.
+	 *
 	 * The core direct I/O code might be changed to always call the
 	 * completion handler in the future, in which case all this can
 	 * go away.
 	 */
-	if (private && size > 0) {
-		ioend->io_offset = offset;
-		ioend->io_size = size;
-		xfs_finish_ioend(ioend);
+	ioend->io_offset = offset;
+	ioend->io_size = size;
+	if (ioend->io_type == IOMAP_READ) {
+		xfs_finish_ioend(ioend, 0);
+	} else if (private && size > 0) {
+		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
 	} else {
-		ASSERT(size >= 0);
-		xfs_destroy_ioend(ioend);
+		/*
+		 * A direct I/O write ioend starts its life in unwritten
+		 * state in case they map an unwritten extent. This write
+		 * didn't map an unwritten extent so switch its completion
+		 * handler.
+		 */
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		xfs_finish_ioend(ioend, 0);
 	}
 
 	/*
@@ -1378,45 +1557,43 @@ xfs_vm_direct_IO(
 {
 	struct file	*file = iocb->ki_filp;
 	struct inode	*inode = file->f_mapping->host;
-	bhv_vnode_t	*vp = vn_from_inode(inode);
-	xfs_iomap_t	iomap;
-	int		maps = 1;
-	int		error;
+	struct block_device *bdev;
 	ssize_t		ret;
 
-	error = bhv_vop_bmap(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps);
-	if (error)
-		return -error;
-
-	iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
+	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
 	if (rw == WRITE) {
+		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			iomap.iomap_target->bt_bdev,
-			iov, offset, nr_segs,
+			bdev, iov, offset, nr_segs,
 			xfs_get_blocks_direct,
 			xfs_end_io_direct);
 	} else {
+		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
 		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			iomap.iomap_target->bt_bdev,
-			iov, offset, nr_segs,
+			bdev, iov, offset, nr_segs,
			xfs_get_blocks_direct,
 			xfs_end_io_direct);
 	}
 
-	if (unlikely(ret <= 0 && iocb->private))
+	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
 		xfs_destroy_ioend(iocb->private);
 	return ret;
 }
 
 STATIC int
-xfs_vm_prepare_write(
+xfs_vm_write_begin(
 	struct file		*file,
-	struct page		*page,
-	unsigned int		from,
-	unsigned int		to)
+	struct address_space	*mapping,
+	loff_t			pos,
+	unsigned		len,
+	unsigned		flags,
+	struct page		**pagep,
+	void			**fsdata)
 {
-	return block_prepare_write(page, from, to, xfs_get_blocks);
+	*pagep = NULL;
+	return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+								xfs_get_blocks);
 }
 
 STATIC sector_t
@@ -1425,12 +1602,12 @@ xfs_vm_bmap(
 	sector_t		block)
 {
 	struct inode		*inode = (struct inode *)mapping->host;
-	bhv_vnode_t		*vp = vn_from_inode(inode);
+	struct xfs_inode	*ip = XFS_I(inode);
 
-	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-	bhv_vop_rwlock(vp, VRWLOCK_READ);
-	bhv_vop_flush_pages(vp, (xfs_off_t)0, -1, 0, FI_REMAPF);
-	bhv_vop_rwunlock(vp, VRWLOCK_READ);
+	xfs_itrace_entry(XFS_I(inode));
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
@@ -1470,9 +1647,11 @@ const struct address_space_operations xfs_address_space_operations = {
 	.sync_page		= block_sync_page,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
-	.prepare_write		= xfs_vm_prepare_write,
-	.commit_write		= generic_commit_write,
+	.write_begin		= xfs_vm_write_begin,
+	.write_end		= generic_write_end,
 	.bmap			= xfs_vm_bmap,
 	.direct_IO		= xfs_vm_direct_IO,
 	.migratepage		= buffer_migrate_page,
+	.is_partially_uptodate	= block_is_partially_uptodate,
+	.error_remove_page	= generic_error_remove_page,
 };
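
Editor's note on the ioend accounting this patch introduces: xfs_ioend_init(), xfs_ioend_wait() and xfs_ioend_wake() replace the old per-vnode vn_iowake() machinery with NVSYNC (37, a prime) shared wait queues, selecting a bucket by hashing the inode's address. Several inodes may collide on one bucket; that is harmless because every waiter re-checks its own i_iocount predicate after a wakeup. The following is a minimal, stand-alone user-space sketch of that scheme, assuming C11 atomics and pthreads as stand-ins for the kernel's atomic_t and wait_queue_head_t; the names fake_inode, ioend_wait() and ioend_wake() are illustrative only, not kernel API.

/*
 * User-space model of the address-hashed ioend wait queues above.
 * A condvar/mutex pair per bucket stands in for wait_queue_head_t,
 * and an atomic counter stands in for ip->i_iocount.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NVSYNC 37	/* prime, since the pointer value is the hash key */

static pthread_mutex_t bucket_lock[NVSYNC];
static pthread_cond_t  bucket_cond[NVSYNC];

struct fake_inode {
	atomic_int iocount;	/* number of ioends in flight against this inode */
};

static unsigned int to_bucket(const void *v)
{
	return (unsigned int)((unsigned long)v % NVSYNC);
}

/* Analogous to xfs_ioend_init(): set up all hash buckets once. */
static void ioend_init(void)
{
	for (int i = 0; i < NVSYNC; i++) {
		pthread_mutex_init(&bucket_lock[i], NULL);
		pthread_cond_init(&bucket_cond[i], NULL);
	}
}

/* Analogous to xfs_ioend_wait(): block until all I/O on ip has drained. */
static void ioend_wait(struct fake_inode *ip)
{
	unsigned int b = to_bucket(ip);

	pthread_mutex_lock(&bucket_lock[b]);
	while (atomic_load(&ip->iocount) != 0)
		pthread_cond_wait(&bucket_cond[b], &bucket_lock[b]);
	pthread_mutex_unlock(&bucket_lock[b]);
}

/* Analogous to xfs_ioend_wake(): drop one reference, wake on zero. */
static void ioend_wake(struct fake_inode *ip)
{
	unsigned int b = to_bucket(ip);

	if (atomic_fetch_sub(&ip->iocount, 1) == 1) {	/* dec-and-test */
		pthread_mutex_lock(&bucket_lock[b]);
		pthread_cond_broadcast(&bucket_cond[b]);
		pthread_mutex_unlock(&bucket_lock[b]);
	}
}

/* Tiny demo: two in-flight ioends completed by another thread. */
static void *completion_thread(void *arg)
{
	struct fake_inode *ip = arg;

	ioend_wake(ip);		/* first ioend completes */
	ioend_wake(ip);		/* second completes, waiter is woken */
	return NULL;
}

int main(void)
{
	struct fake_inode ip = { .iocount = 2 };
	pthread_t t;

	ioend_init();
	pthread_create(&t, NULL, completion_thread, &ip);
	ioend_wait(&ip);	/* returns only once iocount reaches zero */
	pthread_join(t, NULL);
	printf("all I/O against inode %p drained\n", (void *)&ip);
	return 0;
}

The prime bucket count matters because the key is a kernel pointer: inode addresses come from slab allocators and tend to share low-order bit patterns, and a prime modulus spreads such strided keys across buckets more evenly than a power of two would.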