/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "ops_address.h"

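/*
 * gfs2_page_add_databufs() walks the buffer heads backing the byte range
 * [from, to) of a page and adds each one to the current transaction via
 * gfs2_trans_add_bh() with meta == 0, marking it uptodate first in the
 * jdata case.
 */
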
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}

/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, create, bh_result);
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file (ignored: no allocation)
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error = gfs2_block_map(inode, lblock, 0, bh_result);

	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

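/*
 * gfs2_get_block_direct() is the get_block callback used for O_DIRECT
 * transfers. Like the _noalloc variant above it only maps blocks which
 * already exist and never allocates (create is ignored); the direct I/O
 * path does no allocation of its own.
 */
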
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, 0, bh_result);
}

/**
 * gfs2_writepage - Write a complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if (PageChecked(page)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

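/*
 * A "stuffed" inode keeps its data in the on-disk inode block itself,
 * immediately after the struct gfs2_dinode header, rather than in separate
 * data blocks. stuffed_readpage() below builds page zero of such a file
 * from the dinode buffer and zero-fills the rest of the page.
 */
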
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: The GFS2 inode
 * @page: The page to fill
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->nopage(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It is also
 * called by gfs2_readpage() once the required lock has been granted.
 */
static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_get_block);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We use a trylock in order to
 * avoid the page lock / glock ordering problems, returning
 * AOP_TRUNCATED_PAGE in the event that we are unable to get the lock.
 */
static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (unlikely(error)) {
		unlock_page(page);
		goto out;
	}
	error = __gfs2_readpage(file, page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error == GLR_TRYFAILED) {
		yield();
		return AOP_TRUNCATED_PAGE;
	}
	return error;
}

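/*
 * gfs2_internal_read() is used when GFS2 itself needs to read one of its
 * internal files (the resource group index, for example) through the page
 * cache. The caller already holds the required glock, so the page cache is
 * filled via __gfs2_readpage() directly rather than via gfs2_readpage().
 */
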
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);

	*pos += copied;
	return copied;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here, we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	ret = gfs2_glock_nq_atime(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

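/*
 * Write path overview: gfs2_write_begin() takes the inode glock, reserves
 * quota and free blocks when an allocation will be needed, opens a
 * transaction and prepares the page. gfs2_write_end() then journals the
 * affected buffers, updates the inode size, ends the transaction and drops
 * the reservation and the glock. A failure in write_begin unwinds those
 * same steps in reverse order.
 */
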
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	error = -ENOMEM;
	page = __grab_cache_page(mapping, index);
	*pagep = page;
	if (!page)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_putpage;

	ip->i_alloc.al_requested = 0;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	if (gfs2_is_stuffed(ip)) {
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page))
			error = stuffed_readpage(ip, page);
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_get_block);

out:
	if (error) {
		gfs2_trans_end(sdp);
out_trans_fail:
		if (alloc_required) {
			gfs2_inplace_release(ip);
out_qunlock:
			gfs2_quota_unlock(ip);
out_alloc_put:
			gfs2_alloc_put(ip);
		}
out_putpage:
		page_cache_release(page);
		if (pos + len > ip->i_inode.i_size)
			vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_unlock:
		gfs2_glock_dq_m(1, &ip->i_gh);
out_uninit:
		gfs2_holder_uninit(&ip->i_gh);
	}

	return error;
}

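/*
 * gfs2_grow adds new resource groups and then rewrites the rindex file
 * through the normal write path. When the rindex inode is the one being
 * written, write_end calls adjust_fs_space() below to fold the newly
 * added space into the statfs figures.
 */
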
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

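/*
 * Both write_end paths undo what gfs2_write_begin() set up: the transaction
 * is ended, any block and quota reservation is released on the regular
 * path, and the inode glock is dropped, whether or not the copy succeeded.
 */
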
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */
static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0)) {
		if ((pos + copied) > inode->i_size) {
			di = (struct gfs2_dinode *)dibh->b_data;
			ip->i_di.di_size = inode->i_size;
			di->di_size = cpu_to_be64(inode->i_size);
			mark_inode_dirty(inode);
		}
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

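/*
 * GFS2 uses the PageChecked flag to remember that a page was dirtied via
 * ->set_page_dirty() and therefore needs its buffers added to a
 * transaction when it is written back (see gfs2_writepage()).
 */
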
/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;
	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);
	gfs2_glock_dq_uninit(&i_gh);
	return dblock;
}

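/*
 * gfs2_discard() detaches a single buffer from the journal state before
 * its page is invalidated: the buffer is taken off any log list it is on
 * (unless it is pinned in the log) and its state bits are cleared, all
 * under the log lock.
 */
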
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos >= offset)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The GFS2 inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O.
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset > i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

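/*
 * One set of address_space operations per data journaling mode:
 * data=writeback, data=ordered (the default) and data=journal. They differ
 * mainly in whether writepages can go straight through mpage_writepages()
 * and in whether dirtied pages must be tracked via ->set_page_dirty() so
 * that their buffers are journaled at writeback time.
 */
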
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}