 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include "ops_address.h"
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;
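	/* Walk every buffer in the page and add any buffer that overlaps
	 * the byte range [from, to) to the current transaction. */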
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		if (end <= from || start >= to)
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file

int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
	return gfs2_block_map(inode, lblock, create, bh_result);
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
	error = gfs2_block_map(inode, lblock, 0, bh_result);
	if (!buffer_mapped(bh_result))
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
	return gfs2_block_map(inode, lblock, 0, bh_result);
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 * Returns: 1 if the page should go on to be written, 0 if it was dealt
 * with here (redirtied or invalidated), otherwise an error code.

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
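	/* Dirty pages should only exist while the inode glock is held
	 * exclusively; if not, the assert below withdraws the filesystem. */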
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
	if (current->journal_info)
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
	redirty_page_for_writepage(wbc, page);
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @wbc: The writeback control

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
	ret = gfs2_writepage_common(page, wbc);
	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
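	/* Prefer the bio-based mpage path; if it cannot write the page
	 * itself, fall back to the buffer_head based path below. */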
	ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	ret = gfs2_writepage_common(page, wbc);
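	/* Make sure the page has buffers, then register them with
	 * gfs2_page_add_databufs() so that ordered-mode data is written
	 * out before the journal commits the related metadata. */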
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	error = gfs2_writepage_common(page, wbc);
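	/* PageChecked is set by gfs2_set_page_dirty() and means the page's
	 * buffers have not yet been added to a transaction, so one has to
	 * be started here before the page can be written. */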
	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
	error = __gfs2_jdata_writepage(page, wbc);
	redirty_page_for_writepage(wbc, page);
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This greatly reduces the number
 * of I/O requests we send and the number of bmap calls we make.

static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * Returns: non-zero if loop should terminate, zero otherwise

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	struct backing_dev_info *bdi = mapping->backing_dev_info;
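	/* Reserve journal space for every block this pagevec may touch;
	 * the whole pagevec is then written under a single transaction. */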
	ret = gfs2_trans_begin(sdp, nrblocks, 0);
	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];
		if (unlikely(page->mapping != mapping)) {
		if (!wbc->range_cyclic && page->index > end) {
		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);
		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
		ret = __gfs2_jdata_writepage(page, wbc);
		if (ret || (--(wbc->nr_to_write) <= 0))
		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
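		/* Each batch of dirty pages found by the lookup above is
		 * written under its own transaction by
		 * gfs2_write_jdata_pagevec(). */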
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		pagevec_release(&pvec);
	if (!scanned && !done) {
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
 * stuffed_readpage - Fill in a Linux page with stuffed file data

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
	struct buffer_head *dibh;
	 * Due to the order of unstuffing files and ->nopage(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	if (unlikely(page->index)) {
		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
	error = gfs2_meta_inode_buffer(ip, &dibh);
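	/* Copy the data embedded in the dinode into the page and zero
	 * the remainder of the page. */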
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	SetPageUptodate(page);
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.

static int __gfs2_readpage(void *file, struct page *page)
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		error = mpage_readpage(page, gfs2_get_block);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 * This deals with the locking required. We use a trylock in order to
 * avoid the page lock / glock ordering problems, returning
 * AOP_TRUNCATED_PAGE in the event that we are unable to get the lock.

static int gfs2_readpage(struct file *file, struct page *page)
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder gh;
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (unlikely(error)) {
	error = __gfs2_readpage(file, page);
	gfs2_holder_uninit(&gh);
	if (error == GLR_TRYFAILED) {
		return AOP_TRUNCATED_PAGE;
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
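		/* Never copy across a page boundary in one step; the loop
		 * below moves on to the next page until size bytes are done. */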
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
	} while(copied < size);
 * gfs2_readpages - Read a bunch of pages at once
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	ret = gfs2_glock_nq_atime(&gh);
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	page = __grab_cache_page(mapping, index);
	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	ip->i_alloc.al_requested = 0;
	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
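	/* Estimate the journal blocks this operation may dirty: the dinode
	 * plus any indirect blocks, the data blocks too for jdata, and
	 * statfs/quota updates when new blocks may be allocated. */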
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);

	if (gfs2_is_stuffed(ip)) {
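		/* If the write no longer fits in the dinode block the inode
		 * must be unstuffed first; otherwise just make sure the page
		 * reflects the stuffed data. */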
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
		} else if (!PageUptodate(page))
			error = stuffed_readpage(ip, page);

	error = block_prepare_write(page, from, to, gfs2_get_block);

	if (alloc_required) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
	gfs2_glock_dq_m(1, &ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode

static void adjust_fs_space(struct inode *inode)
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
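	/* Anything beyond what the master and local statfs changes already
	 * account for is space newly added by gfs2_grow. */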
	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * This copies the data from the page into the inode block after
 * the inode data structure itself.

static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
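	/* Copy the new data straight into the inode block, just past the
	 * on-disk dinode header, and zero whatever was not copied. */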
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files, as they are slightly different; otherwise we just
 * put our locking around the VFS-provided functions.

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
		page_cache_release(page);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
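	/* Stuffed files are finished off separately since their data lives
	 * in the dinode block rather than in separate data blocks. */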
	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0)) {
		if ((pos + copied) > inode->i_size) {
			di = (struct gfs2_dinode *)dibh->b_data;
			ip->i_di.di_size = inode->i_size;
			di->di_size = cpu_to_be64(inode->i_size);
			mark_inode_dirty(inode);

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	if (al->al_requested) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 * Returns: 1 if it dirtied the page, or 0 otherwise

static int gfs2_set_page_dirty(struct page *page)
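	/* PageChecked tells the writepage paths that these buffers still
	 * need to be added to a transaction before they are written. */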
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 * Returns: The disk address for the block or 0 on hole or error

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
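	/* A stuffed file has no mapped data blocks, so report a hole (0). */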
	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);

	gfs2_glock_dq_uninit(&i_gh);
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
	struct gfs2_bufdata *bd;

	clear_buffer_dirty(bh);
	if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
		list_del_init(&bd->bd_le.le_list);
	gfs2_remove_from_journal(bh, current->journal_info, 0);
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	ClearPageChecked(page);
	if (!page_has_buffers(page))

	bh = head = page_buffers(page);
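	/* Walk the page's buffer ring and discard every buffer that lies
	 * at or beyond the invalidation offset. */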
		gfs2_discard(sdp, bh);
		bh = bh->b_this_page;
	} while (bh != head);
	try_to_release_page(page, 0);
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @offset: The offset at which we are reading or writing
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)

static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	if (gfs2_is_stuffed(ip))
	if (offset > i_size_read(&ip->i_inode))
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;

	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	rv = gfs2_ok_for_dio(ip, rw, offset);
		goto out; /* dio not valid, fall back to buffered i/o */
	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 * Call try_to_free_buffers() if the buffers in this page can be

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))

	head = bh = page_buffers(page);
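	/* First pass, under the log lock: if any buffer is still in use,
	 * on an AIL list, pinned or dirty, the page cannot be released. */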
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while(bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
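	/* Second pass: detach and free the gfs2_bufdata attached to each
	 * buffer before letting try_to_free_buffers() drop the buffers. */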
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
			bh->b_private = NULL;
		gfs2_log_unlock(sdp);
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);
	gfs2_log_unlock(sdp);
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
void gfs2_set_aops(struct inode *inode)
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;