/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
        vnode_t         *vp = LINVFS_GET_VP(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int             delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),

                (void *)((unsigned long)mask),
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
/*
 * Schedule IO completion handling on an xfsdatad if this was
 * the final hold on this ioend.
 */
        if (atomic_dec_and_test(&ioend->io_remaining))
                queue_work(xfsdatad_workqueue, &ioend->io_work);

        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
xfs_end_bio_unwritten(
        xfs_ioend_t     *ioend = data;
        vnode_t         *vp = ioend->io_vnode;
        xfs_off_t       offset = ioend->io_offset;
        size_t          size = ioend->io_size;

        if (ioend->io_uptodate)
                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
        xfs_destroy_ioend(ioend);
/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the on-disk inode size later.
 */
        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent the I/O
         * completion callback from happening before we have started
         * all the I/O, i.e. from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_uptodate = 1; /* cleared if any I/O fails */
        ioend->io_vnode = LINVFS_GET_VP(inode);
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;

        INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
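/*
 * Editor's note: the sketch below is illustrative only and not part of
 * the original source. It shows the io_remaining hold protocol assumed
 * by xfs_alloc_ioend() above: the initial count of 1 keeps a completion
 * that races with submission from dropping the count to zero early, and
 * the submitter releases that initial hold with xfs_finish_ioend() once
 * all I/O has been issued. The function name and nr_ios parameter are
 * hypothetical.
 */
static inline void
example_ioend_submit_pattern(xfs_ioend_t *ioend, int nr_ios)
{
        int     i;

        for (i = 0; i < nr_ios; i++)
                atomic_inc(&ioend->io_remaining);  /* one hold per I/O */
        /* ...submit the I/O; each completion calls xfs_finish_ioend()... */
        xfs_finish_ioend(ioend);  /* drop the initial hold from alloc */
}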
linvfs_unwritten_done(
        struct buffer_head      *bh,
        xfs_ioend_t     *ioend = bh->b_private;

        ASSERT(buffer_unwritten(bh));

        clear_buffer_unwritten(bh);
        if (!uptodate)
                ioend->io_uptodate = 0;

        xfs_finish_ioend(ioend);
        end_buffer_async_write(bh, uptodate);
        vnode_t         *vp = LINVFS_GET_VP(inode);
        int             error, nmaps = 1;

        VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
        if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
/*
 * Find the mapping in the block @map array that corresponds to the
 * given @offset within a @page.
 */
        unsigned long           offset)
{
        loff_t                  full_offset;    /* offset from start of file */

        ASSERT(offset < PAGE_CACHE_SIZE);

        full_offset = page->index;              /* NB: using 64bit number */
        full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
        full_offset += offset;                  /* offset from page start */

        if (full_offset < iomapp->iomap_offset)
                return NULL;
        if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
                return iomapp;
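/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It restates the offset arithmetic above for 4k pages
 * (PAGE_CACHE_SHIFT == 12): a buffer at byte 512 of page index 3 sits
 * at file offset (3 << 12) + 512 == 12800, which is then tested against
 * the range [iomap_offset, iomap_offset + iomap_bsize).
 */
static inline loff_t
example_full_offset(pgoff_t index, unsigned long pg_offset)
{
        return ((loff_t)index << PAGE_CACHE_SHIFT) + pg_offset;
}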
        struct buffer_head      *bh,
        unsigned long           offset,

        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
        ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

        delta <<= PAGE_CACHE_SHIFT;

        delta -= iomapp->iomap_offset;
        delta >>= block_bits;

        sector_shift = block_bits - BBSHIFT;
        bn = iomapp->iomap_bn >> sector_shift;

        BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
        ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

        bh->b_bdev = iomapp->iomap_target->pbr_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
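/*
 * Editor's note: illustrative sketch, not part of the original source.
 * iomap_bn above is expressed in 512 byte basic blocks (BBSHIFT == 9),
 * while bh->b_blocknr wants filesystem-sized blocks. With 4k blocks
 * (block_bits == 12) the shift is 12 - 9 == 3, so basic block 80 maps
 * to block number 10.
 */
static inline xfs_daddr_t
example_bn_from_basic_blocks(xfs_daddr_t iomap_bn, unsigned int block_bits)
{
        return iomap_bn >> (block_bits - BBSHIFT);
}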
/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head. Returns the page
 * locked and with an extra reference count, and the length of the
 * unwritten extent component on this page that we can write, in
 * units of filesystem blocks.
 */
xfs_probe_unwritten_page(
        struct address_space    *mapping,
        unsigned long           max_offset,

        page = find_trylock_page(mapping, index);

        if (PageWriteback(page))

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                unsigned long           p_offset = 0;

                bh = head = page_buffers(page);
                do {
                        if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
                                break;
                        if (!xfs_offset_to_map(page, iomapp, p_offset))
                                break;
                        if (p_offset >= max_offset)
                                break;
                        xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
                        set_buffer_unwritten_io(bh);
                        bh->b_private = ioend;
                        p_offset += bh->b_size;
                } while ((bh = bh->b_this_page) != head);
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for the mmap write case.
 */
xfs_probe_unmapped_page(
        struct address_space    *mapping,
        unsigned int            pg_offset)

        page = find_trylock_page(mapping, index);

        if (PageWriteback(page))

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head      *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (buffer_mapped(bh) || !buffer_uptodate(bh))

                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = PAGE_CACHE_SIZE;
xfs_probe_unmapped_cluster(
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head)
{
        pgoff_t                 tindex, tlast, tloff;
        unsigned int            pg_offset, len, total = 0;
        struct address_space    *mapping = inode->i_mapping;

        /* First sum forwards in this page */
        do {
                if (buffer_mapped(bh))

        } while ((bh = bh->b_this_page) != head);

        /* If we reached the end of the page, sum forwards in
         * following pages.
         */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);
        for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
                len = xfs_probe_unmapped_page(mapping, tindex,

        if (tindex == tlast &&
            (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                total += xfs_probe_unmapped_page(mapping,
/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers. Returns the page locked and with an extra
 * reference count.
 */
xfs_probe_delalloc_page(

        page = find_trylock_page(inode->i_mapping, index);

        if (PageWriteback(page))

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh)) {

                        } else if (buffer_delay(bh)) {

                } while ((bh = bh->b_this_page) != head);
        struct page             *start_page,
        struct buffer_head      *head,
        struct buffer_head      *curr,
        unsigned long           p_offset,
        struct writeback_control *wbc,

        struct buffer_head      *bh = curr;

        unsigned long           nblocks = 0;

        offset = start_page->index;
        offset <<= PAGE_CACHE_SHIFT;

        ioend = xfs_alloc_ioend(inode);

        /* First map forwards in the page consecutive buffers
         * covering this unwritten extent.
         */
        do {
                if (!buffer_unwritten(bh))
                        break;
                tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
                if (!tmp)
                        break;
                xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
                set_buffer_unwritten_io(bh);
                bh->b_private = ioend;
                p_offset += bh->b_size;
                nblocks++;
        } while ((bh = bh->b_this_page) != head);

        atomic_add(nblocks, &ioend->io_remaining);

        /* If we reached the end of the page, map forwards in any
         * following pages which are also covered by this extent.
         */
        if (bh == head) {
                struct address_space    *mapping = inode->i_mapping;
                pgoff_t                 tindex, tloff, tlast;

                unsigned int            pg_offset, bbits = inode->i_blkbits;

                tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
                tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >>
                                                        PAGE_CACHE_SHIFT;
                tloff = min(tlast, tloff);
                for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
                        page = xfs_probe_unwritten_page(mapping,
                                                tindex, iomapp, ioend,
                                                PAGE_CACHE_SIZE, &bs, bbits);
                        if (!page)
                                break;
                        nblocks += bs;
                        atomic_add(bs, &ioend->io_remaining);
                        xfs_convert_page(inode, page, iomapp, wbc, ioend,

                        /* stop if converting the next page might add
                         * enough blocks that the resulting byte count
                         * would no longer fit in an unsigned long */
                        if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))

                if (tindex == tlast &&
                    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
                        page = xfs_probe_unwritten_page(mapping,
                                                        tindex, iomapp, ioend,
                                                        pg_offset, &bs, bbits);
                        if (page) {
                                nblocks += bs;
                                atomic_add(bs, &ioend->io_remaining);
                                xfs_convert_page(inode, page, iomapp, wbc, ioend,

                                if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))

        ioend->io_size = (xfs_off_t)nblocks << block_bits;
        ioend->io_offset = offset;
        xfs_finish_ioend(ioend);
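/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It spells out the overflow guard used in the clustering loops above:
 * nblocks is accumulated in an unsigned long, and once one more page
 * worth of blocks could make nblocks << block_bits unrepresentable,
 * mapping further pages must stop so that ioend->io_size stays sane.
 */
static inline int
example_nblocks_would_overflow(unsigned long nblocks, unsigned int block_bits)
{
        return nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits);
}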
        struct writeback_control *wbc,
        struct buffer_head      *bh_arr[],

        struct buffer_head      *bh;

        BUG_ON(PageWriteback(page));

        set_page_writeback(page);
        if (clear_dirty)
                clear_page_dirty(page);

        for (i = 0; i < bh_count; i++) {
                bh = bh_arr[i];
                mark_buffer_async_write(bh);
                if (buffer_unwritten(bh))
                        set_buffer_unwritten_io(bh);
                set_buffer_uptodate(bh);
                clear_buffer_dirty(bh);
        }

        for (i = 0; i < bh_count; i++)
                submit_bh(WRITE, bh_arr[i]);

        if (probed_page && clear_dirty)
                wbc->nr_to_write--;     /* Wrote an "extra" page */
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
        struct writeback_control *wbc,

        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *mp = iomapp, *tmp;
        unsigned long           offset, end_offset;

        int                     bbits = inode->i_blkbits;

        end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         */
        len = 1 << inode->i_blkbits;
        end_offset = max(end_offset, PAGE_CACHE_SIZE);
        end_offset = roundup(end_offset, len);
        page_dirty = end_offset / len;

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!(PageUptodate(page) || buffer_uptodate(bh)))
                        continue;
                if (buffer_mapped(bh) && all_bh &&
                    !(buffer_unwritten(bh) || buffer_delay(bh))) {

                        bh_arr[index++] = bh;

                }

                tmp = xfs_offset_to_map(page, mp, offset);
                if (!tmp)
                        continue;
                ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
                ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

                /* If this is a new unwritten extent buffer (i.e. one
                 * that we haven't passed in private data for), we must
                 * now map this buffer too.
                 */
                if (buffer_unwritten(bh) && !bh->b_end_io) {
                        ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
                        xfs_map_unwritten(inode, page, head, bh, offset,
                                        bbits, tmp, wbc, startio, all_bh);
                } else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
                        xfs_map_at_offset(page, bh, offset, bbits, tmp);
                        if (buffer_unwritten(bh)) {
                                set_buffer_unwritten_io(bh);
                                bh->b_private = private;
                        }
                }

                if (startio) {
                        bh_arr[index++] = bh;
                } else {
                        set_buffer_dirty(bh);
                        unlock_buffer(bh);
                        mark_buffer_dirty(bh);
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (startio && index) {
                xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
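/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It shows the page_dirty accounting used above: with 4k pages and 1k
 * blocks (len == 1024) a fully dirty page starts at page_dirty == 4,
 * each buffer moved into a cleanable state decrements it, and only a
 * page whose count reaches zero is treated as cleaned for writeback
 * accounting.
 */
static inline int
example_initial_page_dirty(unsigned long end_offset, int len)
{
        return roundup(end_offset, len) / len;
}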
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
        struct writeback_control *wbc,

        for (; tindex <= tlast; tindex++) {
                page = xfs_probe_delalloc_page(inode, tindex);
                if (!page)
                        break;
                xfs_convert_page(inode, page, iomapp, wbc, NULL,
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers. When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate. Some layers
 * may clear the page dirty flag prior to calling writepage, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out. Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state's will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
xfs_page_state_convert(
        struct writeback_control *wbc,
        int             unmapped) /* also implies page uptodate */
{
        struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
        xfs_iomap_t             *iomp, iomap;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
        int                     len, err, i, cnt = 0, uptodate = 1;
        int                     flags;
        int                     page_dirty;

        /* wait for other IO threads? */
        flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {

        end_offset = min_t(unsigned long long,
                        (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         */
        len = 1 << inode->i_blkbits;
        p_offset = max(p_offset, PAGE_CACHE_SIZE);
        p_offset = roundup(p_offset, len);
        page_dirty = p_offset / len;
        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
                        continue;

                iomp = xfs_offset_to_map(page, &iomap, p_offset);

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 */
                if (buffer_unwritten(bh)) {

                        err = xfs_map_blocks(inode, offset, len, &iomap,
                                        BMAPI_READ|BMAPI_IGNSTATE);

                        iomp = xfs_offset_to_map(page, &iomap,
                                                        p_offset);

                        err = xfs_map_unwritten(inode, page,
                                        head, bh, p_offset,
                                        inode->i_blkbits, iomp,
                                        wbc, startio, unmapped);

                        set_bit(BH_Lock, &bh->b_state);

                        BUG_ON(!buffer_locked(bh));
                /*
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 */
                } else if (buffer_delay(bh)) {

                        err = xfs_map_blocks(inode, offset, len, &iomap,
                                        BMAPI_ALLOCATE | flags);

                        iomp = xfs_offset_to_map(page, &iomap,
                                                        p_offset);

                        xfs_map_at_offset(page, bh, p_offset,
                                        inode->i_blkbits, iomp);

                        set_buffer_dirty(bh);
                        unlock_buffer(bh);
                        mark_buffer_dirty(bh);
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {

                        if (!buffer_mapped(bh)) {

                                /*
                                 * Getting here implies an unmapped buffer
                                 * was found, and we are in a path where we
                                 * need to write the whole page out.
                                 */

                                size = xfs_probe_unmapped_cluster(
                                                inode, page, bh, head);
                                err = xfs_map_blocks(inode, offset,
                                                size, &iomap,
                                                BMAPI_WRITE|BMAPI_MMAP);

                                iomp = xfs_offset_to_map(page, &iomap,
                                                                p_offset);

                                xfs_map_at_offset(page,
                                                bh, p_offset,
                                                inode->i_blkbits, iomp);

                                set_buffer_dirty(bh);
                                unlock_buffer(bh);
                                mark_buffer_dirty(bh);

                        } else if (startio) {
                                if (buffer_uptodate(bh) &&
                                    !test_and_set_bit(BH_Lock, &bh->b_state)) {

        } while (offset += len, p_offset += len,
                 ((bh = bh->b_this_page) != head));
        if (uptodate && bh == head)
                SetPageUptodate(page);

        xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);

        if (iomp) {
                offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
                                                PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, iomp, wbc,
                                        startio, unmapped, tlast);
        }

        for (i = 0; i < cnt; i++) {
                unlock_buffer(bh_arr[i]);
        }

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped)
                        block_invalidatepage(page, 0);
                ClearPageUptodate(page);
        unsigned long           blocks,
        struct buffer_head      *bh_result,

        vnode_t                 *vp = LINVFS_GET_VP(inode);

        loff_t                  offset = (loff_t)iblock << inode->i_blkbits;

        if (blocks)
                size = blocks << inode->i_blkbits;
        else
                size = 1 << inode->i_blkbits;

        VOP_BMAP(vp, offset, size,
                create ? flags : BMAPI_READ, &iomap, &retpbbm, error);

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                xfs_daddr_t     bn;

                /* For unwritten extents do not report a disk address in
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        delta = offset - iomap.iomap_offset;
                        delta >>= inode->i_blkbits;

                        bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
                        bn += delta;
                        BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME));
                        bh_result->b_blocknr = bn;
                        set_buffer_mapped(bh_result);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        /* If this is a realtime file, data might be on a new device */
        bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

        /* If we previously allocated a block out beyond EOF and
         * we are now coming back to use it then we will need to
         * flag it as new even if it has a disk address.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
                set_buffer_new(bh_result);
        }

        if (iomap.iomap_flags & IOMAP_DELAY) {

                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        bh_result->b_size = (ssize_t)min(
                        (loff_t)(iomap.iomap_bsize - iomap.iomap_delta),
                        (loff_t)(blocks << inode->i_blkbits));
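/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It restates the b_size clamp above: a caller may ask for 'blocks'
 * worth of mapping, but the iomap only covers iomap_bsize - iomap_delta
 * bytes from this offset, so the smaller of the two byte counts is what
 * gets reported back through bh_result->b_size.
 */
static inline loff_t
example_clamp_mapping_size(loff_t bytes_left_in_map, loff_t bytes_requested)
{
        return min(bytes_left_in_map, bytes_requested);
}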
        struct inode            *inode,
        struct buffer_head      *bh_result,

        return __linvfs_get_block(inode, iblock, 0, bh_result,
                                        create, 0, BMAPI_WRITE);

linvfs_get_blocks_direct(
        struct inode            *inode,
        unsigned long           max_blocks,
        struct buffer_head      *bh_result,

        return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
                                        create, 1, BMAPI_WRITE|BMAPI_DIRECT);
linvfs_end_io_direct(

        xfs_ioend_t     *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents. This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        if (private && size > 0) {
                ioend->io_offset = offset;
                ioend->io_size = size;
                xfs_finish_ioend(ioend);
        } else {
                xfs_destroy_ioend(ioend);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called. Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
        const struct iovec      *iov,
        unsigned long           nr_segs)
{
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
        vnode_t         *vp = LINVFS_GET_VP(inode);

        VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);

        iocb->private = xfs_alloc_ioend(inode);

        ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                iomap.iomap_target->pbr_bdev,
                iov, offset, nr_segs,
                linvfs_get_blocks_direct,
                linvfs_end_io_direct);

        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);
        struct address_space    *mapping,

        struct inode            *inode = (struct inode *)mapping->host;
        vnode_t                 *vp = LINVFS_GET_VP(inode);

        vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

        VOP_RWLOCK(vp, VRWLOCK_READ);
        VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
        VOP_RWUNLOCK(vp, VRWLOCK_READ);
        return generic_block_bmap(mapping, block, linvfs_get_block);
        struct file             *unused,

        return mpage_readpage(page, linvfs_get_block);

        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,

        return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
xfs_count_page_state(

        struct buffer_head      *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh) && !buffer_delay(bh))
                        clear_buffer_unwritten(bh);
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
        struct writeback_control *wbc)
{
        int                     error;
        int                     need_trans;
        int                     delalloc, unmapped, unwritten;
        struct inode            *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         * 1. There are delalloc buffers on the page
         * 2. The page is uptodate and we have unmapped buffers
         * 3. The page is uptodate and we have no buffers
         * 4. There are unwritten buffers on the page
         */
        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (PFLAGS_TEST_FSTRANS() && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
linvfs_invalidate_page(
        unsigned long           offset)
{
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
        return block_invalidatepage(page, offset);
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. Buffer heads will be dirty and possibly
 *    delalloc. If there are no delalloc buffer heads in this case
 *    then we can just return.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc state
 *    in the buffer heads. If there is none, we can let the caller
 *    free them and come back later via writepage.
 */
linvfs_release_page(

        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (PFLAGS_TEST_FSTRANS())
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk; that will be done by the caller.
         * We never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
linvfs_prepare_write(

        return block_prepare_write(page, from, to, linvfs_get_block);
struct address_space_operations linvfs_aops = {
        .readpage               = linvfs_readpage,
        .readpages              = linvfs_readpages,
        .writepage              = linvfs_writepage,
        .sync_page              = block_sync_page,
        .releasepage            = linvfs_release_page,
        .invalidatepage         = linvfs_invalidate_page,
        .prepare_write          = linvfs_prepare_write,
        .commit_write           = generic_commit_write,
        .bmap                   = linvfs_bmap,
        .direct_IO              = linvfs_direct_IO,
};