2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include <linux/log2.h>
22 #include "xfs_types.h"
27 #include "xfs_trans.h"
28 #include "xfs_trans_priv.h"
32 #include "xfs_dmapi.h"
33 #include "xfs_mount.h"
34 #include "xfs_bmap_btree.h"
35 #include "xfs_alloc_btree.h"
36 #include "xfs_ialloc_btree.h"
37 #include "xfs_dir2_sf.h"
38 #include "xfs_attr_sf.h"
39 #include "xfs_dinode.h"
40 #include "xfs_inode.h"
41 #include "xfs_buf_item.h"
42 #include "xfs_inode_item.h"
43 #include "xfs_btree.h"
44 #include "xfs_alloc.h"
45 #include "xfs_ialloc.h"
48 #include "xfs_error.h"
49 #include "xfs_utils.h"
50 #include "xfs_dir2_trace.h"
51 #include "xfs_quota.h"
53 #include "xfs_filestream.h"
54 #include "xfs_vnodeops.h"
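/*
 * Slab caches (zones) used to allocate incore inodes, their data/attr
 * forks, and inode cluster bookkeeping structures.
 */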
56 kmem_zone_t *xfs_ifork_zone;
57 kmem_zone_t *xfs_inode_zone;
58 kmem_zone_t *xfs_icluster_zone;
61 * Used in xfs_itruncate_finish(). This is the maximum number of extents
62 * freed from a file in a single transaction.
64 #define XFS_ITRUNC_MAX_EXTENTS 2
66 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
67 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
68 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
69 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
73 * Make sure that the extents in the given memory buffer
83 xfs_bmbt_rec_host_t rec;
86 for (i = 0; i < nrecs; i++) {
87 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
88 rec.l0 = get_unaligned(&ep->l0);
89 rec.l1 = get_unaligned(&ep->l1);
90 xfs_bmbt_get_all(&rec, &irec);
91 if (fmt == XFS_EXTFMT_NOSTATE)
92 ASSERT(irec.br_state == XFS_EXT_NORM);
96 #define xfs_validate_extents(ifp, nrecs, fmt)
100 * Check that none of the inodes in the buffer have a next
101 * unlinked field of 0.
113 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
115 for (i = 0; i < j; i++) {
116 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
117 i * mp->m_sb.sb_inodesize);
118 if (!dip->di_next_unlinked) {
119 xfs_fs_cmn_err(CE_ALERT, mp,
120 "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. About to pop an ASSERT.",
122 ASSERT(dip->di_next_unlinked);
129 * Find the buffer associated with the given inode map
130 * We do basic validation checks on the buffer once it has been
131 * retrieved from disk.
147 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
148 (int)imap->im_len, XFS_BUF_LOCK, &bp);
150 cmn_err(CE_WARN, "xfs_imap_to_bp: xfs_trans_read_buf() returned "
151 "an error %d on %s. Returning error.",
152 error, mp->m_fsname);
157 * Validate the magic number and version of every inode in the buffer
158 * (on DEBUG kernels), or only the first inode in the buffer otherwise.
161 ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
162 #else /* usual case */
166 for (i = 0; i < ni; i++) {
170 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
171 (i << mp->m_sb.sb_inodelog));
172 di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
173 XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
174 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
175 XFS_ERRTAG_ITOBP_INOTOBP,
176 XFS_RANDOM_ITOBP_INOTOBP))) {
177 if (imap_flags & XFS_IMAP_BULKSTAT) {
178 xfs_trans_brelse(tp, bp);
179 return XFS_ERROR(EINVAL);
181 XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
182 XFS_ERRLEVEL_HIGH, mp, dip);
185 "Device %s - bad inode magic/vsn "
186 "daddr %lld #%d (magic=%x)",
187 XFS_BUFTARG_NAME(mp->m_ddev_targp),
188 (unsigned long long)imap->im_blkno, i,
189 be16_to_cpu(dip->di_core.di_magic));
191 xfs_trans_brelse(tp, bp);
192 return XFS_ERROR(EFSCORRUPTED);
196 xfs_inobp_check(mp, bp);
199 * Mark the buffer as an inode buffer now that it looks good
201 XFS_BUF_SET_VTYPE(bp, B_FS_INO);
208 * This routine is called to map an inode number within a file
209 * system to the buffer containing the on-disk version of the
210 * inode. It returns a pointer to the buffer containing the
211 * on-disk inode in the bpp parameter, and in the dip parameter
212 * it returns a pointer to the on-disk inode within that buffer.
214 * If a non-zero error is returned, then the contents of bpp and
215 * dipp are undefined.
217 * Use xfs_imap() to determine the size and location of the
218 * buffer to read from disk.
234 error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
238 error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0);
242 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
244 *offset = imap.im_boffset;
250 * This routine is called to map an inode to the buffer containing
251 * the on-disk version of the inode. It returns a pointer to the
252 * buffer containing the on-disk inode in the bpp parameter, and in
253 * the dip parameter it returns a pointer to the on-disk inode within
256 * If a non-zero error is returned, then the contents of bpp and
257 * dipp are undefined.
259 * If the inode is new and has not yet been initialized, use xfs_imap()
260 * to determine the size and location of the buffer to read from disk.
261 * If the inode has already been mapped to its buffer and read in once,
262 * then use the mapping information stored in the inode rather than
263 * calling xfs_imap(). This allows us to avoid the overhead of looking
264 * at the inode btree for small block file systems (see xfs_dilocate()).
265 * We can tell whether the inode has been mapped in before by comparing
266 * its disk block address to 0. Only uninitialized inodes will have
267 * 0 for the disk block address.
283 if (ip->i_blkno == (xfs_daddr_t)0) {
285 error = xfs_imap(mp, tp, ip->i_ino, &imap,
286 XFS_IMAP_LOOKUP | imap_flags);
291 * Fill in the fields in the inode that will be used to
292 * map the inode to its buffer from now on.
294 ip->i_blkno = imap.im_blkno;
295 ip->i_len = imap.im_len;
296 ip->i_boffset = imap.im_boffset;
299 * We've already mapped the inode once, so just use the
300 * mapping that we saved the first time.
302 imap.im_blkno = ip->i_blkno;
303 imap.im_len = ip->i_len;
304 imap.im_boffset = ip->i_boffset;
306 ASSERT(bno == 0 || bno == imap.im_blkno);
308 error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
312 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
318 * Move inode type and inode format specific information from the
319 * on-disk inode to the in-core inode. For fifos, devs, and sockets
320 * this means setting if_rdev to the proper value. For files, directories,
321 * and symlinks this means bringing in the in-line data or extent
322 * pointers. For a file in B-tree format, only the root is immediately
323 * brought in-core. The rest will be read into if_extents when it
324 * is first referenced (see xfs_iread_extents()).
331 xfs_attr_shortform_t *atp;
335 ip->i_df.if_ext_max =
336 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
339 if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
340 be16_to_cpu(dip->di_core.di_anextents) >
341 be64_to_cpu(dip->di_core.di_nblocks))) {
342 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
343 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
344 (unsigned long long)ip->i_ino,
345 (int)(be32_to_cpu(dip->di_core.di_nextents) +
346 be16_to_cpu(dip->di_core.di_anextents)),
348 be64_to_cpu(dip->di_core.di_nblocks));
349 XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
351 return XFS_ERROR(EFSCORRUPTED);
354 if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
355 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
356 "corrupt dinode %Lu, forkoff = 0x%x.",
357 (unsigned long long)ip->i_ino,
358 dip->di_core.di_forkoff);
359 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
361 return XFS_ERROR(EFSCORRUPTED);
364 switch (ip->i_d.di_mode & S_IFMT) {
369 if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
370 XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
372 return XFS_ERROR(EFSCORRUPTED);
376 ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
382 switch (dip->di_core.di_format) {
383 case XFS_DINODE_FMT_LOCAL:
385 * no local regular files yet
387 if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
388 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
390 "(local format for regular file).",
391 (unsigned long long) ip->i_ino);
392 XFS_CORRUPTION_ERROR("xfs_iformat(4)",
395 return XFS_ERROR(EFSCORRUPTED);
398 di_size = be64_to_cpu(dip->di_core.di_size);
399 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
400 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
402 "(bad size %Ld for local inode).",
403 (unsigned long long) ip->i_ino,
404 (long long) di_size);
405 XFS_CORRUPTION_ERROR("xfs_iformat(5)",
408 return XFS_ERROR(EFSCORRUPTED);
412 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
414 case XFS_DINODE_FMT_EXTENTS:
415 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
417 case XFS_DINODE_FMT_BTREE:
418 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
421 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
423 return XFS_ERROR(EFSCORRUPTED);
428 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
429 return XFS_ERROR(EFSCORRUPTED);
434 if (!XFS_DFORK_Q(dip))
436 ASSERT(ip->i_afp == NULL);
437 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
438 ip->i_afp->if_ext_max =
439 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
440 switch (dip->di_core.di_aformat) {
441 case XFS_DINODE_FMT_LOCAL:
442 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
443 size = be16_to_cpu(atp->hdr.totsize);
444 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
446 case XFS_DINODE_FMT_EXTENTS:
447 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
449 case XFS_DINODE_FMT_BTREE:
450 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
453 error = XFS_ERROR(EFSCORRUPTED);
457 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
459 xfs_idestroy_fork(ip, XFS_DATA_FORK);
465 * The file is in-lined in the on-disk inode.
466 * If it fits into if_inline_data, then copy
467 * it there, otherwise allocate a buffer for it
468 * and copy the data there. Either way, set
469 * if_data to point at the data.
470 * If we allocate a buffer for the data, make
471 * sure that its size is a multiple of 4 and
472 * record the real size in if_real_bytes.
485 * If the size is unreasonable, then something
486 * is wrong and we just bail out rather than crash in
487 * kmem_alloc() or memcpy() below.
489 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
490 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
492 "(bad size %d for local fork, size = %d).",
493 (unsigned long long) ip->i_ino, size,
494 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
495 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
497 return XFS_ERROR(EFSCORRUPTED);
499 ifp = XFS_IFORK_PTR(ip, whichfork);
502 ifp->if_u1.if_data = NULL;
503 else if (size <= sizeof(ifp->if_u2.if_inline_data))
504 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
506 real_size = roundup(size, 4);
507 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
509 ifp->if_bytes = size;
510 ifp->if_real_bytes = real_size;
512 memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
513 ifp->if_flags &= ~XFS_IFEXTENTS;
514 ifp->if_flags |= XFS_IFINLINE;
519 * The file consists of a set of extents all
520 * of which fit into the on-disk inode.
521 * If there are few enough extents to fit into
522 * the if_inline_ext, then copy them there.
523 * Otherwise allocate a buffer for them and copy
524 * them into it. Either way, set if_extents
525 * to point at the extents.
539 ifp = XFS_IFORK_PTR(ip, whichfork);
540 nex = XFS_DFORK_NEXTENTS(dip, whichfork);
541 size = nex * (uint)sizeof(xfs_bmbt_rec_t);
544 * If the number of extents is unreasonable, then something
545 * is wrong and we just bail out rather than crash in
546 * kmem_alloc() or memcpy() below.
548 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
549 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
550 "corrupt inode %Lu ((a)extents = %d).",
551 (unsigned long long) ip->i_ino, nex);
552 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
554 return XFS_ERROR(EFSCORRUPTED);
557 ifp->if_real_bytes = 0;
559 ifp->if_u1.if_extents = NULL;
560 else if (nex <= XFS_INLINE_EXTS)
561 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
563 xfs_iext_add(ifp, 0, nex);
565 ifp->if_bytes = size;
567 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
568 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
569 for (i = 0; i < nex; i++, dp++) {
570 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
571 ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
572 ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
574 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
575 if (whichfork != XFS_DATA_FORK ||
576 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
577 if (unlikely(xfs_check_nostate_extents(
579 XFS_ERROR_REPORT("xfs_iformat_extents(2)",
582 return XFS_ERROR(EFSCORRUPTED);
585 ifp->if_flags |= XFS_IFEXTENTS;
590 * The file has too many extents to fit into
591 * the inode, so they are in B-tree format.
592 * Allocate a buffer for the root of the B-tree
593 * and copy the root into it. The if_extents
594 * field will remain NULL until all of the
595 * extents are read in (when they are needed).
603 xfs_bmdr_block_t *dfp;
609 ifp = XFS_IFORK_PTR(ip, whichfork);
610 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
611 size = XFS_BMAP_BROOT_SPACE(dfp);
612 nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
615 * blow out if -- fork has fewer extents than can fit in the
616 * fork (the fork shouldn't be in btree format then), the root btree
617 * block has more records than can fit into the fork,
618 * or the number of extents is greater than the number of blocks.
621 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
622 || XFS_BMDR_SPACE_CALC(nrecs) >
623 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
624 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
625 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
626 "corrupt inode %Lu (btree).",
627 (unsigned long long) ip->i_ino);
628 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
630 return XFS_ERROR(EFSCORRUPTED);
633 ifp->if_broot_bytes = size;
634 ifp->if_broot = kmem_alloc(size, KM_SLEEP);
635 ASSERT(ifp->if_broot != NULL);
637 * Copy and convert from the on-disk structure
638 * to the in-memory structure.
640 xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
641 ifp->if_broot, size);
642 ifp->if_flags &= ~XFS_IFEXTENTS;
643 ifp->if_flags |= XFS_IFBROOT;
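/*
 * Translate the inode core from its big-endian on-disk layout into the
 * CPU-endian form kept in the incore inode.
 */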
649 xfs_dinode_from_disk(
651 xfs_dinode_core_t *from)
653 to->di_magic = be16_to_cpu(from->di_magic);
654 to->di_mode = be16_to_cpu(from->di_mode);
655 to->di_version = from->di_version;
656 to->di_format = from->di_format;
657 to->di_onlink = be16_to_cpu(from->di_onlink);
658 to->di_uid = be32_to_cpu(from->di_uid);
659 to->di_gid = be32_to_cpu(from->di_gid);
660 to->di_nlink = be32_to_cpu(from->di_nlink);
661 to->di_projid = be16_to_cpu(from->di_projid);
662 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
663 to->di_flushiter = be16_to_cpu(from->di_flushiter);
664 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
665 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
666 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
667 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
668 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
669 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
670 to->di_size = be64_to_cpu(from->di_size);
671 to->di_nblocks = be64_to_cpu(from->di_nblocks);
672 to->di_extsize = be32_to_cpu(from->di_extsize);
673 to->di_nextents = be32_to_cpu(from->di_nextents);
674 to->di_anextents = be16_to_cpu(from->di_anextents);
675 to->di_forkoff = from->di_forkoff;
676 to->di_aformat = from->di_aformat;
677 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
678 to->di_dmstate = be16_to_cpu(from->di_dmstate);
679 to->di_flags = be16_to_cpu(from->di_flags);
680 to->di_gen = be32_to_cpu(from->di_gen);
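/*
 * Translate the incore inode core back into its big-endian on-disk
 * layout.
 */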
685 xfs_dinode_core_t *to,
686 xfs_icdinode_t *from)
688 to->di_magic = cpu_to_be16(from->di_magic);
689 to->di_mode = cpu_to_be16(from->di_mode);
690 to->di_version = from->di_version;
691 to->di_format = from->di_format;
692 to->di_onlink = cpu_to_be16(from->di_onlink);
693 to->di_uid = cpu_to_be32(from->di_uid);
694 to->di_gid = cpu_to_be32(from->di_gid);
695 to->di_nlink = cpu_to_be32(from->di_nlink);
696 to->di_projid = cpu_to_be16(from->di_projid);
697 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
698 to->di_flushiter = cpu_to_be16(from->di_flushiter);
699 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
700 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
701 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
702 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
703 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
704 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
705 to->di_size = cpu_to_be64(from->di_size);
706 to->di_nblocks = cpu_to_be64(from->di_nblocks);
707 to->di_extsize = cpu_to_be32(from->di_extsize);
708 to->di_nextents = cpu_to_be32(from->di_nextents);
709 to->di_anextents = cpu_to_be16(from->di_anextents);
710 to->di_forkoff = from->di_forkoff;
711 to->di_aformat = from->di_aformat;
712 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
713 to->di_dmstate = cpu_to_be16(from->di_dmstate);
714 to->di_flags = cpu_to_be16(from->di_flags);
715 to->di_gen = cpu_to_be32(from->di_gen);
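/*
 * Translate on-disk inode flags (XFS_DIFLAG_*) into the extended flags
 * (XFS_XFLAG_*) that are reported to userspace.
 */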
724 if (di_flags & XFS_DIFLAG_ANY) {
725 if (di_flags & XFS_DIFLAG_REALTIME)
726 flags |= XFS_XFLAG_REALTIME;
727 if (di_flags & XFS_DIFLAG_PREALLOC)
728 flags |= XFS_XFLAG_PREALLOC;
729 if (di_flags & XFS_DIFLAG_IMMUTABLE)
730 flags |= XFS_XFLAG_IMMUTABLE;
731 if (di_flags & XFS_DIFLAG_APPEND)
732 flags |= XFS_XFLAG_APPEND;
733 if (di_flags & XFS_DIFLAG_SYNC)
734 flags |= XFS_XFLAG_SYNC;
735 if (di_flags & XFS_DIFLAG_NOATIME)
736 flags |= XFS_XFLAG_NOATIME;
737 if (di_flags & XFS_DIFLAG_NODUMP)
738 flags |= XFS_XFLAG_NODUMP;
739 if (di_flags & XFS_DIFLAG_RTINHERIT)
740 flags |= XFS_XFLAG_RTINHERIT;
741 if (di_flags & XFS_DIFLAG_PROJINHERIT)
742 flags |= XFS_XFLAG_PROJINHERIT;
743 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
744 flags |= XFS_XFLAG_NOSYMLINKS;
745 if (di_flags & XFS_DIFLAG_EXTSIZE)
746 flags |= XFS_XFLAG_EXTSIZE;
747 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
748 flags |= XFS_XFLAG_EXTSZINHERIT;
749 if (di_flags & XFS_DIFLAG_NODEFRAG)
750 flags |= XFS_XFLAG_NODEFRAG;
751 if (di_flags & XFS_DIFLAG_FILESTREAM)
752 flags |= XFS_XFLAG_FILESTREAM;
762 xfs_icdinode_t *dic = &ip->i_d;
764 return _xfs_dic2xflags(dic->di_flags) |
765 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
772 xfs_dinode_core_t *dic = &dip->di_core;
774 return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
775 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
779 * Given a mount structure and an inode number, return a pointer
780 * to a newly allocated in-core inode corresponding to the given
783 * Initialize the inode's attributes and extent pointers if it
784 * already has them (it will not if the inode has no links).
800 ASSERT(xfs_inode_zone != NULL);
802 ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
805 atomic_set(&ip->i_iocount, 0);
806 spin_lock_init(&ip->i_flags_lock);
809 * Get pointers to the on-disk inode and the buffer containing it.
810 * If the inode number refers to a block outside the file system
811 * then xfs_itobp() will return NULL. In this case we should
812 * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will
813 * know that this is a new incore inode.
815 error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
817 kmem_zone_free(xfs_inode_zone, ip);
822 * Initialize inode's trace buffers.
823 * Do this before xfs_iformat in case it adds entries.
825 #ifdef XFS_INODE_TRACE
826 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
828 #ifdef XFS_BMAP_TRACE
829 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
831 #ifdef XFS_BMBT_TRACE
832 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
835 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
837 #ifdef XFS_ILOCK_TRACE
838 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
840 #ifdef XFS_DIR2_TRACE
841 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
845 * If we got something that isn't an inode it means someone
846 * (nfs or dmi) has a stale handle.
848 if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
849 kmem_zone_free(xfs_inode_zone, ip);
850 xfs_trans_brelse(tp, bp);
852 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
853 "dip->di_core.di_magic (0x%x) != "
854 "XFS_DINODE_MAGIC (0x%x)",
855 be16_to_cpu(dip->di_core.di_magic),
858 return XFS_ERROR(EINVAL);
862 * If the on-disk inode is already linked to a directory
863 * entry, copy all of the inode into the in-core inode.
864 * xfs_iformat() handles copying in the inode format
865 * specific information.
866 * Otherwise, just get the truly permanent information.
868 if (dip->di_core.di_mode) {
869 xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
870 error = xfs_iformat(ip, dip);
872 kmem_zone_free(xfs_inode_zone, ip);
873 xfs_trans_brelse(tp, bp);
875 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
876 "xfs_iformat() returned error %d",
882 ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic);
883 ip->i_d.di_version = dip->di_core.di_version;
884 ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen);
885 ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter);
887 * Make sure to pull in the mode here as well in
888 * case the inode is released without being used.
889 * This ensures that xfs_inactive() will see that
890 * the inode is already free and not try to mess
891 * with the uninitialized part of it.
895 * Initialize the per-fork minima and maxima for a new
896 * inode here. xfs_iformat will do it for old inodes.
898 ip->i_df.if_ext_max =
899 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
902 INIT_LIST_HEAD(&ip->i_reclaim);
905 * The inode format changed when we moved the link count and
906 * made it 32 bits long. If this is an old format inode,
907 * convert it in memory to look like a new one. If it gets
908 * flushed to disk we will convert back before flushing or
909 * logging it. We zero out the new projid field and the old link
910 * count field. We'll handle clearing the pad field (the remains
911 * of the old uuid field) when we actually convert the inode to
912 * the new format. We don't change the version number so that we
913 * can distinguish this from a real new format inode.
915 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
916 ip->i_d.di_nlink = ip->i_d.di_onlink;
917 ip->i_d.di_onlink = 0;
918 ip->i_d.di_projid = 0;
921 ip->i_delayed_blks = 0;
922 ip->i_size = ip->i_d.di_size;
925 * Mark the buffer containing the inode as something to keep
926 * around for a while. This helps to keep recently accessed
927 * meta-data in-core longer.
929 XFS_BUF_SET_REF(bp, XFS_INO_REF);
932 * Use xfs_trans_brelse() to release the buffer containing the
933 * on-disk inode, because it was acquired with xfs_trans_read_buf()
934 * in xfs_itobp() above. If tp is NULL, this is just a normal
935 * brelse(). If we're within a transaction, then xfs_trans_brelse()
936 * will only release the buffer if it is not dirty within the
937 * transaction. It will be OK to release the buffer in this case,
938 * because inodes on disk are never destroyed and we will be
939 * locking the new in-core inode before putting it in the hash
940 * table where other processes can find it. Thus we don't have
941 * to worry about the inode being changed just because we released
944 xfs_trans_brelse(tp, bp);
950 * Read in extents from a btree-format inode.
951 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c.
961 xfs_extnum_t nextents;
964 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
965 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
967 return XFS_ERROR(EFSCORRUPTED);
969 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
970 size = nextents * sizeof(xfs_bmbt_rec_t);
971 ifp = XFS_IFORK_PTR(ip, whichfork);
974 * We know that the size is valid (it's checked in iformat_btree)
976 ifp->if_lastex = NULLEXTNUM;
977 ifp->if_bytes = ifp->if_real_bytes = 0;
978 ifp->if_flags |= XFS_IFEXTENTS;
979 xfs_iext_add(ifp, 0, nextents);
980 error = xfs_bmap_read_extents(tp, ip, whichfork);
982 xfs_iext_destroy(ifp);
983 ifp->if_flags &= ~XFS_IFEXTENTS;
986 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
991 * Allocate an inode on disk and return a copy of its in-core version.
992 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
993 * appropriately within the inode. The uid and gid for the inode are
994 * set according to the contents of the given cred structure.
996 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
997 * has a free inode available, call xfs_iget()
998 * to obtain the in-core version of the allocated inode. Finally,
999 * fill in the inode and log its initial contents. In this case,
1000 * ialloc_context would be set to NULL and call_again set to false.
1002 * If xfs_dialloc() does not have an available inode,
1003 * it will replenish its supply by doing an allocation. Since we can
1004 * only do one allocation within a transaction without deadlocks, we
1005 * must commit the current transaction before returning the inode itself.
1006 * In this case, therefore, we will set call_again to true and return.
1007 * The caller should then commit the current transaction, start a new
1008 * transaction, and call xfs_ialloc() again to actually get the inode.
1010 * To ensure that some other process does not grab the inode that
1011 * was allocated during the first call to xfs_ialloc(), this routine
1012 * also returns the [locked] bp pointing to the head of the freelist
1013 * as ialloc_context. The caller should hold this buffer across
1014 * the commit and pass it back into this routine on the second call.
1016 * If we are allocating quota inodes, we do not have a parent inode
1017 * to attach to or associate with (i.e. pip == NULL) because they
1018 * are not linked into the directory structure - they are attached
1019 * directly to the superblock - and so have no parent.
1031 xfs_buf_t **ialloc_context,
1032 boolean_t *call_again,
1042 * Call the space management code to pick
1043 * the on-disk inode to be allocated.
1045 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
1046 ialloc_context, call_again, &ino);
1050 if (*call_again || ino == NULLFSINO) {
1054 ASSERT(*ialloc_context == NULL);
1057 * Get the in-core inode with the lock held exclusively.
1058 * This is because we're setting fields here that we need
1059 * to prevent others from looking at until we're done.
1061 error = xfs_trans_iget(tp->t_mountp, tp, ino,
1062 XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1069 ip->i_d.di_mode = (__uint16_t)mode;
1070 ip->i_d.di_onlink = 0;
1071 ip->i_d.di_nlink = nlink;
1072 ASSERT(ip->i_d.di_nlink == nlink);
1073 ip->i_d.di_uid = current_fsuid(cr);
1074 ip->i_d.di_gid = current_fsgid(cr);
1075 ip->i_d.di_projid = prid;
1076 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1079 * If the superblock version supports the new (version 2) inode format
1080 * and this is currently an old format inode, then change
1081 * the inode version number now. This way we only do the conversion
1082 * here rather than here and in the flush/logging code.
1084 if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
1085 ip->i_d.di_version == XFS_DINODE_VERSION_1) {
1086 ip->i_d.di_version = XFS_DINODE_VERSION_2;
1088 * We've already zeroed the old link count, the projid field,
1089 * and the pad field.
1094 * Project ids won't be stored on disk for version 1 inodes, so bump to version 2 if one was given.
1096 if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
1097 xfs_bump_ino_vers2(tp, ip);
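/*
 * Inherit the group id from the parent, and for directories the setgid
 * bit as well, when group inheritance is in effect.
 */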
1099 if (pip && XFS_INHERIT_GID(pip)) {
1100 ip->i_d.di_gid = pip->i_d.di_gid;
1101 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1102 ip->i_d.di_mode |= S_ISGID;
1107 * If the group ID of the new file does not match the effective group
1108 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1109 * (and only if the irix_sgid_inherit compatibility variable is set).
1111 if ((irix_sgid_inherit) &&
1112 (ip->i_d.di_mode & S_ISGID) &&
1113 (!in_group_p((gid_t)ip->i_d.di_gid))) {
1114 ip->i_d.di_mode &= ~S_ISGID;
1117 ip->i_d.di_size = 0;
1119 ip->i_d.di_nextents = 0;
1120 ASSERT(ip->i_d.di_nblocks == 0);
1121 xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
1123 * di_gen will have been taken care of in xfs_iread.
1125 ip->i_d.di_extsize = 0;
1126 ip->i_d.di_dmevmask = 0;
1127 ip->i_d.di_dmstate = 0;
1128 ip->i_d.di_flags = 0;
1129 flags = XFS_ILOG_CORE;
1130 switch (mode & S_IFMT) {
1135 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1136 ip->i_df.if_u2.if_rdev = rdev;
1137 ip->i_df.if_flags = 0;
1138 flags |= XFS_ILOG_DEV;
1141 if (pip && xfs_inode_is_filestream(pip)) {
1142 error = xfs_filestream_associate(pip, ip);
1146 xfs_iflags_set(ip, XFS_IFILESTREAM);
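/*
 * Inherit per-inode flags (realtime, extent size hints, noatime, etc.)
 * from the parent where the parent's flags or the xfs_inherit_* tunables
 * ask for it.
 */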
1150 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1153 if ((mode & S_IFMT) == S_IFDIR) {
1154 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1155 di_flags |= XFS_DIFLAG_RTINHERIT;
1156 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1157 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1158 ip->i_d.di_extsize = pip->i_d.di_extsize;
1160 } else if ((mode & S_IFMT) == S_IFREG) {
1161 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1162 di_flags |= XFS_DIFLAG_REALTIME;
1163 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1164 di_flags |= XFS_DIFLAG_EXTSIZE;
1165 ip->i_d.di_extsize = pip->i_d.di_extsize;
1168 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1169 xfs_inherit_noatime)
1170 di_flags |= XFS_DIFLAG_NOATIME;
1171 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1173 di_flags |= XFS_DIFLAG_NODUMP;
1174 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1176 di_flags |= XFS_DIFLAG_SYNC;
1177 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1178 xfs_inherit_nosymlinks)
1179 di_flags |= XFS_DIFLAG_NOSYMLINKS;
1180 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1181 di_flags |= XFS_DIFLAG_PROJINHERIT;
1182 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1183 xfs_inherit_nodefrag)
1184 di_flags |= XFS_DIFLAG_NODEFRAG;
1185 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1186 di_flags |= XFS_DIFLAG_FILESTREAM;
1187 ip->i_d.di_flags |= di_flags;
1191 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1192 ip->i_df.if_flags = XFS_IFEXTENTS;
1193 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1194 ip->i_df.if_u1.if_extents = NULL;
1200 * Attribute fork settings for new inode.
1202 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1203 ip->i_d.di_anextents = 0;
1206 * Log the new values stuffed into the inode.
1208 xfs_trans_log_inode(tp, ip, flags);
1210 /* now that we have an i_mode we can setup inode ops and unlock */
1211 xfs_initialize_vnode(tp->t_mountp, vp, ip);
1218 * Check to make sure that there are no blocks allocated to the
1219 * file beyond the size of the file. We don't check this for
1220 * files with fixed size extents or real time extents, but we
1221 * at least do it for regular files.
1230 xfs_fileoff_t map_first;
1232 xfs_bmbt_irec_t imaps[2];
1234 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1237 if (XFS_IS_REALTIME_INODE(ip))
1240 if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
1244 map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
1246 * The filesystem could be shutting down, so bmapi may return
1249 if (xfs_bmapi(NULL, ip, map_first,
1251 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1253 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1256 ASSERT(nimaps == 1);
1257 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1262 * Calculate the last possible buffered byte in a file. This must
1263 * include data that was buffered beyond the EOF by the write code.
1264 * This also needs to deal with overflowing the xfs_fsize_t type
1265 * which can happen for sizes near the limit.
1267 * We also need to take into account any blocks beyond the EOF. It
1268 * may be the case that they were buffered by a write which failed.
1269 * In that case the pages will still be in memory, but the inode size
1270 * will never have been updated.
1277 xfs_fsize_t last_byte;
1278 xfs_fileoff_t last_block;
1279 xfs_fileoff_t size_last_block;
1282 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
1286 * Only check for blocks beyond the EOF if the extents have
1287 * been read in. This eliminates the need for the inode lock,
1288 * and it also saves us from looking when it really isn't
1291 if (ip->i_df.if_flags & XFS_IFEXTENTS) {
1292 error = xfs_bmap_last_offset(NULL, ip, &last_block,
1300 size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
1301 last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
1303 last_byte = XFS_FSB_TO_B(mp, last_block);
1304 if (last_byte < 0) {
1305 return XFS_MAXIOFFSET(mp);
1307 last_byte += (1 << mp->m_writeio_log);
1308 if (last_byte < 0) {
1309 return XFS_MAXIOFFSET(mp);
1314 #if defined(XFS_RW_TRACE)
1320 xfs_fsize_t new_size,
1321 xfs_off_t toss_start,
1322 xfs_off_t toss_finish)
1324 if (ip->i_rwtrace == NULL) {
1328 ktrace_enter(ip->i_rwtrace,
1331 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1332 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1333 (void*)((long)flag),
1334 (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1335 (void*)(unsigned long)(new_size & 0xffffffff),
1336 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1337 (void*)(unsigned long)(toss_start & 0xffffffff),
1338 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1339 (void*)(unsigned long)(toss_finish & 0xffffffff),
1340 (void*)(unsigned long)current_cpu(),
1341 (void*)(unsigned long)current_pid(),
1347 #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1351 * Start the truncation of the file to new_size. The new size
1352 * must be smaller than the current size. This routine will
1353 * clear the buffer and page caches of file data in the removed
1354 * range, and xfs_itruncate_finish() will remove the underlying
1357 * The inode must have its I/O lock locked EXCLUSIVELY, and it
1358 * must NOT have the inode lock held at all. This is because we're
1359 * calling into the buffer/page cache code and we can't hold the
1360 * inode lock when we do so.
1362 * We need to wait for any direct I/Os in flight to complete before we
1363 * proceed with the truncate. This is needed to prevent the extents
1364 * being read or written by the direct I/Os from being removed while the
1365 * I/O is in flight as there is no other method of synchronising
1366 * direct I/O with the truncate operation. Also, because we hold
1367 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1368 * started until the truncate completes and drops the lock. Essentially,
1369 * the vn_iowait() call forms an I/O barrier that provides strict ordering
1370 * between direct I/Os and the truncate operation.
1372 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1373 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
1374 * in the case that the caller is locking things out of order and
1375 * may not be able to call xfs_itruncate_finish() with the inode lock
1376 * held without dropping the I/O lock. If the caller must drop the
1377 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
1378 * must be called again with all the same restrictions as the initial
1382 xfs_itruncate_start(
1385 xfs_fsize_t new_size)
1387 xfs_fsize_t last_byte;
1388 xfs_off_t toss_start;
1393 ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1394 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1395 ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
1396 (flags == XFS_ITRUNC_MAYBE));
1401 /* wait for the completion of any pending DIOs */
1402 if (new_size < ip->i_size)
1406 * Call toss_pages or flushinval_pages to get rid of pages
1407 * overlapping the region being removed. We have to use
1408 * the less efficient flushinval_pages in the case that the
1409 * caller may not be able to finish the truncate without
1410 * dropping the inode's I/O lock. Make sure
1411 * to catch any pages brought in by buffers overlapping
1412 * the EOF by searching out beyond the isize by our
1413 * block size. We round new_size up to a block boundary
1414 * so that we don't toss things on the same block as
1415 * new_size but before it.
1417 * Before calling toss_page or flushinval_pages, make sure to
1418 * call remapf() over the same region if the file is mapped.
1419 * This frees up mapped file references to the pages in the
1420 * given range and for the flushinval_pages case it ensures
1421 * that we get the latest mapped changes flushed out.
1423 toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1424 toss_start = XFS_FSB_TO_B(mp, toss_start);
1425 if (toss_start < 0) {
1427 * The place to start tossing is beyond our maximum
1428 * file size, so there is no way that the data extended
1433 last_byte = xfs_file_last_byte(ip);
1434 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
1436 if (last_byte > toss_start) {
1437 if (flags & XFS_ITRUNC_DEFINITE) {
1438 xfs_tosspages(ip, toss_start,
1439 -1, FI_REMAPF_LOCKED);
1441 error = xfs_flushinval_pages(ip, toss_start,
1442 -1, FI_REMAPF_LOCKED);
1447 if (new_size == 0) {
1448 ASSERT(VN_CACHED(vp) == 0);
1455 * Shrink the file to the given new_size. The new
1456 * size must be smaller than the current size.
1457 * This will free up the underlying blocks
1458 * in the removed range after a call to xfs_itruncate_start()
1459 * or xfs_atruncate_start().
1461 * The transaction passed to this routine must have made
1462 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
1463 * This routine may commit the given transaction and
1464 * start new ones, so make sure everything involved in
1465 * the transaction is tidy before calling here.
1466 * Some transaction will be returned to the caller to be
1467 * committed. The incoming transaction must already include
1468 * the inode, and both inode locks must be held exclusively.
1469 * The inode must also be "held" within the transaction. On
1470 * return the inode will be "held" within the returned transaction.
1471 * This routine does NOT require any disk space to be reserved
1472 * for it within the transaction.
1474 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
1475 * and it indicates the fork which is to be truncated. For the
1476 * attribute fork we only support truncation to size 0.
1478 * We use the sync parameter to indicate whether or not the first
1479 * transaction we perform might have to be synchronous. For the attr fork,
1480 * it needs to be so if the unlink of the inode is not yet known to be
1481 * permanent in the log. This keeps us from freeing and reusing the
1482 * blocks of the attribute fork before the unlink of the inode becomes
1485 * For the data fork, we normally have to run synchronously if we're
1486 * being called out of the inactive path or we're being called
1487 * out of the create path where we're truncating an existing file.
1488 * Either way, the truncate needs to be sync so blocks don't reappear
1489 * in the file with altered data in case of a crash. wsync filesystems
1490 * can run the first case async because anything that shrinks the inode
1491 * has to run sync so by the time we're called here from inactive, the
1492 * inode size is permanently set to 0.
1494 * Calls from the truncate path always need to be sync unless we're
1495 * in a wsync filesystem and the file has already been unlinked.
1497 * The caller is responsible for correctly setting the sync parameter.
1498 * It gets too hard for us to guess here which path we're being called
1499 * out of just based on inode state.
1502 xfs_itruncate_finish(
1505 xfs_fsize_t new_size,
1509 xfs_fsblock_t first_block;
1510 xfs_fileoff_t first_unmap_block;
1511 xfs_fileoff_t last_block;
1512 xfs_filblks_t unmap_len=0;
1517 xfs_bmap_free_t free_list;
1520 ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1521 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
1522 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1523 ASSERT(*tp != NULL);
1524 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1525 ASSERT(ip->i_transp == *tp);
1526 ASSERT(ip->i_itemp != NULL);
1527 ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1531 mp = (ntp)->t_mountp;
1532 ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
1535 * We only support truncating the entire attribute fork.
1537 if (fork == XFS_ATTR_FORK) {
1540 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1541 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1543 * The first thing we do is set the size to new_size permanently
1544 * on disk. This way we don't have to worry about anyone ever
1545 * being able to look at the data being freed even in the face
1546 * of a crash. What we're getting around here is the case where
1547 * we free a block, it is allocated to another file, it is written
1548 * to, and then we crash. If the new data gets written to the
1549 * file but the log buffers containing the free and reallocation
1550 * don't, then we'd end up with garbage in the blocks being freed.
1551 * As long as we make the new_size permanent before actually
1552 * freeing any blocks it doesn't matter if they get written to.
1554 * The callers must signal into us whether or not the size
1555 * setting here must be synchronous. There are a few cases
1556 * where it doesn't have to be synchronous. Those cases
1557 * occur if the file is unlinked and we know the unlink is
1558 * permanent or if the blocks being truncated are guaranteed
1559 * to be beyond the inode eof (regardless of the link count)
1560 * and the eof value is permanent. Both of these cases occur
1561 * only on wsync-mounted filesystems. In those cases, we're
1562 * guaranteed that no user will ever see the data in the blocks
1563 * that are being truncated so the truncate can run async.
1564 * In the free beyond eof case, the file may wind up with
1565 * more blocks allocated to it than it needs if we crash
1566 * and that won't get fixed until the next time the file
1567 * is re-opened and closed but that's ok as that shouldn't
1568 * be too many blocks.
1570 * However, we can't just make all wsync xactions run async
1571 * because there's one call out of the create path that needs
1572 * to run sync: truncating an existing file whose size is > 0
1573 * down to size 0.
1575 * It's probably possible to come up with a test in this
1576 * routine that would correctly distinguish all the above
1577 * cases from the values of the function parameters and the
1578 * inode state but for sanity's sake, I've decided to let the
1579 * layers above just tell us. It's simpler to correctly figure
1580 * out in the layer above exactly under what conditions we
1581 * can run async and I think it's easier for others to read and
1582 * follow the logic in case something has to be changed.
1583 * cscope is your friend -- rcc.
1585 * The attribute fork is much simpler.
1587 * For the attribute fork we allow the caller to tell us whether
1588 * the unlink of the inode that led to this call is yet permanent
1589 * in the on disk log. If it is not and we will be freeing extents
1590 * in this inode then we make the first transaction synchronous
1591 * to make sure that the unlink is permanent by the time we free
1594 if (fork == XFS_DATA_FORK) {
1595 if (ip->i_d.di_nextents > 0) {
1597 * If we are not changing the file size then do
1598 * not update the on-disk file size - we may be
1599 * called from xfs_inactive_free_eofblocks(). If we
1600 * update the on-disk file size and then the system
1601 * crashes before the contents of the file are
1602 * flushed to disk then the files may be full of
1603 * holes (i.e. the NULL files bug).
1605 if (ip->i_size != new_size) {
1606 ip->i_d.di_size = new_size;
1607 ip->i_size = new_size;
1608 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1612 ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1613 if (ip->i_d.di_anextents > 0)
1614 xfs_trans_set_sync(ntp);
1616 ASSERT(fork == XFS_DATA_FORK ||
1617 (fork == XFS_ATTR_FORK &&
1618 ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1619 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1622 * Since it is possible for space to become allocated beyond
1623 * the end of the file (in a crash where the space is allocated
1624 * but the inode size is not yet updated), simply remove any
1625 * blocks which show up between the new EOF and the maximum
1626 * possible file size. If the first block to be removed is
1627 * beyond the maximum file size (ie it is the same as last_block),
1628 * then there is nothing to do.
1630 last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1631 ASSERT(first_unmap_block <= last_block);
1633 if (last_block == first_unmap_block) {
1636 unmap_len = last_block - first_unmap_block + 1;
1640 * Free up to XFS_ITRUNC_MAX_EXTENTS extents at a time. xfs_bunmapi()
1641 * will tell us whether it freed the entire range or
1642 * not. If this is a synchronous mount (wsync),
1643 * then we can tell bunmapi to keep all the
1644 * transactions asynchronous since the unlink
1645 * transaction that made this inode inactive has
1646 * already hit the disk. There's no danger of
1647 * the freed blocks being reused, there being a
1648 * crash, and the reused blocks suddenly reappearing
1649 * in this file with garbage in them once recovery
1652 XFS_BMAP_INIT(&free_list, &first_block);
1653 error = xfs_bunmapi(ntp, ip,
1654 first_unmap_block, unmap_len,
1655 XFS_BMAPI_AFLAG(fork) |
1656 (sync ? 0 : XFS_BMAPI_ASYNC),
1657 XFS_ITRUNC_MAX_EXTENTS,
1658 &first_block, &free_list,
1662 * If the bunmapi call encounters an error,
1663 * return to the caller where the transaction
1664 * can be properly aborted. We just need to
1665 * make sure we're not holding any resources
1666 * that we were not when we came in.
1668 xfs_bmap_cancel(&free_list);
1673 * Duplicate the transaction that has the permanent
1674 * reservation and commit the old transaction.
1676 error = xfs_bmap_finish(tp, &free_list, &committed);
1680 * If the bmap finish call encounters an error,
1681 * return to the caller where the transaction
1682 * can be properly aborted. We just need to
1683 * make sure we're not holding any resources
1684 * that we were not when we came in.
1686 * Aborting from this point might lose some
1687 * blocks in the file system, but oh well.
1689 xfs_bmap_cancel(&free_list);
1692 * If the passed in transaction committed
1693 * in xfs_bmap_finish(), then we want to
1694 * add the inode to this one before returning.
1695 * This keeps things simple for the higher
1696 * level code, because it always knows that
1697 * the inode is locked and held in the
1698 * transaction that returns to it whether
1699 * errors occur or not. We don't mark the
1700 * inode dirty so that this transaction can
1701 * be easily aborted if possible.
1703 xfs_trans_ijoin(ntp, ip,
1704 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1705 xfs_trans_ihold(ntp, ip);
1712 * The first xact was committed,
1713 * so add the inode to the new one.
1714 * Mark it dirty so it will be logged
1715 * and moved forward in the log as
1716 * part of every commit.
1718 xfs_trans_ijoin(ntp, ip,
1719 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1720 xfs_trans_ihold(ntp, ip);
1721 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1723 ntp = xfs_trans_dup(ntp);
1724 (void) xfs_trans_commit(*tp, 0);
1726 error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
1727 XFS_TRANS_PERM_LOG_RES,
1728 XFS_ITRUNCATE_LOG_COUNT);
1730 * Add the inode being truncated to the next chained
1733 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1734 xfs_trans_ihold(ntp, ip);
1739 * Only update the size in the case of the data fork, but
1740 * always re-log the inode so that our permanent transaction
1741 * can keep on rolling it forward in the log.
1743 if (fork == XFS_DATA_FORK) {
1744 xfs_isize_check(mp, ip, new_size);
1746 * If we are not changing the file size then do
1747 * not update the on-disk file size - we may be
1748 * called from xfs_inactive_free_eofblocks(). If we
1749 * update the on-disk file size and then the system
1750 * crashes before the contents of the file are
1751 * flushed to disk then the files may be full of
1752 * holes (i.e. the NULL files bug).
1754 if (ip->i_size != new_size) {
1755 ip->i_d.di_size = new_size;
1756 ip->i_size = new_size;
1759 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1760 ASSERT((new_size != 0) ||
1761 (fork == XFS_ATTR_FORK) ||
1762 (ip->i_delayed_blks == 0));
1763 ASSERT((new_size != 0) ||
1764 (fork == XFS_ATTR_FORK) ||
1765 (ip->i_d.di_nextents == 0));
1766 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
1774 * Do the first part of growing a file: zero any data in the last
1775 * block that is beyond the old EOF. We need to do this before
1776 * the inode is joined to the transaction to modify the i_size.
1777 * That way we can drop the inode lock and call into the buffer
1778 * cache to get the buffer mapping the EOF.
1783 xfs_fsize_t new_size,
1786 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1787 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1788 ASSERT(new_size > ip->i_size);
1791 * Zero any pages that may have been created by
1792 * xfs_write_file() beyond the end of the file
1793 * and any blocks between the old and new file sizes.
1795 return xfs_zero_eof(ip, new_size, ip->i_size);
1801 * This routine is called to extend the size of a file.
1802 * The inode must have both the iolock and the ilock locked
1803 * for update and it must be a part of the current transaction.
1804 * The xfs_igrow_start() function must have been called previously.
1805 * If the change_flag is not zero, the inode change timestamp will
1812 xfs_fsize_t new_size,
1815 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1816 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1817 ASSERT(ip->i_transp == tp);
1818 ASSERT(new_size > ip->i_size);
1821 * Update the file size. Update the inode change timestamp
1822 * if change_flag set.
1824 ip->i_d.di_size = new_size;
1825 ip->i_size = new_size;
1827 xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1828 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1834 * This is called when the inode's link count goes to 0.
1835 * We place the on-disk inode on a list in the AGI. It
1836 * will be pulled from this list when the inode is freed.
1848 xfs_agnumber_t agno;
1849 xfs_daddr_t agdaddr;
1856 ASSERT(ip->i_d.di_nlink == 0);
1857 ASSERT(ip->i_d.di_mode != 0);
1858 ASSERT(ip->i_transp == tp);
1862 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1863 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1866 * Get the agi buffer first. It ensures lock ordering
1869 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1870 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1875 * Validate the magic number of the agi block.
1877 agi = XFS_BUF_TO_AGI(agibp);
1879 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1880 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1881 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1882 XFS_RANDOM_IUNLINK))) {
1883 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1884 xfs_trans_brelse(tp, agibp);
1885 return XFS_ERROR(EFSCORRUPTED);
1888 * Get the index into the agi hash table for the
1889 * list this inode will go on.
1891 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1893 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1894 ASSERT(agi->agi_unlinked[bucket_index]);
1895 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1897 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1899 * There is already another inode in the bucket we need
1900 * to add ourselves to. Add us at the front of the list.
1901 * Here we put the head pointer into our next pointer,
1902 * and then we fall through to point the head at us.
1904 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
1908 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
1909 /* both on-disk, don't endian flip twice */
1910 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1911 offset = ip->i_boffset +
1912 offsetof(xfs_dinode_t, di_next_unlinked);
1913 xfs_trans_inode_buf(tp, ibp);
1914 xfs_trans_log_buf(tp, ibp, offset,
1915 (offset + sizeof(xfs_agino_t) - 1));
1916 xfs_inobp_check(mp, ibp);
1920 * Point the bucket head pointer at the inode being inserted.
1923 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1924 offset = offsetof(xfs_agi_t, agi_unlinked) +
1925 (sizeof(xfs_agino_t) * bucket_index);
1926 xfs_trans_log_buf(tp, agibp, offset,
1927 (offset + sizeof(xfs_agino_t) - 1));
1932 * Pull the on-disk inode from the AGI unlinked list.
1945 xfs_agnumber_t agno;
1946 xfs_daddr_t agdaddr;
1948 xfs_agino_t next_agino;
1949 xfs_buf_t *last_ibp;
1950 xfs_dinode_t *last_dip = NULL;
1952 int offset, last_offset = 0;
1957 * First pull the on-disk inode from the AGI unlinked list.
1961 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1962 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1965 * Get the agi buffer first. It ensures lock ordering
1968 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1969 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1972 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.",
1973 error, mp->m_fsname);
1977 * Validate the magic number of the agi block.
1979 agi = XFS_BUF_TO_AGI(agibp);
1981 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1982 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1983 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
1984 XFS_RANDOM_IUNLINK_REMOVE))) {
1985 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
1987 xfs_trans_brelse(tp, agibp);
1989 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.",
1991 return XFS_ERROR(EFSCORRUPTED);
1994 * Get the index into the agi hash table for the
1995 * list this inode is on.
1997 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1999 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2000 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2001 ASSERT(agi->agi_unlinked[bucket_index]);
2003 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2005 * We're at the head of the list. Get the inode's
2006 * on-disk buffer to see if there is anyone after us
2007 * on the list. Only modify our next pointer if it
2008 * is not already NULLAGINO. This saves us the overhead
2009 * of dealing with the buffer when there is no need to
2012 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2015 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2016 error, mp->m_fsname);
2019 next_agino = be32_to_cpu(dip->di_next_unlinked);
2020 ASSERT(next_agino != 0);
2021 if (next_agino != NULLAGINO) {
2022 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2023 offset = ip->i_boffset +
2024 offsetof(xfs_dinode_t, di_next_unlinked);
2025 xfs_trans_inode_buf(tp, ibp);
2026 xfs_trans_log_buf(tp, ibp, offset,
2027 (offset + sizeof(xfs_agino_t) - 1));
2028 xfs_inobp_check(mp, ibp);
2030 xfs_trans_brelse(tp, ibp);
2033 * Point the bucket head pointer at the next inode.
2035 ASSERT(next_agino != 0);
2036 ASSERT(next_agino != agino);
2037 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2038 offset = offsetof(xfs_agi_t, agi_unlinked) +
2039 (sizeof(xfs_agino_t) * bucket_index);
2040 xfs_trans_log_buf(tp, agibp, offset,
2041 (offset + sizeof(xfs_agino_t) - 1));
2044 * We need to search the list for the inode being freed.
2046 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2048 while (next_agino != agino) {
2050 * If the last inode wasn't the one pointing to
2051 * us, then release its buffer since we're not
2052 * going to do anything with it.
2054 if (last_ibp != NULL) {
2055 xfs_trans_brelse(tp, last_ibp);
2057 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2058 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2059 &last_ibp, &last_offset);
2062 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
2063 error, mp->m_fsname);
2066 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
2067 ASSERT(next_agino != NULLAGINO);
2068 ASSERT(next_agino != 0);
2071 * Now last_ibp points to the buffer previous to us on
2072 * the unlinked list. Pull us from the list.
2074 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2077 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2078 error, mp->m_fsname);
2081 next_agino = be32_to_cpu(dip->di_next_unlinked);
2082 ASSERT(next_agino != 0);
2083 ASSERT(next_agino != agino);
2084 if (next_agino != NULLAGINO) {
2085 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2086 offset = ip->i_boffset +
2087 offsetof(xfs_dinode_t, di_next_unlinked);
2088 xfs_trans_inode_buf(tp, ibp);
2089 xfs_trans_log_buf(tp, ibp, offset,
2090 (offset + sizeof(xfs_agino_t) - 1));
2091 xfs_inobp_check(mp, ibp);
2093 xfs_trans_brelse(tp, ibp);
2096 * Point the previous inode on the list to the next inode.
2098 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2099 ASSERT(next_agino != 0);
2100 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2101 xfs_trans_inode_buf(tp, last_ibp);
2102 xfs_trans_log_buf(tp, last_ibp, offset,
2103 (offset + sizeof(xfs_agino_t) - 1));
2104 xfs_inobp_check(mp, last_ibp);
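/*
 * Editorial note: a minimal, self-contained sketch of the list
 * manipulation performed above. The AGI unlinked buckets are singly
 * linked lists in which each on-disk inode stores the AG inode number
 * of its successor, so removal has exactly the two cases handled
 * above: the victim is the bucket head, or we walk the chain to find
 * its predecessor. The struct, NULL_INDEX sentinel and function name
 * below are invented for illustration and are not XFS types.
 */
#define NULL_INDEX	(~0u)		/* plays the role of NULLAGINO */

struct fake_inode {
	unsigned int	next;		/* index of the next list entry */
};

static void
example_unlink(
	unsigned int		*bucket,	/* head of one unlinked bucket */
	struct fake_inode	*inodes,	/* table indexed by inode number */
	unsigned int		victim)		/* entry being removed */
{
	unsigned int		cur;

	if (*bucket == victim) {
		/* Head of the list: point the bucket at our successor. */
		*bucket = inodes[victim].next;
	} else {
		/*
		 * Walk to our predecessor, then splice around us.  The
		 * caller guarantees the victim is on this list.
		 */
		cur = *bucket;
		while (inodes[cur].next != victim)
			cur = inodes[cur].next;
		inodes[cur].next = inodes[victim].next;
	}
	inodes[victim].next = NULL_INDEX;
}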
2109 STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
2111 return (((ip->i_itemp == NULL) ||
2112 !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2113 (ip->i_update_core == 0));
2118 xfs_inode_t *free_ip,
2122 xfs_mount_t *mp = free_ip->i_mount;
2123 int blks_per_cluster;
2126 int i, j, found, pre_flushed;
2129 xfs_inode_t *ip, **ip_found;
2130 xfs_inode_log_item_t *iip;
2131 xfs_log_item_t *lip;
2132 xfs_perag_t *pag = xfs_get_perag(mp, inum);
2134 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2135 blks_per_cluster = 1;
2136 ninodes = mp->m_sb.sb_inopblock;
2137 nbufs = XFS_IALLOC_BLOCKS(mp);
2139 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2140 mp->m_sb.sb_blocksize;
2141 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2142 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
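/*
 * Editorial note: worked example of the cluster geometry computed
 * above, using assumed numbers that do not come from this code: with
 * 4k filesystem blocks, an 8k inode cluster and 256 byte inodes
 * (16 inodes per block), a cluster covers 8k/4k = 2 blocks and holds
 * 2 * 16 = 32 inodes; a hypothetical 64 block inode allocation chunk
 * is then covered by 64/2 = 32 cluster buffers.  The helper below is
 * an illustrative restatement of the same arithmetic, not XFS code.
 */
static void
example_cluster_geometry(
	int	blocksize,		/* fs block size in bytes */
	int	cluster_size,		/* inode cluster buffer size in bytes */
	int	inodes_per_block,	/* inodes per fs block */
	int	chunk_blocks,		/* fs blocks per inode allocation chunk */
	int	*blks_per_cluster,
	int	*ninodes,
	int	*nbufs)
{
	if (blocksize >= cluster_size) {
		/* A single fs block holds at least one whole cluster. */
		*blks_per_cluster = 1;
		*ninodes = inodes_per_block;
		*nbufs = chunk_blocks;
	} else {
		/* A cluster buffer spans several fs blocks. */
		*blks_per_cluster = cluster_size / blocksize;
		*ninodes = *blks_per_cluster * inodes_per_block;
		*nbufs = chunk_blocks / *blks_per_cluster;
	}
}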
2145 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2147 for (j = 0; j < nbufs; j++, inum += ninodes) {
2148 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2149 XFS_INO_TO_AGBNO(mp, inum));
2153 * Look for each inode in memory and attempt to lock it;
2154 * we can be racing with flush and tail pushing here.
2155 * Any inode we get the locks on is added to an array of
2156 * inode items to process later.
2158 * When we get the buffer lock below, we could beat a flush
2159 * or tail pushing thread to the lock, in which case they
2160 * will go looking for the inode buffer and fail, so we need
2161 * some other form of interlock here.
2165 for (i = 0; i < ninodes; i++) {
2166 read_lock(&pag->pag_ici_lock);
2167 ip = radix_tree_lookup(&pag->pag_ici_root,
2168 XFS_INO_TO_AGINO(mp, (inum + i)));
2170 /* Inode not in memory or we found it already; skip it. */
2173 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2174 read_unlock(&pag->pag_ici_lock);
2178 if (xfs_inode_clean(ip)) {
2179 read_unlock(&pag->pag_ici_lock);
2183 /* If we can get the locks then add it to the
2184 * list, otherwise by the time we get the bp lock
2185 * below it will already be attached to the inode buffer and locked. */
2189 /* This inode will already be locked - by us; let's keep it that way. */
2193 if (ip == free_ip) {
2194 if (xfs_iflock_nowait(ip)) {
2195 xfs_iflags_set(ip, XFS_ISTALE);
2196 if (xfs_inode_clean(ip)) {
2199 ip_found[found++] = ip;
2202 read_unlock(&pag->pag_ici_lock);
2206 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2207 if (xfs_iflock_nowait(ip)) {
2208 xfs_iflags_set(ip, XFS_ISTALE);
2210 if (xfs_inode_clean(ip)) {
2212 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2214 ip_found[found++] = ip;
2217 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2220 read_unlock(&pag->pag_ici_lock);
2223 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2224 mp->m_bsize * blks_per_cluster,
2228 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2230 if (lip->li_type == XFS_LI_INODE) {
2231 iip = (xfs_inode_log_item_t *)lip;
2232 ASSERT(iip->ili_logged == 1);
2233 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2234 spin_lock(&mp->m_ail_lock);
2235 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2236 spin_unlock(&mp->m_ail_lock);
2237 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2240 lip = lip->li_bio_list;
2243 for (i = 0; i < found; i++) {
2248 ip->i_update_core = 0;
2250 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2254 iip->ili_last_fields = iip->ili_format.ilf_fields;
2255 iip->ili_format.ilf_fields = 0;
2256 iip->ili_logged = 1;
2257 spin_lock(&mp->m_ail_lock);
2258 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2259 spin_unlock(&mp->m_ail_lock);
2261 xfs_buf_attach_iodone(bp,
2262 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2263 xfs_istale_done, (xfs_log_item_t *)iip);
2264 if (ip != free_ip) {
2265 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2269 if (found || pre_flushed)
2270 xfs_trans_stale_inode_buf(tp, bp);
2271 xfs_trans_binval(tp, bp);
2274 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
2275 xfs_put_perag(mp, pag);
2279 * This is called to return an inode to the inode free list.
2280 * The inode should already be truncated to 0 length and have
2281 * no pages associated with it. This routine also assumes that
2282 * the inode is already a part of the transaction.
2284 * The on-disk copy of the inode will have been added to the list
2285 * of unlinked inodes in the AGI. We need to remove the inode from
2286 * that list atomically with respect to freeing it here.
2292 xfs_bmap_free_t *flist)
2296 xfs_ino_t first_ino;
2300 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2301 ASSERT(ip->i_transp == tp);
2302 ASSERT(ip->i_d.di_nlink == 0);
2303 ASSERT(ip->i_d.di_nextents == 0);
2304 ASSERT(ip->i_d.di_anextents == 0);
2305 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2306 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2307 ASSERT(ip->i_d.di_nblocks == 0);
2310 * Pull the on-disk inode from the AGI unlinked list.
2312 error = xfs_iunlink_remove(tp, ip);
2317 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2321 ip->i_d.di_mode = 0; /* mark incore inode as free */
2322 ip->i_d.di_flags = 0;
2323 ip->i_d.di_dmevmask = 0;
2324 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2325 ip->i_df.if_ext_max =
2326 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2327 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2328 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2330 * Bump the generation count so no one will be confused
2331 * by reincarnations of this inode.
2335 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2337 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0);
2342 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2343 * from picking up this inode when it is reclaimed (its incore state
2344 * initialized but not flushed to disk yet). The in-core di_mode is
2345 * already cleared and a corresponding transaction logged.
2346 * The hack here just synchronizes the in-core to on-disk
2347 * di_mode value in advance before the actual inode sync to disk.
2348 * This is OK because the inode is already unlinked and would never
2349 * change its di_mode again for this inode generation.
2350 * This is a temporary hack that would require a proper fix in the future.
2353 dip->di_core.di_mode = 0;
2356 xfs_ifree_cluster(ip, tp, first_ino);
2363 * Reallocate the space for if_broot based on the number of records
2364 * being added or deleted as indicated in rec_diff. Move the records
2365 * and pointers in if_broot to fit the new size. When shrinking this
2366 * will eliminate holes between the records and pointers created by
2367 * the caller. When growing this will create holes to be filled in by the caller.
2370 * The caller must not request to add more records than would fit in
2371 * the on-disk inode root. If the if_broot is currently NULL, then
2372 * if we are adding records, one will be allocated. The caller must also
2373 * not request that the number of records go below zero, although
2374 * it can go to zero.
2376 * ip -- the inode whose if_broot area is changing
2377 * rec_diff -- the change in the number of records, positive or negative,
2378 * requested for the if_broot array.
2388 xfs_bmbt_block_t *new_broot;
2395 * Handle the degenerate case quietly.
2397 if (rec_diff == 0) {
2401 ifp = XFS_IFORK_PTR(ip, whichfork);
2404 * If there wasn't any memory allocated before, just
2405 * allocate it now and get out.
2407 if (ifp->if_broot_bytes == 0) {
2408 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2409 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2411 ifp->if_broot_bytes = (int)new_size;
2416 * If there is already an existing if_broot, then we need
2417 * to realloc() it and shift the pointers to their new
2418 * location. The records don't change location because
2419 * they are kept butted up against the btree block header.
2421 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2422 new_max = cur_max + rec_diff;
2423 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2424 ifp->if_broot = (xfs_bmbt_block_t *)
2425 kmem_realloc(ifp->if_broot,
2427 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2429 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2430 ifp->if_broot_bytes);
2431 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2433 ifp->if_broot_bytes = (int)new_size;
2434 ASSERT(ifp->if_broot_bytes <=
2435 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2436 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2441 * rec_diff is less than 0. In this case, we are shrinking the
2442 * if_broot buffer. It must already exist. If we go to zero
2443 * records, just get rid of the root and clear the status bit.
2445 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2446 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2447 new_max = cur_max + rec_diff;
2448 ASSERT(new_max >= 0);
2450 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2454 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2456 * First copy over the btree block header.
2458 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2461 ifp->if_flags &= ~XFS_IFBROOT;
2465 * Only copy the records and pointers if there are any.
2469 * First copy the records.
2471 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2472 ifp->if_broot_bytes);
2473 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2475 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2478 * Then copy the pointers.
2480 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2481 ifp->if_broot_bytes);
2482 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2484 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2486 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2487 ifp->if_broot = new_broot;
2488 ifp->if_broot_bytes = (int)new_size;
2489 ASSERT(ifp->if_broot_bytes <=
2490 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2496 * This is called when the amount of space needed for if_data
2497 * is increased or decreased. The change in size is indicated by
2498 * the number of bytes that need to be added or deleted in the
2499 * byte_diff parameter.
2501 * If the amount of space needed has decreased below the size of the
2502 * inline buffer, then switch to using the inline buffer. Otherwise,
2503 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2504 * to what is needed.
2506 * ip -- the inode whose if_data area is changing
2507 * byte_diff -- the change in the number of bytes, positive or negative,
2508 * requested for the if_data array.
2520 if (byte_diff == 0) {
2524 ifp = XFS_IFORK_PTR(ip, whichfork);
2525 new_size = (int)ifp->if_bytes + byte_diff;
2526 ASSERT(new_size >= 0);
2528 if (new_size == 0) {
2529 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2530 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2532 ifp->if_u1.if_data = NULL;
2534 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2536 * If the valid extents/data can fit in if_inline_ext/data,
2537 * copy them from the malloc'd vector and free it.
2539 if (ifp->if_u1.if_data == NULL) {
2540 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2541 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2542 ASSERT(ifp->if_real_bytes != 0);
2543 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2545 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2546 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2551 * Stuck with malloc/realloc.
2552 * For inline data, the underlying buffer must be
2553 * a multiple of 4 bytes in size so that it can be
2554 * logged and stay on word boundaries. We enforce that here.
2557 real_size = roundup(new_size, 4);
2558 if (ifp->if_u1.if_data == NULL) {
2559 ASSERT(ifp->if_real_bytes == 0);
2560 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2561 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2563 * Only do the realloc if the underlying size
2564 * is really changing.
2566 if (ifp->if_real_bytes != real_size) {
2567 ifp->if_u1.if_data =
2568 kmem_realloc(ifp->if_u1.if_data,
2574 ASSERT(ifp->if_real_bytes == 0);
2575 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2576 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2580 ifp->if_real_bytes = real_size;
2581 ifp->if_bytes = new_size;
2582 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2589 * Map inode to disk block and offset.
2591 * mp -- the mount point structure for the current file system
2592 * tp -- the current transaction
2593 * ino -- the inode number of the inode to be located
2594 * imap -- this structure is filled in with the information necessary
2595 * to retrieve the given inode from disk
2596 * flags -- flags to pass to xfs_dilocate indicating whether
2597 * lookups in the inode btree are OK or not
2607 xfs_fsblock_t fsbno;
2612 fsbno = imap->im_blkno ?
2613 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2614 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2618 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2619 imap->im_len = XFS_FSB_TO_BB(mp, len);
2620 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2621 imap->im_ioffset = (ushort)off;
2622 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2625 * If the inode number maps to a block outside the bounds
2626 * of the file system then return NULL rather than calling
2627 * read_buf and panicking when we get an error from the driver.
2630 if ((imap->im_blkno + imap->im_len) >
2631 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2632 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
2633 "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
2634 " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
2635 (unsigned long long) imap->im_blkno,
2636 (unsigned long long) imap->im_len,
2637 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
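/*
 * Editorial note: a small worked example of the fields filled in
 * above, with assumed geometry that is not taken from this code.
 * With 256 byte inodes sb_inodelog is 8, so if the inode is the
 * sixth one in its chunk (off == 5):
 *
 *	im_ioffset = 5
 *	im_boffset = 5 << 8 = 1280 bytes into the inode buffer
 *
 * im_blkno and im_len describe the location and length of that
 * buffer expressed in 512 byte basic blocks.
 */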
2650 ifp = XFS_IFORK_PTR(ip, whichfork);
2651 if (ifp->if_broot != NULL) {
2652 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2653 ifp->if_broot = NULL;
2657 * If the format is local, then we can't have an extents
2658 * array so just look for an inline data array. If we're
2659 * not local then we may or may not have an extents list,
2660 * so check and free it up if we do.
2662 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2663 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2664 (ifp->if_u1.if_data != NULL)) {
2665 ASSERT(ifp->if_real_bytes != 0);
2666 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2667 ifp->if_u1.if_data = NULL;
2668 ifp->if_real_bytes = 0;
2670 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2671 ((ifp->if_flags & XFS_IFEXTIREC) ||
2672 ((ifp->if_u1.if_extents != NULL) &&
2673 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2674 ASSERT(ifp->if_real_bytes != 0);
2675 xfs_iext_destroy(ifp);
2677 ASSERT(ifp->if_u1.if_extents == NULL ||
2678 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2679 ASSERT(ifp->if_real_bytes == 0);
2680 if (whichfork == XFS_ATTR_FORK) {
2681 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2687 * This is called to free all the memory associated with an inode.
2688 * It must free the inode itself and any buffers allocated for
2689 * if_extents/if_data and if_broot. It must also free the lock
2690 * associated with the inode.
2696 switch (ip->i_d.di_mode & S_IFMT) {
2700 xfs_idestroy_fork(ip, XFS_DATA_FORK);
2704 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2705 mrfree(&ip->i_lock);
2706 mrfree(&ip->i_iolock);
2707 freesema(&ip->i_flock);
2709 #ifdef XFS_INODE_TRACE
2710 ktrace_free(ip->i_trace);
2712 #ifdef XFS_BMAP_TRACE
2713 ktrace_free(ip->i_xtrace);
2715 #ifdef XFS_BMBT_TRACE
2716 ktrace_free(ip->i_btrace);
2719 ktrace_free(ip->i_rwtrace);
2721 #ifdef XFS_ILOCK_TRACE
2722 ktrace_free(ip->i_lock_trace);
2724 #ifdef XFS_DIR2_TRACE
2725 ktrace_free(ip->i_dir_trace);
2729 * Only if we are shutting down the fs will we see an
2730 * inode still in the AIL. If it is there, we should remove
2731 * it to prevent a use-after-free from occurring.
2733 xfs_mount_t *mp = ip->i_mount;
2734 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2736 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2737 XFS_FORCED_SHUTDOWN(ip->i_mount));
2738 if (lip->li_flags & XFS_LI_IN_AIL) {
2739 spin_lock(&mp->m_ail_lock);
2740 if (lip->li_flags & XFS_LI_IN_AIL)
2741 xfs_trans_delete_ail(mp, lip);
2743 spin_unlock(&mp->m_ail_lock);
2745 xfs_inode_item_destroy(ip);
2747 kmem_zone_free(xfs_inode_zone, ip);
2752 * Increment the pin count of the given inode.
2753 * This value is protected by the ipinlock spinlock in the mount structure.
2759 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2761 atomic_inc(&ip->i_pincount);
2765 * Decrement the pin count of the given inode, and wake up
2766 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2767 * inode must have been previously pinned with a call to xfs_ipin().
2773 ASSERT(atomic_read(&ip->i_pincount) > 0);
2775 if (atomic_dec_and_test(&ip->i_pincount))
2776 wake_up(&ip->i_ipin_wait);
2780 * This is called to wait for the given inode to be unpinned.
2781 * It will sleep until this happens. The caller must have the
2782 * inode locked in at least shared mode so that the buffer cannot
2783 * be subsequently pinned once someone is waiting for it to be unpinned.
2790 xfs_inode_log_item_t *iip;
2793 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2795 if (atomic_read(&ip->i_pincount) == 0) {
2800 if (iip && iip->ili_last_lsn) {
2801 lsn = iip->ili_last_lsn;
2807 * Give the log a push so we don't wait here too long.
2809 xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2811 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2816 * xfs_iextents_copy()
2818 * This is called to copy the REAL extents (as opposed to the delayed
2819 * allocation extents) from the inode into the given buffer. It
2820 * returns the number of bytes copied into the buffer.
2822 * If there are no delayed allocation extents, then we can just
2823 * memcpy() the extents into the buffer. Otherwise, we need to
2824 * examine each extent in turn and skip those which are delayed.
2836 xfs_fsblock_t start_block;
2838 ifp = XFS_IFORK_PTR(ip, whichfork);
2839 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2840 ASSERT(ifp->if_bytes > 0);
2842 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2843 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2847 * There are some delayed allocation extents in the
2848 * inode, so copy the extents one at a time and skip
2849 * the delayed ones. There must be at least one
2850 * non-delayed extent.
2853 for (i = 0; i < nrecs; i++) {
2854 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2855 start_block = xfs_bmbt_get_startblock(ep);
2856 if (ISNULLSTARTBLOCK(start_block)) {
2858 * It's a delayed allocation extent, so skip it.
2863 /* Translate to on disk format */
2864 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2865 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2869 ASSERT(copied != 0);
2870 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2872 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2876 * Each of the following cases stores data into the same region
2877 * of the on-disk inode, so only one of them can be valid at
2878 * any given time. While it is possible to have conflicting formats
2879 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2880 * in EXTENTS format, this can only happen when the fork has
2881 * changed formats after being modified but before being flushed.
2882 * In these cases, the format always takes precedence, because the
2883 * format indicates the current state of the fork.
2890 xfs_inode_log_item_t *iip,
2897 #ifdef XFS_TRANS_DEBUG
2900 static const short brootflag[2] =
2901 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2902 static const short dataflag[2] =
2903 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2904 static const short extflag[2] =
2905 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2909 ifp = XFS_IFORK_PTR(ip, whichfork);
2911 * This can happen if we gave up in iformat in an error path,
2912 * for the attribute fork.
2915 ASSERT(whichfork == XFS_ATTR_FORK);
2918 cp = XFS_DFORK_PTR(dip, whichfork);
2920 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2921 case XFS_DINODE_FMT_LOCAL:
2922 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2923 (ifp->if_bytes > 0)) {
2924 ASSERT(ifp->if_u1.if_data != NULL);
2925 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2926 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2930 case XFS_DINODE_FMT_EXTENTS:
2931 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2932 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2933 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2934 (ifp->if_bytes == 0));
2935 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2936 (ifp->if_bytes > 0));
2937 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2938 (ifp->if_bytes > 0)) {
2939 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2940 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2945 case XFS_DINODE_FMT_BTREE:
2946 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2947 (ifp->if_broot_bytes > 0)) {
2948 ASSERT(ifp->if_broot != NULL);
2949 ASSERT(ifp->if_broot_bytes <=
2950 (XFS_IFORK_SIZE(ip, whichfork) +
2951 XFS_BROOT_SIZE_ADJ));
2952 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
2953 (xfs_bmdr_block_t *)cp,
2954 XFS_DFORK_SIZE(dip, mp, whichfork));
2958 case XFS_DINODE_FMT_DEV:
2959 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2960 ASSERT(whichfork == XFS_DATA_FORK);
2961 dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev);
2965 case XFS_DINODE_FMT_UUID:
2966 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2967 ASSERT(whichfork == XFS_DATA_FORK);
2968 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
2982 * xfs_iflush() will write a modified inode's changes out to the
2983 * inode's on disk home. The caller must have the inode lock held
2984 * in at least shared mode and the inode flush semaphore must be
2985 * held as well. The inode lock will still be held upon return from
2986 * the call and the caller is free to unlock it.
2987 * The inode flush lock will be unlocked when the inode reaches the disk.
2988 * The flags indicate how the inode's buffer should be written out.
2995 xfs_inode_log_item_t *iip;
3002 int clcount; /* count of inodes clustered */
3004 struct hlist_node *entry;
3005 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3007 XFS_STATS_INC(xs_iflush_count);
3009 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3010 ASSERT(issemalocked(&(ip->i_flock)));
3011 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3012 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3018 * If the inode isn't dirty, then just release the inode
3019 * flush lock and do nothing.
3021 if ((ip->i_update_core == 0) &&
3022 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3023 ASSERT((iip != NULL) ?
3024 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3030 * We can't flush the inode until it is unpinned, so
3031 * wait for it. We know no one new can pin it, because
3032 * we are holding the inode lock shared and you need
3033 * to hold it exclusively to pin the inode.
3035 xfs_iunpin_wait(ip);
3038 * This may have been unpinned because the filesystem is shutting
3039 * down forcibly. If that's the case we must not write this inode
3040 * to disk, because the log record didn't make it to disk!
3042 if (XFS_FORCED_SHUTDOWN(mp)) {
3043 ip->i_update_core = 0;
3045 iip->ili_format.ilf_fields = 0;
3047 return XFS_ERROR(EIO);
3051 * Get the buffer containing the on-disk inode.
3053 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3060 * Decide how the buffer will be flushed out. This is done before
3061 * the call to xfs_iflush_int because this field is zeroed by it.
3063 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3065 * Flush out the inode buffer according to the directions
3066 * of the caller. In the cases where the caller has given
3067 * us a choice, choose the non-delwri case. This is because
3068 * the inode is in the AIL and we need to get it out soon.
3071 case XFS_IFLUSH_SYNC:
3072 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3075 case XFS_IFLUSH_ASYNC:
3076 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3079 case XFS_IFLUSH_DELWRI:
3089 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3090 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3091 case XFS_IFLUSH_DELWRI:
3094 case XFS_IFLUSH_ASYNC:
3097 case XFS_IFLUSH_SYNC:
3108 * First flush out the inode that xfs_iflush was called with.
3110 error = xfs_iflush_int(ip, bp);
3117 * see if other inodes can be gathered into this write
3119 spin_lock(&ip->i_cluster->icl_lock);
3120 ip->i_cluster->icl_buf = bp;
3123 hlist_for_each_entry(iq, entry, &ip->i_cluster->icl_inodes, i_cnode) {
3128 * Do an un-protected check to see if the inode is dirty and
3129 * is a candidate for flushing. These checks will be repeated
3130 * later after the appropriate locks are acquired.
3133 if ((iq->i_update_core == 0) &&
3135 !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3136 xfs_ipincount(iq) == 0) {
3141 * Try to get locks. If any are unavailable,
3142 * then this inode cannot be flushed and is skipped.
3145 /* get inode locks (just i_lock) */
3146 if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3147 /* get inode flush lock */
3148 if (xfs_iflock_nowait(iq)) {
3149 /* check if pinned */
3150 if (xfs_ipincount(iq) == 0) {
3151 /* Arriving here means that
3152 * this inode can be flushed.
3153 * First re-check that it's dirty. */
3157 if ((iq->i_update_core != 0)||
3159 (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3161 error = xfs_iflush_int(iq, bp);
3165 goto cluster_corrupt_out;
3174 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3177 spin_unlock(&ip->i_cluster->icl_lock);
3180 XFS_STATS_INC(xs_icluster_flushcnt);
3181 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3185 * If the buffer is pinned then push on the log so we won't
3186 * get stuck waiting in the write for too long.
3188 if (XFS_BUF_ISPINNED(bp)) {
3189 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3192 if (flags & INT_DELWRI) {
3193 xfs_bdwrite(mp, bp);
3194 } else if (flags & INT_ASYNC) {
3195 xfs_bawrite(mp, bp);
3197 error = xfs_bwrite(mp, bp);
3203 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3204 xfs_iflush_abort(ip);
3206 * Unlocks the flush lock
3208 return XFS_ERROR(EFSCORRUPTED);
3210 cluster_corrupt_out:
3211 /* Corruption detected in the clustering loop. Invalidate the
3212 * inode buffer and shut down the filesystem.
3214 spin_unlock(&ip->i_cluster->icl_lock);
3217 * Clean up the buffer. If it was B_DELWRI, just release it --
3218 * brelse can handle it with no problems. If not, shut down the
3219 * filesystem before releasing the buffer.
3221 if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
3225 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3229 * Just like incore_relse: if we have b_iodone functions,
3230 * mark the buffer as an error and call them. Otherwise
3231 * mark it as stale and brelse.
3233 if (XFS_BUF_IODONE_FUNC(bp)) {
3234 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3238 XFS_BUF_ERROR(bp,EIO);
3246 xfs_iflush_abort(iq);
3248 * Unlocks the flush lock
3250 return XFS_ERROR(EFSCORRUPTED);
3259 xfs_inode_log_item_t *iip;
3262 #ifdef XFS_TRANS_DEBUG
3266 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3267 ASSERT(issemalocked(&(ip->i_flock)));
3268 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3269 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3276 * If the inode isn't dirty, then just release the inode
3277 * flush lock and do nothing.
3279 if ((ip->i_update_core == 0) &&
3280 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3285 /* set *dip = inode's place in the buffer */
3286 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3289 * Clear i_update_core before copying out the data.
3290 * This is for coordination with our timestamp updates
3291 * that don't hold the inode lock. They will always
3292 * update the timestamps BEFORE setting i_update_core,
3293 * so if we clear i_update_core after they set it we
3294 * are guaranteed to see their updates to the timestamps.
3295 * I believe that this depends on strongly ordered memory
3296 * semantics, but we have that. We use the SYNCHRONIZE
3297 * macro to make sure that the compiler does not reorder
3298 * the i_update_core store below the data copy that follows.
3300 ip->i_update_core = 0;
3304 * Make sure to get the latest atime from the Linux inode.
3306 xfs_synchronize_atime(ip);
3308 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC,
3309 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3310 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3311 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3312 ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip);
3315 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3316 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3317 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3318 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3319 ip->i_ino, ip, ip->i_d.di_magic);
3322 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3324 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3325 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3326 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3327 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3328 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3332 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3334 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3335 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3336 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3337 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3338 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3339 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3344 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3345 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3346 XFS_RANDOM_IFLUSH_5)) {
3347 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3348 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3350 ip->i_d.di_nextents + ip->i_d.di_anextents,
3355 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3356 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3357 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3358 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3359 ip->i_ino, ip->i_d.di_forkoff, ip);
3363 * bump the flush iteration count, used to detect flushes which
3364 * postdate a log record during recovery.
3367 ip->i_d.di_flushiter++;
3370 * Copy the dirty parts of the inode into the on-disk
3371 * inode. We always copy out the core of the inode,
3372 * because if the inode is dirty at all the core must
3375 xfs_dinode_to_disk(&dip->di_core, &ip->i_d);
3377 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3378 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3379 ip->i_d.di_flushiter = 0;
3382 * If this is really an old format inode and the superblock version
3383 * has not been updated to support only new format inodes, then
3384 * convert back to the old inode format. If the superblock version
3385 * has been updated, then make the conversion permanent.
3387 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3388 xfs_sb_version_hasnlink(&mp->m_sb));
3389 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3390 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
3394 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3395 dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3398 * The superblock version has already been bumped,
3399 * so just make the conversion to the new inode format permanent.
3402 ip->i_d.di_version = XFS_DINODE_VERSION_2;
3403 dip->di_core.di_version = XFS_DINODE_VERSION_2;
3404 ip->i_d.di_onlink = 0;
3405 dip->di_core.di_onlink = 0;
3406 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3407 memset(&(dip->di_core.di_pad[0]), 0,
3408 sizeof(dip->di_core.di_pad));
3409 ASSERT(ip->i_d.di_projid == 0);
3413 if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3417 if (XFS_IFORK_Q(ip)) {
3419 * The only error from xfs_iflush_fork is on the data fork.
3421 (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3423 xfs_inobp_check(mp, bp);
3426 * We've recorded everything logged in the inode, so we'd
3427 * like to clear the ilf_fields bits so we don't log and
3428 * flush things unnecessarily. However, we can't stop
3429 * logging all this information until the data we've copied
3430 * into the disk buffer is written to disk. If we did we might
3431 * overwrite the copy of the inode in the log with all the
3432 * data after re-logging only part of it, and in the face of
3433 * a crash we wouldn't have all the data we need to recover.
3435 * What we do is move the bits to the ili_last_fields field.
3436 * When logging the inode, these bits are moved back to the
3437 * ilf_fields field. In the xfs_iflush_done() routine we
3438 * clear ili_last_fields, since we know that the information
3439 * those bits represent is permanently on disk. As long as
3440 * the flush completes before the inode is logged again, then
3441 * both ilf_fields and ili_last_fields will be cleared.
3443 * We can play with the ilf_fields bits here, because the inode
3444 * lock must be held exclusively in order to set bits there
3445 * and the flush lock protects the ili_last_fields bits.
3446 * Set ili_logged so the flush done
3447 * routine can tell whether or not to look in the AIL.
3448 * Also, store the current LSN of the inode so that we can tell
3449 * whether the item has moved in the AIL from xfs_iflush_done().
3450 * In order to read the lsn we need the AIL lock, because
3451 * it is a 64 bit value that cannot be read atomically.
3453 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3454 iip->ili_last_fields = iip->ili_format.ilf_fields;
3455 iip->ili_format.ilf_fields = 0;
3456 iip->ili_logged = 1;
3458 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
3459 spin_lock(&mp->m_ail_lock);
3460 iip->ili_flush_lsn = iip->ili_item.li_lsn;
3461 spin_unlock(&mp->m_ail_lock);
3464 * Attach the function xfs_iflush_done to the inode's
3465 * buffer. This will remove the inode from the AIL
3466 * and unlock the inode's flush lock when the inode is
3467 * completely written to disk.
3469 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3470 xfs_iflush_done, (xfs_log_item_t *)iip);
3472 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3473 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3476 * We're flushing an inode which is not in the AIL and has
3477 * not been logged but has i_update_core set. For this
3478 * case we can use a B_DELWRI flush and immediately drop
3479 * the inode flush lock because we can avoid the whole
3480 * AIL state thing. It's OK to drop the flush lock now,
3481 * because we've already locked the buffer and to do anything
3482 * you really need both.
3485 ASSERT(iip->ili_logged == 0);
3486 ASSERT(iip->ili_last_fields == 0);
3487 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3495 return XFS_ERROR(EFSCORRUPTED);
3500 * Flush all inactive inodes in mp.
3510 XFS_MOUNT_ILOCK(mp);
3516 /* Make sure we skip markers inserted by sync */
3517 if (ip->i_mount == NULL) {
3522 vp = XFS_ITOV_NULL(ip);
3524 XFS_MOUNT_IUNLOCK(mp);
3525 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3529 ASSERT(vn_count(vp) == 0);
3532 } while (ip != mp->m_inodes);
3534 XFS_MOUNT_IUNLOCK(mp);
3537 #ifdef XFS_ILOCK_TRACE
3538 ktrace_t *xfs_ilock_trace_buf;
3541 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3543 ktrace_enter(ip->i_lock_trace,
3545 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3546 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3547 (void *)ra, /* caller of ilock */
3548 (void *)(unsigned long)current_cpu(),
3549 (void *)(unsigned long)current_pid(),
3550 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3555 * Return a pointer to the extent record at file index idx.
3557 xfs_bmbt_rec_host_t *
3559 xfs_ifork_t *ifp, /* inode fork pointer */
3560 xfs_extnum_t idx) /* index of target extent */
3563 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3564 return ifp->if_u1.if_ext_irec->er_extbuf;
3565 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3566 xfs_ext_irec_t *erp; /* irec pointer */
3567 int erp_idx = 0; /* irec index */
3568 xfs_extnum_t page_idx = idx; /* ext index in target list */
3570 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3571 return &erp->er_extbuf[page_idx];
3572 } else if (ifp->if_bytes) {
3573 return &ifp->if_u1.if_extents[idx];
3580 * Insert new item(s) into the extent records for incore inode
3581 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3585 xfs_ifork_t *ifp, /* inode fork pointer */
3586 xfs_extnum_t idx, /* starting index of new items */
3587 xfs_extnum_t count, /* number of inserted items */
3588 xfs_bmbt_irec_t *new) /* items to insert */
3590 xfs_extnum_t i; /* extent record index */
3592 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3593 xfs_iext_add(ifp, idx, count);
3594 for (i = idx; i < idx + count; i++, new++)
3595 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3599 * This is called when the amount of space required for incore file
3600 * extents needs to be increased. The ext_diff parameter stores the
3601 * number of new extents being added and the idx parameter contains
3602 * the extent index where the new extents will be added. If the new
3603 * extents are being appended, then we just need to (re)allocate and
3604 * initialize the space. Otherwise, if the new extents are being
3605 * inserted into the middle of the existing entries, a bit more work
3606 * is required to make room for the new extents to be inserted. The
3607 * caller is responsible for filling in the new extent entries upon return.
3612 xfs_ifork_t *ifp, /* inode fork pointer */
3613 xfs_extnum_t idx, /* index to begin adding exts */
3614 int ext_diff) /* number of extents to add */
3616 int byte_diff; /* new bytes being added */
3617 int new_size; /* size of extents after adding */
3618 xfs_extnum_t nextents; /* number of extents in file */
3620 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3621 ASSERT((idx >= 0) && (idx <= nextents));
3622 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3623 new_size = ifp->if_bytes + byte_diff;
3625 * If the new number of extents (nextents + ext_diff)
3626 * fits inside the inode, then continue to use the inline extent list.
3629 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3630 if (idx < nextents) {
3631 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3632 &ifp->if_u2.if_inline_ext[idx],
3633 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3634 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3636 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3637 ifp->if_real_bytes = 0;
3638 ifp->if_lastex = nextents + ext_diff;
3641 * Otherwise use a linear (direct) extent list.
3642 * If the extents are currently inside the inode,
3643 * xfs_iext_realloc_direct will switch us from
3644 * inline to direct extent allocation mode.
3646 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3647 xfs_iext_realloc_direct(ifp, new_size);
3648 if (idx < nextents) {
3649 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3650 &ifp->if_u1.if_extents[idx],
3651 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3652 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3655 /* Indirection array */
3657 xfs_ext_irec_t *erp;
3661 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3662 if (ifp->if_flags & XFS_IFEXTIREC) {
3663 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3665 xfs_iext_irec_init(ifp);
3666 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3667 erp = ifp->if_u1.if_ext_irec;
3669 /* Extents fit in target extent page */
3670 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3671 if (page_idx < erp->er_extcount) {
3672 memmove(&erp->er_extbuf[page_idx + ext_diff],
3673 &erp->er_extbuf[page_idx],
3674 (erp->er_extcount - page_idx) *
3675 sizeof(xfs_bmbt_rec_t));
3676 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3678 erp->er_extcount += ext_diff;
3679 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3681 /* Insert a new extent page */
3683 xfs_iext_add_indirect_multi(ifp,
3684 erp_idx, page_idx, ext_diff);
3687 * If extent(s) are being appended to the last page in
3688 * the indirection array and the new extent(s) don't fit
3689 * in the page, then erp is NULL and erp_idx is set to
3690 * the next index needed in the indirection array.
3693 int count = ext_diff;
3696 erp = xfs_iext_irec_new(ifp, erp_idx);
3697 erp->er_extcount = count;
3698 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3705 ifp->if_bytes = new_size;
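/*
 * Editorial note: the function above moves the incore extent list
 * between three storage regimes as it grows. The sketch below shows
 * only the decision, not the copying; inline_max and linear_max stand
 * in for XFS_INLINE_EXTS and XFS_LINEAR_EXTS, and the enum and
 * function names are invented for illustration.
 */
enum example_ext_layout {
	EXAMPLE_INLINE,		/* records live inside the inode fork */
	EXAMPLE_DIRECT,		/* one contiguous allocated buffer */
	EXAMPLE_INDIRECT	/* indirection array of extent pages */
};

static enum example_ext_layout
example_choose_layout(
	int	nextents,	/* extent count after the addition */
	int	inline_max,
	int	linear_max)
{
	if (nextents <= inline_max)
		return EXAMPLE_INLINE;
	if (nextents <= linear_max)
		return EXAMPLE_DIRECT;
	return EXAMPLE_INDIRECT;
}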
3709 * This is called when incore extents are being added to the indirection
3710 * array and the new extents do not fit in the target extent list. The
3711 * erp_idx parameter contains the irec index for the target extent list
3712 * in the indirection array, and the idx parameter contains the extent
3713 * index within the list. The number of extents being added is stored
3714 * in the count parameter.
3716 * |-------|   |-------|
3717 * |       |   |       |    idx - number of extents before idx
3718 * |  idx  |   | count |
3719 * |       |   |       |    count - number of extents being inserted at idx
3720 * |-------|   |-------|
3721 * | count |   | nex2  |    nex2 - number of extents after idx + count
3722 * |-------|   |-------|
3725 xfs_iext_add_indirect_multi(
3726 xfs_ifork_t *ifp, /* inode fork pointer */
3727 int erp_idx, /* target extent irec index */
3728 xfs_extnum_t idx, /* index within target list */
3729 int count) /* new extents being added */
3731 int byte_diff; /* new bytes being added */
3732 xfs_ext_irec_t *erp; /* pointer to irec entry */
3733 xfs_extnum_t ext_diff; /* number of extents to add */
3734 xfs_extnum_t ext_cnt; /* new extents still needed */
3735 xfs_extnum_t nex2; /* extents after idx + count */
3736 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3737 int nlists; /* number of irec's (lists) */
3739 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3740 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3741 nex2 = erp->er_extcount - idx;
3742 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3745 * Save second part of target extent list
3746 * (all extents past idx). */
3748 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3749 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3750 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3751 erp->er_extcount -= nex2;
3752 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3753 memset(&erp->er_extbuf[idx], 0, byte_diff);
3757 * Add the new extents to the end of the target
3758 * list, then allocate new irec record(s) and
3759 * extent buffer(s) as needed to store the rest
3760 * of the new extents.
3763 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3765 erp->er_extcount += ext_diff;
3766 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3767 ext_cnt -= ext_diff;
3771 erp = xfs_iext_irec_new(ifp, erp_idx);
3772 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3773 erp->er_extcount = ext_diff;
3774 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3775 ext_cnt -= ext_diff;
3778 /* Add nex2 extents back to indirection array */
3780 xfs_extnum_t ext_avail;
3783 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3784 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3787 * If nex2 extents fit in the current page, append
3788 * nex2_ep after the new extents.
3790 if (nex2 <= ext_avail) {
3791 i = erp->er_extcount;
3794 * Otherwise, check if space is available in the next page.
3797 else if ((erp_idx < nlists - 1) &&
3798 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3799 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3802 /* Create a hole for nex2 extents */
3803 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3804 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3807 * Final choice, create a new extent page for the nex2 extents.
3812 erp = xfs_iext_irec_new(ifp, erp_idx);
3814 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3815 kmem_free(nex2_ep, byte_diff);
3816 erp->er_extcount += nex2;
3817 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3822 * This is called when the amount of space required for incore file
3823 * extents needs to be decreased. The ext_diff parameter stores the
3824 * number of extents to be removed and the idx parameter contains
3825 * the extent index where the extents will be removed from.
3827 * If the amount of space needed has decreased below the linear
3828 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3829 * extent array. Otherwise, use kmem_realloc() to adjust the
3830 * size to what is needed.
3834 xfs_ifork_t *ifp, /* inode fork pointer */
3835 xfs_extnum_t idx, /* index to begin removing exts */
3836 int ext_diff) /* number of extents to remove */
3838 xfs_extnum_t nextents; /* number of extents in file */
3839 int new_size; /* size of extents after removal */
3841 ASSERT(ext_diff > 0);
3842 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3843 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3845 if (new_size == 0) {
3846 xfs_iext_destroy(ifp);
3847 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3848 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3849 } else if (ifp->if_real_bytes) {
3850 xfs_iext_remove_direct(ifp, idx, ext_diff);
3852 xfs_iext_remove_inline(ifp, idx, ext_diff);
3854 ifp->if_bytes = new_size;
3858 * This removes ext_diff extents from the inline buffer, beginning
3859 * at extent index idx.
3862 xfs_iext_remove_inline(
3863 xfs_ifork_t *ifp, /* inode fork pointer */
3864 xfs_extnum_t idx, /* index to begin removing exts */
3865 int ext_diff) /* number of extents to remove */
3867 int nextents; /* number of extents in file */
3869 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3870 ASSERT(idx < XFS_INLINE_EXTS);
3871 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3872 ASSERT(((nextents - ext_diff) > 0) &&
3873 (nextents - ext_diff) < XFS_INLINE_EXTS);
3875 if (idx + ext_diff < nextents) {
3876 memmove(&ifp->if_u2.if_inline_ext[idx],
3877 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3878 (nextents - (idx + ext_diff)) *
3879 sizeof(xfs_bmbt_rec_t));
3880 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3881 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3883 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3884 ext_diff * sizeof(xfs_bmbt_rec_t));
3889 * This removes ext_diff extents from a linear (direct) extent list,
3890 * beginning at extent index idx. If the extents are being removed
3891 * from the end of the list (i.e. truncate) then we just need to re-
3892 * allocate the list to remove the extra space. Otherwise, if the
3893 * extents are being removed from the middle of the existing extent
3894 * entries, then we first need to move the extent records beginning
3895 * at idx + ext_diff up in the list to overwrite the records being
3896 * removed, then remove the extra space via kmem_realloc.
3899 xfs_iext_remove_direct(
3900 xfs_ifork_t *ifp, /* inode fork pointer */
3901 xfs_extnum_t idx, /* index to begin removing exts */
3902 int ext_diff) /* number of extents to remove */
3904 xfs_extnum_t nextents; /* number of extents in file */
3905 int new_size; /* size of extents after removal */
3907 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3908 new_size = ifp->if_bytes -
3909 (ext_diff * sizeof(xfs_bmbt_rec_t));
3910 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3912 if (new_size == 0) {
3913 xfs_iext_destroy(ifp);
3916 /* Move extents up in the list (if needed) */
3917 if (idx + ext_diff < nextents) {
3918 memmove(&ifp->if_u1.if_extents[idx],
3919 &ifp->if_u1.if_extents[idx + ext_diff],
3920 (nextents - (idx + ext_diff)) *
3921 sizeof(xfs_bmbt_rec_t));
3923 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3924 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3926 * Reallocate the direct extent list. If the extents
3927 * will fit inside the inode then xfs_iext_realloc_direct
3928 * will switch from direct to inline extent allocation mode for us.
3931 xfs_iext_realloc_direct(ifp, new_size);
3932 ifp->if_bytes = new_size;
3936 * This is called when incore extents are being removed from the
3937 * indirection array and the extents being removed span multiple extent
3938 * buffers. The idx parameter contains the file extent index where we
3939 * want to begin removing extents, and the count parameter contains
3940 * how many extents need to be removed.
3942 * |-------|   |-------|
3943 * | nex1  |   |       |    nex1 - number of extents before idx
3944 * |-------|   | count |
3945 * |       |   |       |    count - number of extents being removed at idx
3946 * | count |   |-------|
3947 * |       |   | nex2  |    nex2 - number of extents after idx + count
3948 * |-------|   |-------|
3951 xfs_iext_remove_indirect(
3952 xfs_ifork_t *ifp, /* inode fork pointer */
3953 xfs_extnum_t idx, /* index to begin removing extents */
3954 int count) /* number of extents to remove */
3956 xfs_ext_irec_t *erp; /* indirection array pointer */
3957 int erp_idx = 0; /* indirection array index */
3958 xfs_extnum_t ext_cnt; /* extents left to remove */
3959 xfs_extnum_t ext_diff; /* extents to remove in current list */
3960 xfs_extnum_t nex1; /* number of extents before idx */
3961 xfs_extnum_t nex2; /* extents after idx + count */
3962 int nlists; /* entries in indirection array */
3963 int page_idx = idx; /* index in target extent list */
3965 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3966 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3967 ASSERT(erp != NULL);
3968 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3972 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3973 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3975 * Check for deletion of entire list;
3976 * xfs_iext_irec_remove() updates extent offsets.
3978 if (ext_diff == erp->er_extcount) {
3979 xfs_iext_irec_remove(ifp, erp_idx);
3980 ext_cnt -= ext_diff;
3983 ASSERT(erp_idx < ifp->if_real_bytes /
3985 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3992 /* Move extents up (if needed) */
3994 memmove(&erp->er_extbuf[nex1],
3995 &erp->er_extbuf[nex1 + ext_diff],
3996 nex2 * sizeof(xfs_bmbt_rec_t));
3998 /* Zero out rest of page */
3999 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4000 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4001 /* Update remaining counters */
4002 erp->er_extcount -= ext_diff;
4003 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4004 ext_cnt -= ext_diff;
4009 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4010 xfs_iext_irec_compact(ifp);
4014 * Create, destroy, or resize a linear (direct) block of extents.
4017 xfs_iext_realloc_direct(
4018 xfs_ifork_t *ifp, /* inode fork pointer */
4019 int new_size) /* new size of extents */
4021 int rnew_size; /* real new size of extents */
4023 rnew_size = new_size;
4025 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4026 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4027 (new_size != ifp->if_real_bytes)));
4029 /* Free extent records */
4030 if (new_size == 0) {
4031 xfs_iext_destroy(ifp);
4033 /* Resize direct extent list and zero any new bytes */
4034 else if (ifp->if_real_bytes) {
4035 /* Check if extents will fit inside the inode */
4036 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4037 xfs_iext_direct_to_inline(ifp, new_size /
4038 (uint)sizeof(xfs_bmbt_rec_t));
4039 ifp->if_bytes = new_size;
4042 if (!is_power_of_2(new_size)) {
4043 rnew_size = roundup_pow_of_two(new_size);
4045 if (rnew_size != ifp->if_real_bytes) {
4046 ifp->if_u1.if_extents =
4047 kmem_realloc(ifp->if_u1.if_extents,
4052 if (rnew_size > ifp->if_real_bytes) {
4053 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4054 (uint)sizeof(xfs_bmbt_rec_t)], 0,
4055 rnew_size - ifp->if_real_bytes);
4059 * Switch from the inline extent buffer to a direct
4060 * extent list. Be sure to include the inline extent
4061 * bytes in new_size.
4064 new_size += ifp->if_bytes;
4065 if (!is_power_of_2(new_size)) {
4066 rnew_size = roundup_pow_of_two(new_size);
4068 xfs_iext_inline_to_direct(ifp, rnew_size);
4070 ifp->if_real_bytes = rnew_size;
4071 ifp->if_bytes = new_size;
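/*
 * Editorial note: the direct extent buffer above is kept at
 * power-of-two sizes so that repeated add/remove calls settle on a
 * small set of allocation sizes. A minimal sketch of that rounding
 * policy, reusing is_power_of_2() and roundup_pow_of_two() from
 * <linux/log2.h> (already included by this file) and assuming a
 * requested size greater than zero; the function name is invented:
 *
 *	48 bytes -> 64 bytes, 1024 -> 1024, 1040 -> 2048
 */
static int
example_round_size(
	int	new_size)	/* requested buffer size, > 0 */
{
	if (!is_power_of_2(new_size))
		return roundup_pow_of_two(new_size);
	return new_size;
}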
4075 * Switch from linear (direct) extent records to inline buffer.
4078 xfs_iext_direct_to_inline(
4079 xfs_ifork_t *ifp, /* inode fork pointer */
4080 xfs_extnum_t nextents) /* number of extents in file */
4082 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4083 ASSERT(nextents <= XFS_INLINE_EXTS);
4085 * The inline buffer was zeroed when we switched
4086 * from inline to direct extent allocation mode,
4087 * so we don't need to clear it here.
4089 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4090 nextents * sizeof(xfs_bmbt_rec_t));
4091 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4092 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4093 ifp->if_real_bytes = 0;
4097 * Switch from inline buffer to linear (direct) extent records.
4098 * new_size should already be rounded up to the next power of 2
4099 * by the caller (when appropriate), so use new_size as it is.
4100 * However, since new_size may be rounded up, we can't update
4101 * if_bytes here. It is the caller's responsibility to update
4102 * if_bytes upon return.
4105 xfs_iext_inline_to_direct(
4106 xfs_ifork_t *ifp, /* inode fork pointer */
4107 int new_size) /* number of extents in file */
4109 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_SLEEP);
4110 memset(ifp->if_u1.if_extents, 0, new_size);
4111 if (ifp->if_bytes) {
4112 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4114 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4115 sizeof(xfs_bmbt_rec_t));
4117 ifp->if_real_bytes = new_size;
4121 * Resize an extent indirection array to new_size bytes.
4124 xfs_iext_realloc_indirect(
4125 xfs_ifork_t *ifp, /* inode fork pointer */
4126 int new_size) /* new indirection array size */
4128 int nlists; /* number of irec's (ex lists) */
4129 int size; /* current indirection array size */
4131 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4132 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4133 size = nlists * sizeof(xfs_ext_irec_t);
4134 ASSERT(ifp->if_real_bytes);
4135 ASSERT((new_size >= 0) && (new_size != size));
4136 if (new_size == 0) {
4137 xfs_iext_destroy(ifp);
4139 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4140 kmem_realloc(ifp->if_u1.if_ext_irec,
4141 new_size, size, KM_SLEEP);
4146 * Switch from indirection array to linear (direct) extent allocations.
4149 xfs_iext_indirect_to_direct(
4150 xfs_ifork_t *ifp) /* inode fork pointer */
4152 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
4153 xfs_extnum_t nextents; /* number of extents in file */
4154 int size; /* size of file extents */
4156 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4157 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4158 ASSERT(nextents <= XFS_LINEAR_EXTS);
4159 size = nextents * sizeof(xfs_bmbt_rec_t);
4161 xfs_iext_irec_compact_full(ifp);
4162 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4164 ep = ifp->if_u1.if_ext_irec->er_extbuf;
4165 kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4166 ifp->if_flags &= ~XFS_IFEXTIREC;
4167 ifp->if_u1.if_extents = ep;
4168 ifp->if_bytes = size;
4169 if (nextents < XFS_LINEAR_EXTS) {
4170 xfs_iext_realloc_direct(ifp, size);
4175 * Free incore file extents.
4179 xfs_ifork_t *ifp) /* inode fork pointer */
4181 if (ifp->if_flags & XFS_IFEXTIREC) {
4185 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4186 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4187 xfs_iext_irec_remove(ifp, erp_idx);
4189 ifp->if_flags &= ~XFS_IFEXTIREC;
4190 } else if (ifp->if_real_bytes) {
4191 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4192 } else if (ifp->if_bytes) {
4193 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4194 sizeof(xfs_bmbt_rec_t));
4196 ifp->if_u1.if_extents = NULL;
4197 ifp->if_real_bytes = 0;

/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
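
	/*
	 * The search fell through: no extent contains bno. Hand back
	 * the first extent that starts after bno instead, or NULL if
	 * bno lies beyond the last extent in the fork.
	 */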
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
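
	/*
	 * Binary search over the indirection array entries, keyed on
	 * the start offset of the first extent record in each page
	 * (extent records are kept sorted within and across pages).
	 */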
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}

/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0 && page_idx <=
		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
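
	/*
	 * When realloc is set the caller is about to insert a record
	 * at *idxp, so an index that lands exactly on the boundary
	 * between two extent pages may resolve either to the end of
	 * the earlier page (if it still has room) or to the start of
	 * the following one.
	 */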
	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}

/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = (xfs_ext_irec_t *)
		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
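
	/*
	 * Make sure the incore extents live in a single full-size
	 * XFS_IEXT_BUFSZ buffer, whatever representation was in use
	 * before; that buffer becomes the first page referenced by
	 * the new indirection array.
	 */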
	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
	return;
}

/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return (&erp[erp_idx]);
}
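
/*
 * Note: xfs_iext_irec_new() resizes the indirection array with
 * kmem_realloc(), so the array may move; callers should use the
 * returned pointer (or re-read if_u1.if_ext_irec) rather than any
 * irec pointers held from before the call.
 */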

/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec,
			sizeof(xfs_ext_irec_t));
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}

/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible. The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 *    Full Compaction: Extents occupy less than 12.5% of allocated space
 * Partial Compaction: Extents occupy > 12.5% and < 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
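
	/*
	 * nlists * XFS_LINEAR_EXTS is the total record capacity of the
	 * indirection array, so the two shifts below implement the 1/8
	 * (full compaction) and 1/2 (partial compaction) thresholds
	 * described above.
	 */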
	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
		xfs_iext_irec_compact_full(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}

/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
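
	/*
	 * Walk the indirection array and merge each page into its
	 * predecessor whenever the whole of the next page fits into
	 * the free space of the current one; neighbors that do not
	 * fit entirely are left alone.
	 */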
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memmove(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}

/*
 * Fully compact the extent records managed by the indirection array.
 */
void
xfs_iext_irec_compact_full(
	xfs_ifork_t	*ifp)			/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep, *ep_next;	/* extent record pointers */
	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
	int		erp_idx = 0;		/* extent irec index */
	int		ext_avail;		/* empty entries in ex list */
	int		ext_diff;		/* number of exts to add */
	int		nlists;			/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = ifp->if_u1.if_ext_irec;
	ep = &erp->er_extbuf[erp->er_extcount];
	erp_next = erp + 1;
	ep_next = erp_next->er_extbuf;
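
	/*
	 * Pack extent records to the left, page by page: fill the
	 * current page from the next one, then either remove the next
	 * page (if it was drained) or shift its remaining records to
	 * its front, and advance once the current page is full.
	 */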
	while (erp_idx < nlists - 1) {
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		ext_diff = MIN(ext_avail, erp_next->er_extcount);
		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
		erp->er_extcount += ext_diff;
		erp_next->er_extcount -= ext_diff;
		/* Remove next page */
		if (erp_next->er_extcount == 0) {
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf,
				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			erp = &ifp->if_u1.if_ext_irec[erp_idx];
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		/* Update next page */
		} else {
			/* Move rest of page up to become next new page */
			memmove(erp_next->er_extbuf, ep_next,
				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
			ep_next = erp_next->er_extbuf;
			memset(&ep_next[erp_next->er_extcount], 0,
				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
				sizeof(xfs_bmbt_rec_t));
		}
		if (erp->er_extcount == XFS_LINEAR_EXTS) {
			erp_idx++;
			if (erp_idx < nlists)
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
			else
				break;
		}
		ep = &erp->er_extbuf[erp->er_extcount];
		erp_next = erp + 1;
		ep_next = erp_next->er_extbuf;
	}
}

/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}