2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include <linux/log2.h>
22 #include "xfs_types.h"
26 #include "xfs_trans.h"
27 #include "xfs_trans_priv.h"
31 #include "xfs_dmapi.h"
32 #include "xfs_mount.h"
33 #include "xfs_bmap_btree.h"
34 #include "xfs_alloc_btree.h"
35 #include "xfs_ialloc_btree.h"
36 #include "xfs_dir2_sf.h"
37 #include "xfs_attr_sf.h"
38 #include "xfs_dinode.h"
39 #include "xfs_inode.h"
40 #include "xfs_buf_item.h"
41 #include "xfs_inode_item.h"
42 #include "xfs_btree.h"
43 #include "xfs_btree_trace.h"
44 #include "xfs_alloc.h"
45 #include "xfs_ialloc.h"
48 #include "xfs_error.h"
49 #include "xfs_utils.h"
50 #include "xfs_dir2_trace.h"
51 #include "xfs_quota.h"
53 #include "xfs_filestream.h"
54 #include "xfs_vnodeops.h"
56 kmem_zone_t *xfs_ifork_zone;
57 kmem_zone_t *xfs_inode_zone;
60 * Used in xfs_itruncate_finish(). This is the maximum number of extents
61 * freed from a file in a single transaction.
63 #define XFS_ITRUNC_MAX_EXTENTS 2
65 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
66 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
67 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
68 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
72 * Make sure that the extents in the given memory buffer are valid.
82 xfs_bmbt_rec_host_t rec;
85 for (i = 0; i < nrecs; i++) {
86 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
87 rec.l0 = get_unaligned(&ep->l0);
88 rec.l1 = get_unaligned(&ep->l1);
89 xfs_bmbt_get_all(&rec, &irec);
90 if (fmt == XFS_EXTFMT_NOSTATE)
91 ASSERT(irec.br_state == XFS_EXT_NORM);
95 #define xfs_validate_extents(ifp, nrecs, fmt)
99 * Check that none of the inodes in the buffer have a next
100 * unlinked field of 0.
112 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
114 for (i = 0; i < j; i++) {
115 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
116 i * mp->m_sb.sb_inodesize);
117 if (!dip->di_next_unlinked) {
118 xfs_fs_cmn_err(CE_ALERT, mp,
119 "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. About to pop an ASSERT.",
121 ASSERT(dip->di_next_unlinked);
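/*
 * Worked example for the cluster math above (illustrative values): with
 * an 8192-byte inode cluster and 256-byte inodes, sb_inodelog is 8, so
 * j = 8192 >> 8 = 32 dinodes are checked per cluster buffer.
 */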
128 * Find the buffer associated with the given inode map.
129 * We do basic validation checks on the buffer once it has been
130 * retrieved from disk.
136 struct xfs_imap *imap,
146 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
147 (int)imap->im_len, buf_flags, &bp);
149 if (error != EAGAIN) {
151 "xfs_imap_to_bp: xfs_trans_read_buf()returned "
152 "an error %d on %s. Returning error.",
153 error, mp->m_fsname);
155 ASSERT(buf_flags & XFS_BUF_TRYLOCK);
161 * Validate the magic number and version of every inode in the buffer
162 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
165 ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
166 #else /* usual case */
170 for (i = 0; i < ni; i++) {
174 dip = (xfs_dinode_t *)xfs_buf_offset(bp,
175 (i << mp->m_sb.sb_inodelog));
176 di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC &&
177 XFS_DINODE_GOOD_VERSION(dip->di_version);
178 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
179 XFS_ERRTAG_ITOBP_INOTOBP,
180 XFS_RANDOM_ITOBP_INOTOBP))) {
181 if (imap_flags & XFS_IMAP_BULKSTAT) {
182 xfs_trans_brelse(tp, bp);
183 return XFS_ERROR(EINVAL);
185 XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
186 XFS_ERRLEVEL_HIGH, mp, dip);
189 "Device %s - bad inode magic/vsn "
190 "daddr %lld #%d (magic=%x)",
191 XFS_BUFTARG_NAME(mp->m_ddev_targp),
192 (unsigned long long)imap->im_blkno, i,
193 be16_to_cpu(dip->di_magic));
195 xfs_trans_brelse(tp, bp);
196 return XFS_ERROR(EFSCORRUPTED);
200 xfs_inobp_check(mp, bp);
203 * Mark the buffer as an inode buffer now that it looks good
205 XFS_BUF_SET_VTYPE(bp, B_FS_INO);
212 * This routine is called to map an inode number within a file
213 * system to the buffer containing the on-disk version of the
214 * inode. It returns a pointer to the buffer containing the
215 * on-disk inode in the bpp parameter, and in the dip parameter
216 * it returns a pointer to the on-disk inode within that buffer.
218 * If a non-zero error is returned, then the contents of bpp and
219 * dipp are undefined.
221 * Use xfs_imap() to determine the size and location of the
222 * buffer to read from disk.
234 struct xfs_imap imap;
239 error = xfs_imap(mp, tp, ino, &imap, imap_flags);
243 error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
247 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
249 *offset = imap.im_boffset;
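/*
 * A minimal usage sketch for xfs_inotobp() (hypothetical caller, not part
 * of the original file): read the on-disk inode behind an arbitrary inode
 * number and release the buffer when done.
 */
#if 0	/* illustration only */
STATIC int
xfs_example_peek_dinode(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	xfs_dinode_t	*dip;
	xfs_buf_t	*bp;
	int		offset;
	int		error;

	error = xfs_inotobp(mp, NULL, ino, &dip, &bp, &offset, 0);
	if (error)
		return error;
	/* dip points at the on-disk inode within bp at byte offset */
	ASSERT(be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC);
	xfs_trans_brelse(NULL, bp);	/* plain brelse when tp is NULL */
	return 0;
}
#endif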
255 * This routine is called to map an inode to the buffer containing
256 * the on-disk version of the inode. It returns a pointer to the
257 * buffer containing the on-disk inode in the bpp parameter, and in
258 * the dip parameter it returns a pointer to the on-disk inode within
261 * If a non-zero error is returned, then the contents of bpp and
262 * dipp are undefined.
264 * The inode is expected to have already been mapped to its buffer and read
265 * in once, thus we can use the mapping information stored in the inode
266 * rather than calling xfs_imap(). This allows us to avoid the overhead
267 * of looking at the inode btree for small block file systems.
282 ASSERT(ip->i_imap.im_blkno != 0);
284 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
289 ASSERT(buf_flags & XFS_BUF_TRYLOCK);
295 *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
301 * Move inode type and inode format specific information from the
302 * on-disk inode to the in-core inode. For fifos, devs, and sockets
303 * this means set if_rdev to the proper value. For files, directories,
304 * and symlinks this means to bring in the in-line data or extent
305 * pointers. For a file in B-tree format, only the root is immediately
306 * brought in-core. The rest will be in-lined in if_extents when it
307 * is first referenced (see xfs_iread_extents()).
314 xfs_attr_shortform_t *atp;
318 ip->i_df.if_ext_max =
319 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
322 if (unlikely(be32_to_cpu(dip->di_nextents) +
323 be16_to_cpu(dip->di_anextents) >
324 be64_to_cpu(dip->di_nblocks))) {
325 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
326 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
327 (unsigned long long)ip->i_ino,
328 (int)(be32_to_cpu(dip->di_nextents) +
329 be16_to_cpu(dip->di_anextents)),
331 be64_to_cpu(dip->di_nblocks));
332 XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
334 return XFS_ERROR(EFSCORRUPTED);
337 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
338 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
339 "corrupt dinode %Lu, forkoff = 0x%x.",
340 (unsigned long long)ip->i_ino,
342 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
344 return XFS_ERROR(EFSCORRUPTED);
347 switch (ip->i_d.di_mode & S_IFMT) {
352 if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
353 XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
355 return XFS_ERROR(EFSCORRUPTED);
359 ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
365 switch (dip->di_format) {
366 case XFS_DINODE_FMT_LOCAL:
368 * no local regular files yet
370 if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) {
371 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
373 "(local format for regular file).",
374 (unsigned long long) ip->i_ino);
375 XFS_CORRUPTION_ERROR("xfs_iformat(4)",
378 return XFS_ERROR(EFSCORRUPTED);
381 di_size = be64_to_cpu(dip->di_size);
382 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
383 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
385 "(bad size %Ld for local inode).",
386 (unsigned long long) ip->i_ino,
387 (long long) di_size);
388 XFS_CORRUPTION_ERROR("xfs_iformat(5)",
391 return XFS_ERROR(EFSCORRUPTED);
395 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
397 case XFS_DINODE_FMT_EXTENTS:
398 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
400 case XFS_DINODE_FMT_BTREE:
401 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
404 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
406 return XFS_ERROR(EFSCORRUPTED);
411 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
412 return XFS_ERROR(EFSCORRUPTED);
417 if (!XFS_DFORK_Q(dip))
419 ASSERT(ip->i_afp == NULL);
420 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
421 ip->i_afp->if_ext_max =
422 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
423 switch (dip->di_aformat) {
424 case XFS_DINODE_FMT_LOCAL:
425 atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
426 size = be16_to_cpu(atp->hdr.totsize);
427 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
429 case XFS_DINODE_FMT_EXTENTS:
430 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
432 case XFS_DINODE_FMT_BTREE:
433 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
436 error = XFS_ERROR(EFSCORRUPTED);
440 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
442 xfs_idestroy_fork(ip, XFS_DATA_FORK);
448 * The file is in-lined in the on-disk inode.
449 * If it fits into if_inline_data, then copy
450 * it there, otherwise allocate a buffer for it
451 * and copy the data there. Either way, set
452 * if_data to point at the data.
453 * If we allocate a buffer for the data, make
454 * sure that its size is a multiple of 4 and
455 * record the real size in i_real_bytes.
468 * If the size is unreasonable, then something
469 * is wrong and we just bail out rather than crash in
470 * kmem_alloc() or memcpy() below.
472 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
473 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
475 "(bad size %d for local fork, size = %d).",
476 (unsigned long long) ip->i_ino, size,
477 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
478 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
480 return XFS_ERROR(EFSCORRUPTED);
482 ifp = XFS_IFORK_PTR(ip, whichfork);
485 ifp->if_u1.if_data = NULL;
486 else if (size <= sizeof(ifp->if_u2.if_inline_data))
487 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
489 real_size = roundup(size, 4);
490 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
492 ifp->if_bytes = size;
493 ifp->if_real_bytes = real_size;
495 memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
496 ifp->if_flags &= ~XFS_IFEXTENTS;
497 ifp->if_flags |= XFS_IFINLINE;
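/*
 * Worked example for the sizing rule above: a 130-byte shortform fork is
 * too large for the inline area, so real_size = roundup(130, 4) = 132
 * bytes are allocated; if_bytes records 130 and if_real_bytes 132. For
 * data small enough to live in if_inline_data, if_real_bytes stays 0.
 */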
502 * The file consists of a set of extents all
503 * of which fit into the on-disk inode.
504 * If there are few enough extents to fit into
505 * the if_inline_ext, then copy them there.
506 * Otherwise allocate a buffer for them and copy
507 * them into it. Either way, set if_extents
508 * to point at the extents.
522 ifp = XFS_IFORK_PTR(ip, whichfork);
523 nex = XFS_DFORK_NEXTENTS(dip, whichfork);
524 size = nex * (uint)sizeof(xfs_bmbt_rec_t);
527 * If the number of extents is unreasonable, then something
528 * is wrong and we just bail out rather than crash in
529 * kmem_alloc() or memcpy() below.
531 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
532 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
533 "corrupt inode %Lu ((a)extents = %d).",
534 (unsigned long long) ip->i_ino, nex);
535 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
537 return XFS_ERROR(EFSCORRUPTED);
540 ifp->if_real_bytes = 0;
542 ifp->if_u1.if_extents = NULL;
543 else if (nex <= XFS_INLINE_EXTS)
544 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
546 xfs_iext_add(ifp, 0, nex);
548 ifp->if_bytes = size;
550 dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
551 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
552 for (i = 0; i < nex; i++, dp++) {
553 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
554 ep->l0 = get_unaligned_be64(&dp->l0);
555 ep->l1 = get_unaligned_be64(&dp->l1);
557 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
558 if (whichfork != XFS_DATA_FORK ||
559 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
560 if (unlikely(xfs_check_nostate_extents(
562 XFS_ERROR_REPORT("xfs_iformat_extents(2)",
565 return XFS_ERROR(EFSCORRUPTED);
568 ifp->if_flags |= XFS_IFEXTENTS;
573 * The file has too many extents to fit into
574 * the inode, so they are in B-tree format.
575 * Allocate a buffer for the root of the B-tree
576 * and copy the root into it. The i_extents
577 * field will remain NULL until all of the
578 * extents are read in (when they are needed).
586 xfs_bmdr_block_t *dfp;
592 ifp = XFS_IFORK_PTR(ip, whichfork);
593 dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
594 size = XFS_BMAP_BROOT_SPACE(dfp);
595 nrecs = be16_to_cpu(dfp->bb_numrecs);
598 * Blow out if: the fork has fewer extents than can fit in the
599 * fork (the fork shouldn't be in btree format), the root btree
600 * block has more records than can fit into the fork,
601 * or the number of extents is greater than the number of blocks.
604 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
605 || XFS_BMDR_SPACE_CALC(nrecs) >
606 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
607 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
608 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
609 "corrupt inode %Lu (btree).",
610 (unsigned long long) ip->i_ino);
611 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
613 return XFS_ERROR(EFSCORRUPTED);
616 ifp->if_broot_bytes = size;
617 ifp->if_broot = kmem_alloc(size, KM_SLEEP);
618 ASSERT(ifp->if_broot != NULL);
620 * Copy and convert from the on-disk structure
621 * to the in-memory structure.
623 xfs_bmdr_to_bmbt(ip->i_mount, dfp,
624 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
625 ifp->if_broot, size);
626 ifp->if_flags &= ~XFS_IFEXTENTS;
627 ifp->if_flags |= XFS_IFBROOT;
633 xfs_dinode_from_disk(
637 to->di_magic = be16_to_cpu(from->di_magic);
638 to->di_mode = be16_to_cpu(from->di_mode);
639 to->di_version = from->di_version;
640 to->di_format = from->di_format;
641 to->di_onlink = be16_to_cpu(from->di_onlink);
642 to->di_uid = be32_to_cpu(from->di_uid);
643 to->di_gid = be32_to_cpu(from->di_gid);
644 to->di_nlink = be32_to_cpu(from->di_nlink);
645 to->di_projid = be16_to_cpu(from->di_projid);
646 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
647 to->di_flushiter = be16_to_cpu(from->di_flushiter);
648 to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
649 to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
650 to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
651 to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
652 to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
653 to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
654 to->di_size = be64_to_cpu(from->di_size);
655 to->di_nblocks = be64_to_cpu(from->di_nblocks);
656 to->di_extsize = be32_to_cpu(from->di_extsize);
657 to->di_nextents = be32_to_cpu(from->di_nextents);
658 to->di_anextents = be16_to_cpu(from->di_anextents);
659 to->di_forkoff = from->di_forkoff;
660 to->di_aformat = from->di_aformat;
661 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
662 to->di_dmstate = be16_to_cpu(from->di_dmstate);
663 to->di_flags = be16_to_cpu(from->di_flags);
664 to->di_gen = be32_to_cpu(from->di_gen);
670 xfs_icdinode_t *from)
672 to->di_magic = cpu_to_be16(from->di_magic);
673 to->di_mode = cpu_to_be16(from->di_mode);
674 to->di_version = from->di_version;
675 to->di_format = from->di_format;
676 to->di_onlink = cpu_to_be16(from->di_onlink);
677 to->di_uid = cpu_to_be32(from->di_uid);
678 to->di_gid = cpu_to_be32(from->di_gid);
679 to->di_nlink = cpu_to_be32(from->di_nlink);
680 to->di_projid = cpu_to_be16(from->di_projid);
681 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
682 to->di_flushiter = cpu_to_be16(from->di_flushiter);
683 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
684 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
685 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
686 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
687 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
688 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
689 to->di_size = cpu_to_be64(from->di_size);
690 to->di_nblocks = cpu_to_be64(from->di_nblocks);
691 to->di_extsize = cpu_to_be32(from->di_extsize);
692 to->di_nextents = cpu_to_be32(from->di_nextents);
693 to->di_anextents = cpu_to_be16(from->di_anextents);
694 to->di_forkoff = from->di_forkoff;
695 to->di_aformat = from->di_aformat;
696 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
697 to->di_dmstate = cpu_to_be16(from->di_dmstate);
698 to->di_flags = cpu_to_be16(from->di_flags);
699 to->di_gen = cpu_to_be32(from->di_gen);
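/*
 * A debug-style sketch (illustration, not part of the original file):
 * xfs_dinode_from_disk() and xfs_dinode_to_disk() are inverses over the
 * core fields, so a round trip must reproduce the on-disk values.
 */
#if 0	/* illustration only */
STATIC void
xfs_example_dinode_roundtrip(
	xfs_dinode_t	*dip)
{
	xfs_icdinode_t	ic;
	xfs_dinode_t	copy;

	xfs_dinode_from_disk(&ic, dip);
	xfs_dinode_to_disk(&copy, &ic);
	/* both sides are big-endian, so raw comparison is fine */
	ASSERT(copy.di_magic == dip->di_magic);
	ASSERT(copy.di_size == dip->di_size);
	ASSERT(copy.di_version == dip->di_version);
}
#endif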
708 if (di_flags & XFS_DIFLAG_ANY) {
709 if (di_flags & XFS_DIFLAG_REALTIME)
710 flags |= XFS_XFLAG_REALTIME;
711 if (di_flags & XFS_DIFLAG_PREALLOC)
712 flags |= XFS_XFLAG_PREALLOC;
713 if (di_flags & XFS_DIFLAG_IMMUTABLE)
714 flags |= XFS_XFLAG_IMMUTABLE;
715 if (di_flags & XFS_DIFLAG_APPEND)
716 flags |= XFS_XFLAG_APPEND;
717 if (di_flags & XFS_DIFLAG_SYNC)
718 flags |= XFS_XFLAG_SYNC;
719 if (di_flags & XFS_DIFLAG_NOATIME)
720 flags |= XFS_XFLAG_NOATIME;
721 if (di_flags & XFS_DIFLAG_NODUMP)
722 flags |= XFS_XFLAG_NODUMP;
723 if (di_flags & XFS_DIFLAG_RTINHERIT)
724 flags |= XFS_XFLAG_RTINHERIT;
725 if (di_flags & XFS_DIFLAG_PROJINHERIT)
726 flags |= XFS_XFLAG_PROJINHERIT;
727 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
728 flags |= XFS_XFLAG_NOSYMLINKS;
729 if (di_flags & XFS_DIFLAG_EXTSIZE)
730 flags |= XFS_XFLAG_EXTSIZE;
731 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
732 flags |= XFS_XFLAG_EXTSZINHERIT;
733 if (di_flags & XFS_DIFLAG_NODEFRAG)
734 flags |= XFS_XFLAG_NODEFRAG;
735 if (di_flags & XFS_DIFLAG_FILESTREAM)
736 flags |= XFS_XFLAG_FILESTREAM;
746 xfs_icdinode_t *dic = &ip->i_d;
748 return _xfs_dic2xflags(dic->di_flags) |
749 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
756 return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
757 (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
761 * Allocate and initialise an xfs_inode.
763 STATIC struct xfs_inode *
765 struct xfs_mount *mp,
768 struct xfs_inode *ip;
771 * if this didn't occur in transactions, we could use
772 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
773 * code up to do this anyway.
775 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
779 ASSERT(atomic_read(&ip->i_iocount) == 0);
780 ASSERT(atomic_read(&ip->i_pincount) == 0);
781 ASSERT(!spin_is_locked(&ip->i_flags_lock));
782 ASSERT(completion_done(&ip->i_flush));
785 * initialise the VFS inode here to get failures
786 * out of the way early.
788 if (!inode_init_always(mp->m_super, VFS_I(ip))) {
789 kmem_zone_free(xfs_inode_zone, ip);
793 /* initialise the xfs inode */
796 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
798 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
800 ip->i_update_core = 0;
801 ip->i_update_size = 0;
802 ip->i_delayed_blks = 0;
803 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
808 * Initialize inode's trace buffers.
810 #ifdef XFS_INODE_TRACE
811 ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
813 #ifdef XFS_BMAP_TRACE
814 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
816 #ifdef XFS_BTREE_TRACE
817 ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
820 ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
822 #ifdef XFS_ILOCK_TRACE
823 ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
825 #ifdef XFS_DIR2_TRACE
826 ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
833 * Given a mount structure and an inode number, return a pointer
834 * to a newly allocated in-core inode corresponding to the given inode number.
837 * Initialize the inode's attributes and extent pointers if it
838 * already has them (it will not if the inode has no links).
854 ip = xfs_inode_alloc(mp, ino);
859 * Fill in the location information in the in-core inode.
861 ip->i_imap.im_blkno = bno;
862 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, imap_flags);
864 goto out_destroy_inode;
865 ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
868 * Get pointers to the on-disk inode and the buffer containing it.
870 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
871 XFS_BUF_LOCK, imap_flags);
873 goto out_destroy_inode;
874 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
877 * If we got something that isn't an inode it means someone
878 * (nfs or dmi) has a stale handle.
880 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) {
882 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
883 "dip->di_magic (0x%x) != "
884 "XFS_DINODE_MAGIC (0x%x)",
885 be16_to_cpu(dip->di_magic),
888 error = XFS_ERROR(EINVAL);
893 * If the on-disk inode is already linked to a directory
894 * entry, copy all of the inode into the in-core inode.
895 * xfs_iformat() handles copying in the inode format
896 * specific information.
897 * Otherwise, just get the truly permanent information.
900 xfs_dinode_from_disk(&ip->i_d, dip);
901 error = xfs_iformat(ip, dip);
904 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
905 "xfs_iformat() returned error %d",
911 ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
912 ip->i_d.di_version = dip->di_version;
913 ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
914 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
916 * Make sure to pull in the mode here as well in
917 * case the inode is released without being used.
918 * This ensures that xfs_inactive() will see that
919 * the inode is already free and not try to mess
920 * with the uninitialized part of it.
924 * Initialize the per-fork minima and maxima for a new
925 * inode here. xfs_iformat will do it for old inodes.
927 ip->i_df.if_ext_max =
928 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
932 * The inode format changed when we moved the link count and
933 * made it 32 bits long. If this is an old format inode,
934 * convert it in memory to look like a new one. If it gets
935 * flushed to disk we will convert back before flushing or
936 * logging it. We zero out the new projid field and the old link
937 * count field. We'll handle clearing the pad field (the remains
938 * of the old uuid field) when we actually convert the inode to
939 * the new format. We don't change the version number so that we
940 * can distinguish this from a real new format inode.
942 if (ip->i_d.di_version == 1) {
943 ip->i_d.di_nlink = ip->i_d.di_onlink;
944 ip->i_d.di_onlink = 0;
945 ip->i_d.di_projid = 0;
948 ip->i_delayed_blks = 0;
949 ip->i_size = ip->i_d.di_size;
952 * Mark the buffer containing the inode as something to keep
953 * around for a while. This helps to keep recently accessed
954 * meta-data in-core longer.
956 XFS_BUF_SET_REF(bp, XFS_INO_REF);
959 * Use xfs_trans_brelse() to release the buffer containing the
960 * on-disk inode, because it was acquired with xfs_trans_read_buf()
961 * in xfs_itobp() above. If tp is NULL, this is just a normal
962 * brelse(). If we're within a transaction, then xfs_trans_brelse()
963 * will only release the buffer if it is not dirty within the
964 * transaction. It will be OK to release the buffer in this case,
965 * because inodes on disk are never destroyed and we will be
966 * locking the new in-core inode before putting it in the hash
967 * table where other processes can find it. Thus we don't have
968 * to worry about the inode being changed just because we released the buffer.
971 xfs_trans_brelse(tp, bp);
976 xfs_trans_brelse(tp, bp);
978 xfs_destroy_inode(ip);
983 * Read in extents from a btree-format inode.
984 * Allocate and fill in if_extents. Real work is done in xfs_bmap.c.
994 xfs_extnum_t nextents;
997 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
998 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
1000 return XFS_ERROR(EFSCORRUPTED);
1002 nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1003 size = nextents * sizeof(xfs_bmbt_rec_t);
1004 ifp = XFS_IFORK_PTR(ip, whichfork);
1007 * We know that the size is valid (it's checked in iformat_btree)
1009 ifp->if_lastex = NULLEXTNUM;
1010 ifp->if_bytes = ifp->if_real_bytes = 0;
1011 ifp->if_flags |= XFS_IFEXTENTS;
1012 xfs_iext_add(ifp, 0, nextents);
1013 error = xfs_bmap_read_extents(tp, ip, whichfork);
1015 xfs_iext_destroy(ifp);
1016 ifp->if_flags &= ~XFS_IFEXTENTS;
1019 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
1024 * Allocate an inode on disk and return a copy of its in-core version.
1025 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
1026 * appropriately within the inode. The uid and gid for the inode are
1027 * set according to the contents of the given cred structure.
1029 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
1030 * has a free inode available, call xfs_iget()
1031 * to obtain the in-core version of the allocated inode. Finally,
1032 * fill in the inode and log its initial contents. In this case,
1033 * ialloc_context would be set to NULL and call_again set to false.
1035 * If xfs_dialloc() does not have an available inode,
1036 * it will replenish its supply by doing an allocation. Since we can
1037 * only do one allocation within a transaction without deadlocks, we
1038 * must commit the current transaction before returning the inode itself.
1039 * In this case, therefore, we will set call_again to true and return.
1040 * The caller should then commit the current transaction, start a new
1041 * transaction, and call xfs_ialloc() again to actually get the inode.
1043 * To ensure that some other process does not grab the inode that
1044 * was allocated during the first call to xfs_ialloc(), this routine
1045 * also returns the [locked] bp pointing to the head of the freelist
1046 * as ialloc_context. The caller should hold this buffer across
1047 * the commit and pass it back into this routine on the second call.
1049 * If we are allocating quota inodes, we do not have a parent inode
1050 * to attach to or associate with (i.e. pip == NULL) because they
1051 * are not linked into the directory structure - they are attached
1052 * directly to the superblock - and so have no parent.
1064 xfs_buf_t **ialloc_context,
1065 boolean_t *call_again,
1073 int filestreams = 0;
1076 * Call the space management code to pick
1077 * the on-disk inode to be allocated.
1079 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
1080 ialloc_context, call_again, &ino);
1083 if (*call_again || ino == NULLFSINO) {
1087 ASSERT(*ialloc_context == NULL);
1090 * Get the in-core inode with the lock held exclusively.
1091 * This is because we're setting fields here we need
1092 * to prevent others from looking at until we're done.
1094 error = xfs_trans_iget(tp->t_mountp, tp, ino,
1095 XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1100 ip->i_d.di_mode = (__uint16_t)mode;
1101 ip->i_d.di_onlink = 0;
1102 ip->i_d.di_nlink = nlink;
1103 ASSERT(ip->i_d.di_nlink == nlink);
1104 ip->i_d.di_uid = current_fsuid();
1105 ip->i_d.di_gid = current_fsgid();
1106 ip->i_d.di_projid = prid;
1107 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1110 * If the superblock version is up to where we support new format
1111 * inodes and this is currently an old format inode, then change
1112 * the inode version number now. This way we only do the conversion
1113 * here rather than here and in the flush/logging code.
1115 if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
1116 ip->i_d.di_version == 1) {
1117 ip->i_d.di_version = 2;
1119 * We've already zeroed the old link count, the projid field,
1120 * and the pad field.
1125 * Project ids won't be stored on disk if we are using a version 1 inode.
1127 if ((prid != 0) && (ip->i_d.di_version == 1))
1128 xfs_bump_ino_vers2(tp, ip);
1130 if (pip && XFS_INHERIT_GID(pip)) {
1131 ip->i_d.di_gid = pip->i_d.di_gid;
1132 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1133 ip->i_d.di_mode |= S_ISGID;
1138 * If the group ID of the new file does not match the effective group
1139 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1140 * (and only if the irix_sgid_inherit compatibility variable is set).
1142 if ((irix_sgid_inherit) &&
1143 (ip->i_d.di_mode & S_ISGID) &&
1144 (!in_group_p((gid_t)ip->i_d.di_gid))) {
1145 ip->i_d.di_mode &= ~S_ISGID;
1148 ip->i_d.di_size = 0;
1150 ip->i_d.di_nextents = 0;
1151 ASSERT(ip->i_d.di_nblocks == 0);
1154 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
1155 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
1156 ip->i_d.di_atime = ip->i_d.di_mtime;
1157 ip->i_d.di_ctime = ip->i_d.di_mtime;
1160 * di_gen will have been taken care of in xfs_iread.
1162 ip->i_d.di_extsize = 0;
1163 ip->i_d.di_dmevmask = 0;
1164 ip->i_d.di_dmstate = 0;
1165 ip->i_d.di_flags = 0;
1166 flags = XFS_ILOG_CORE;
1167 switch (mode & S_IFMT) {
1172 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1173 ip->i_df.if_u2.if_rdev = rdev;
1174 ip->i_df.if_flags = 0;
1175 flags |= XFS_ILOG_DEV;
1179 * we can't set up filestreams until after the VFS inode
1180 * is set up properly.
1182 if (pip && xfs_inode_is_filestream(pip))
1186 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1189 if ((mode & S_IFMT) == S_IFDIR) {
1190 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1191 di_flags |= XFS_DIFLAG_RTINHERIT;
1192 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1193 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1194 ip->i_d.di_extsize = pip->i_d.di_extsize;
1196 } else if ((mode & S_IFMT) == S_IFREG) {
1197 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1198 di_flags |= XFS_DIFLAG_REALTIME;
1199 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1200 di_flags |= XFS_DIFLAG_EXTSIZE;
1201 ip->i_d.di_extsize = pip->i_d.di_extsize;
1204 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1205 xfs_inherit_noatime)
1206 di_flags |= XFS_DIFLAG_NOATIME;
1207 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1209 di_flags |= XFS_DIFLAG_NODUMP;
1210 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1212 di_flags |= XFS_DIFLAG_SYNC;
1213 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1214 xfs_inherit_nosymlinks)
1215 di_flags |= XFS_DIFLAG_NOSYMLINKS;
1216 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1217 di_flags |= XFS_DIFLAG_PROJINHERIT;
1218 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1219 xfs_inherit_nodefrag)
1220 di_flags |= XFS_DIFLAG_NODEFRAG;
1221 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
1222 di_flags |= XFS_DIFLAG_FILESTREAM;
1223 ip->i_d.di_flags |= di_flags;
1227 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1228 ip->i_df.if_flags = XFS_IFEXTENTS;
1229 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1230 ip->i_df.if_u1.if_extents = NULL;
1236 * Attribute fork settings for new inode.
1238 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1239 ip->i_d.di_anextents = 0;
1242 * Log the new values stuffed into the inode.
1244 xfs_trans_log_inode(tp, ip, flags);
1246 /* now that we have an i_mode we can setup inode ops and unlock */
1247 xfs_setup_inode(ip);
1249 /* now we have set up the vfs inode we can associate the filestream */
1251 error = xfs_filestream_associate(pip, ip);
1255 xfs_iflags_set(ip, XFS_IFILESTREAM);
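/*
 * A condensed sketch of the two-phase protocol described above (modelled
 * on the caller, xfs_dir_ialloc(); commit/reserve details elided): if
 * xfs_ialloc() sets call_again, the caller holds ialloc_context across a
 * transaction roll and then calls xfs_ialloc() a second time.
 */
#if 0	/* illustration only */
	error = xfs_ialloc(tp, pip, mode, nlink, rdev, credp, prid, okalloc,
			   &ialloc_context, &call_again, &ip);
	if (call_again) {
		/* keep the AGI freelist buffer locked across the commit */
		xfs_trans_bhold(tp, ialloc_context);
		/* ... commit tp, allocate and reserve a new transaction ... */
		xfs_trans_bjoin(tp, ialloc_context);
		error = xfs_ialloc(tp, pip, mode, nlink, rdev, credp, prid,
				   okalloc, &ialloc_context, &call_again, &ip);
	}
#endif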
1263 * Check to make sure that there are no blocks allocated to the
1264 * file beyond the size of the file. We don't check this for
1265 * files with fixed size extents or real time extents, but we
1266 * at least do it for regular files.
1275 xfs_fileoff_t map_first;
1277 xfs_bmbt_irec_t imaps[2];
1279 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1282 if (XFS_IS_REALTIME_INODE(ip))
1285 if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
1289 map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
1291 * The filesystem could be shutting down, so bmapi may return an error.
1294 if (xfs_bmapi(NULL, ip, map_first,
1296 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1298 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1301 ASSERT(nimaps == 1);
1302 ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1307 * Calculate the last possible buffered byte in a file. This must
1308 * include data that was buffered beyond the EOF by the write code.
1309 * This also needs to deal with overflowing the xfs_fsize_t type
1310 * which can happen for sizes near the limit.
1312 * We also need to take into account any blocks beyond the EOF. It
1313 * may be the case that they were buffered by a write which failed.
1314 * In that case the pages will still be in memory, but the inode size
1315 * will never have been updated.
1322 xfs_fsize_t last_byte;
1323 xfs_fileoff_t last_block;
1324 xfs_fileoff_t size_last_block;
1327 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
1331 * Only check for blocks beyond the EOF if the extents have
1332 * been read in. This eliminates the need for the inode lock,
1333 * and it also saves us from looking when it really isn't necessary.
1336 if (ip->i_df.if_flags & XFS_IFEXTENTS) {
1337 error = xfs_bmap_last_offset(NULL, ip, &last_block,
1345 size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
1346 last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
1348 last_byte = XFS_FSB_TO_B(mp, last_block);
1349 if (last_byte < 0) {
1350 return XFS_MAXIOFFSET(mp);
1352 last_byte += (1 << mp->m_writeio_log);
1353 if (last_byte < 0) {
1354 return XFS_MAXIOFFSET(mp);
1359 #if defined(XFS_RW_TRACE)
1365 xfs_fsize_t new_size,
1366 xfs_off_t toss_start,
1367 xfs_off_t toss_finish)
1369 if (ip->i_rwtrace == NULL) {
1373 ktrace_enter(ip->i_rwtrace,
1376 (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1377 (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1378 (void*)((long)flag),
1379 (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1380 (void*)(unsigned long)(new_size & 0xffffffff),
1381 (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1382 (void*)(unsigned long)(toss_start & 0xffffffff),
1383 (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1384 (void*)(unsigned long)(toss_finish & 0xffffffff),
1385 (void*)(unsigned long)current_cpu(),
1386 (void*)(unsigned long)current_pid(),
1392 #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1396 * Start the truncation of the file to new_size. The new size
1397 * must be smaller than the current size. This routine will
1398 * clear the buffer and page caches of file data in the removed
1399 * range, and xfs_itruncate_finish() will remove the underlying blocks.
1402 * The inode must have its I/O lock held EXCLUSIVELY, and it
1403 * must NOT have the inode lock held at all. This is because we're
1404 * calling into the buffer/page cache code and we can't hold the
1405 * inode lock when we do so.
1407 * We need to wait for any direct I/Os in flight to complete before we
1408 * proceed with the truncate. This is needed to prevent the extents
1409 * being read or written by the direct I/Os from being removed while the
1410 * I/O is in flight as there is no other method of synchronising
1411 * direct I/O with the truncate operation. Also, because we hold
1412 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1413 * started until the truncate completes and drops the lock. Essentially,
1414 * the vn_iowait() call forms an I/O barrier that provides strict ordering
1415 * between direct I/Os and the truncate operation.
1417 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1418 * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
1419 * in the case that the caller is locking things out of order and
1420 * may not be able to call xfs_itruncate_finish() with the inode lock
1421 * held without dropping the I/O lock. If the caller must drop the
1422 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
1423 * must be called again with all the same restrictions as the initial call.
1427 xfs_itruncate_start(
1430 xfs_fsize_t new_size)
1432 xfs_fsize_t last_byte;
1433 xfs_off_t toss_start;
1437 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1438 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1439 ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
1440 (flags == XFS_ITRUNC_MAYBE));
1444 /* wait for the completion of any pending DIOs */
1445 if (new_size == 0 || new_size < ip->i_size)
1449 * Call toss_pages or flushinval_pages to get rid of pages
1450 * overlapping the region being removed. We have to use
1451 * the less efficient flushinval_pages in the case that the
1452 * caller may not be able to finish the truncate without
1453 * dropping the inode's I/O lock. Make sure
1454 * to catch any pages brought in by buffers overlapping
1455 * the EOF by searching out beyond the isize by our
1456 * block size. We round new_size up to a block boundary
1457 * so that we don't toss things on the same block as
1458 * new_size but before it.
1460 * Before calling toss_page or flushinval_pages, make sure to
1461 * call remapf() over the same region if the file is mapped.
1462 * This frees up mapped file references to the pages in the
1463 * given range and for the flushinval_pages case it ensures
1464 * that we get the latest mapped changes flushed out.
1466 toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1467 toss_start = XFS_FSB_TO_B(mp, toss_start);
1468 if (toss_start < 0) {
1470 * The place to start tossing is beyond our maximum
1471 * file size, so there is no way that the data extended that far.
1476 last_byte = xfs_file_last_byte(ip);
1477 xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
1479 if (last_byte > toss_start) {
1480 if (flags & XFS_ITRUNC_DEFINITE) {
1481 xfs_tosspages(ip, toss_start,
1482 -1, FI_REMAPF_LOCKED);
1484 error = xfs_flushinval_pages(ip, toss_start,
1485 -1, FI_REMAPF_LOCKED);
1490 if (new_size == 0) {
1491 ASSERT(VN_CACHED(VFS_I(ip)) == 0);
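/*
 * Worked example for the rounding above (4096-byte blocks): a new_size of
 * 6000 rounds up to FSB 2, so toss_start = 8192. The block holding the
 * new EOF (bytes 4096-8191) is left in the cache; tossing starts at the
 * first block wholly beyond new_size.
 */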
1498 * Shrink the file to the given new_size. The new size must be smaller than
1499 * the current size. This will free up the underlying blocks in the removed
1500 * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
1502 * The transaction passed to this routine must have made a permanent log
1503 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1504 * given transaction and start new ones, so make sure everything involved in
1505 * the transaction is tidy before calling here. Some transaction will be
1506 * returned to the caller to be committed. The incoming transaction must
1507 * already include the inode, and both inode locks must be held exclusively.
1508 * The inode must also be "held" within the transaction. On return the inode
1509 * will be "held" within the returned transaction. This routine does NOT
1510 * require any disk space to be reserved for it within the transaction.
1512 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
1513 * indicates the fork which is to be truncated. For the attribute fork we only
1514 * support truncation to size 0.
1516 * We use the sync parameter to indicate whether or not the first transaction
1517 * we perform might have to be synchronous. For the attr fork, it needs to be
1518 * so if the unlink of the inode is not yet known to be permanent in the log.
1519 * This keeps us from freeing and reusing the blocks of the attribute fork
1520 * before the unlink of the inode becomes permanent.
1522 * For the data fork, we normally have to run synchronously if we're being
1523 * called out of the inactive path or we're being called out of the create path
1524 * where we're truncating an existing file. Either way, the truncate needs to
1525 * be sync so blocks don't reappear in the file with altered data in case of a
1526 * crash. wsync filesystems can run the first case async because anything that
1527 * shrinks the inode has to run sync so by the time we're called here from
1528 * inactive, the inode size is permanently set to 0.
1530 * Calls from the truncate path always need to be sync unless we're in a wsync
1531 * filesystem and the file has already been unlinked.
1533 * The caller is responsible for correctly setting the sync parameter. It gets
1534 * too hard for us to guess here which path we're being called out of just
1535 * based on inode state.
1537 * If we get an error, we must return with the inode locked and linked into the
1538 * current transaction. This keeps things simple for the higher level code,
1539 * because it always knows that the inode is locked and held in the transaction
1540 * that returns to it whether errors occur or not. We don't mark the inode
1541 * dirty on error so that transactions can be easily aborted if possible.
1544 xfs_itruncate_finish(
1547 xfs_fsize_t new_size,
1551 xfs_fsblock_t first_block;
1552 xfs_fileoff_t first_unmap_block;
1553 xfs_fileoff_t last_block;
1554 xfs_filblks_t unmap_len=0;
1559 xfs_bmap_free_t free_list;
1562 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
1563 ASSERT((new_size == 0) || (new_size <= ip->i_size));
1564 ASSERT(*tp != NULL);
1565 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1566 ASSERT(ip->i_transp == *tp);
1567 ASSERT(ip->i_itemp != NULL);
1568 ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1572 mp = (ntp)->t_mountp;
1573 ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
1576 * We only support truncating the entire attribute fork.
1578 if (fork == XFS_ATTR_FORK) {
1581 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1582 xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1584 * The first thing we do is set the size to new_size permanently
1585 * on disk. This way we don't have to worry about anyone ever
1586 * being able to look at the data being freed even in the face
1587 * of a crash. What we're getting around here is the case where
1588 * we free a block, it is allocated to another file, it is written
1589 * to, and then we crash. If the new data gets written to the
1590 * file but the log buffers containing the free and reallocation
1591 * don't, then we'd end up with garbage in the blocks being freed.
1592 * As long as we make the new_size permanent before actually
1593 * freeing any blocks it doesn't matter if they get written to.
1595 * The callers must signal into us whether or not the size
1596 * setting here must be synchronous. There are a few cases
1597 * where it doesn't have to be synchronous. Those cases
1598 * occur if the file is unlinked and we know the unlink is
1599 * permanent or if the blocks being truncated are guaranteed
1600 * to be beyond the inode eof (regardless of the link count)
1601 * and the eof value is permanent. Both of these cases occur
1602 * only on wsync-mounted filesystems. In those cases, we're
1603 * guaranteed that no user will ever see the data in the blocks
1604 * that are being truncated so the truncate can run async.
1605 * In the free beyond eof case, the file may wind up with
1606 * more blocks allocated to it than it needs if we crash
1607 * and that won't get fixed until the next time the file
1608 * is re-opened and closed but that's ok as that shouldn't
1609 * be too many blocks.
1611 * However, we can't just make all wsync xactions run async
1612 * because there's one call out of the create path that needs
1613 * to run sync where it's truncating an existing file to size
1614 * 0 whose size is > 0.
1616 * It's probably possible to come up with a test in this
1617 * routine that would correctly distinguish all the above
1618 * cases from the values of the function parameters and the
1619 * inode state but for sanity's sake, I've decided to let the
1620 * layers above just tell us. It's simpler to correctly figure
1621 * out in the layer above exactly under what conditions we
1622 * can run async and I think it's easier for others to read and
1623 * follow the logic in case something has to be changed.
1624 * cscope is your friend -- rcc.
1626 * The attribute fork is much simpler.
1628 * For the attribute fork we allow the caller to tell us whether
1629 * the unlink of the inode that led to this call is yet permanent
1630 * in the on disk log. If it is not and we will be freeing extents
1631 * in this inode then we make the first transaction synchronous
1632 * to make sure that the unlink is permanent by the time we free
1635 if (fork == XFS_DATA_FORK) {
1636 if (ip->i_d.di_nextents > 0) {
1638 * If we are not changing the file size then do
1639 * not update the on-disk file size - we may be
1640 * called from xfs_inactive_free_eofblocks(). If we
1641 * update the on-disk file size and then the system
1642 * crashes before the contents of the file are
1643 * flushed to disk then the files may be full of
1644 * holes (i.e. the NULL files bug).
1646 if (ip->i_size != new_size) {
1647 ip->i_d.di_size = new_size;
1648 ip->i_size = new_size;
1649 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1653 ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1654 if (ip->i_d.di_anextents > 0)
1655 xfs_trans_set_sync(ntp);
1657 ASSERT(fork == XFS_DATA_FORK ||
1658 (fork == XFS_ATTR_FORK &&
1659 ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1660 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1663 * Since it is possible for space to become allocated beyond
1664 * the end of the file (in a crash where the space is allocated
1665 * but the inode size is not yet updated), simply remove any
1666 * blocks which show up between the new EOF and the maximum
1667 * possible file size. If the first block to be removed is
1668 * beyond the maximum file size (ie it is the same as last_block),
1669 * then there is nothing to do.
1671 last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1672 ASSERT(first_unmap_block <= last_block);
1674 if (last_block == first_unmap_block) {
1677 unmap_len = last_block - first_unmap_block + 1;
1681 * Free up to XFS_ITRUNC_MAX_EXTENTS extents. xfs_bunmapi()
1682 * will tell us whether it freed the entire range or
1683 * not. If this is a synchronous mount (wsync),
1684 * then we can tell bunmapi to keep all the
1685 * transactions asynchronous since the unlink
1686 * transaction that made this inode inactive has
1687 * already hit the disk. There's no danger of
1688 * the freed blocks being reused, there being a
1689 * crash, and the reused blocks suddenly reappearing
1690 * in this file with garbage in them once recovery
1693 XFS_BMAP_INIT(&free_list, &first_block);
1694 error = xfs_bunmapi(ntp, ip,
1695 first_unmap_block, unmap_len,
1696 XFS_BMAPI_AFLAG(fork) |
1697 (sync ? 0 : XFS_BMAPI_ASYNC),
1698 XFS_ITRUNC_MAX_EXTENTS,
1699 &first_block, &free_list,
1703 * If the bunmapi call encounters an error,
1704 * return to the caller where the transaction
1705 * can be properly aborted. We just need to
1706 * make sure we're not holding any resources
1707 * that we were not when we came in.
1709 xfs_bmap_cancel(&free_list);
1714 * Duplicate the transaction that has the permanent
1715 * reservation and commit the old transaction.
1717 error = xfs_bmap_finish(tp, &free_list, &committed);
1720 /* link the inode into the next xact in the chain */
1721 xfs_trans_ijoin(ntp, ip,
1722 XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1723 xfs_trans_ihold(ntp, ip);
1728 * If the bmap finish call encounters an error, return
1729 * to the caller where the transaction can be properly
1730 * aborted. We just need to make sure we're not
1731 * holding any resources that we were not when we came
1734 * Aborting from this point might lose some blocks in
1735 * the file system, but oh well.
1737 xfs_bmap_cancel(&free_list);
1743 * Mark the inode dirty so it will be logged and
1744 * moved forward in the log as part of every commit.
1746 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1749 ntp = xfs_trans_dup(ntp);
1750 error = xfs_trans_commit(*tp, 0);
1753 /* link the inode into the next transaction in the chain */
1754 xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1755 xfs_trans_ihold(ntp, ip);
1760 * transaction commit worked ok so we can drop the extra ticket
1761 * reference that we gained in xfs_trans_dup()
1763 xfs_log_ticket_put(ntp->t_ticket);
1764 error = xfs_trans_reserve(ntp, 0,
1765 XFS_ITRUNCATE_LOG_RES(mp), 0,
1766 XFS_TRANS_PERM_LOG_RES,
1767 XFS_ITRUNCATE_LOG_COUNT);
1772 * Only update the size in the case of the data fork, but
1773 * always re-log the inode so that our permanent transaction
1774 * can keep on rolling it forward in the log.
1776 if (fork == XFS_DATA_FORK) {
1777 xfs_isize_check(mp, ip, new_size);
1779 * If we are not changing the file size then do
1780 * not update the on-disk file size - we may be
1781 * called from xfs_inactive_free_eofblocks(). If we
1782 * update the on-disk file size and then the system
1783 * crashes before the contents of the file are
1784 * flushed to disk then the files may be full of
1785 * holes (i.e. the NULL files bug).
1787 if (ip->i_size != new_size) {
1788 ip->i_d.di_size = new_size;
1789 ip->i_size = new_size;
1792 xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1793 ASSERT((new_size != 0) ||
1794 (fork == XFS_ATTR_FORK) ||
1795 (ip->i_delayed_blks == 0));
1796 ASSERT((new_size != 0) ||
1797 (fork == XFS_ATTR_FORK) ||
1798 (ip->i_d.di_nextents == 0));
1799 xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
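/*
 * A condensed sketch of the truncate protocol spelled out above (modelled
 * on callers such as xfs_inactive(); error handling elided):
 */
#if 0	/* illustration only */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);

	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
#endif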
1804 * This is called when the inode's link count goes to 0.
1805 * We place the on-disk inode on a list in the AGI. It
1806 * will be pulled from this list when the inode is freed.
1823 ASSERT(ip->i_d.di_nlink == 0);
1824 ASSERT(ip->i_d.di_mode != 0);
1825 ASSERT(ip->i_transp == tp);
1830 * Get the agi buffer first. It ensures lock ordering on the list.
1833 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
1836 agi = XFS_BUF_TO_AGI(agibp);
1839 * Get the index into the agi hash table for the
1840 * list this inode will go on.
1842 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1844 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1845 ASSERT(agi->agi_unlinked[bucket_index]);
1846 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1848 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1850 * There is already another inode in the bucket we need
1851 * to add ourselves to. Add us at the front of the list.
1852 * Here we put the head pointer into our next pointer,
1853 * and then we fall through to point the head at us.
1855 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
1859 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
1860 /* both on-disk, don't endian flip twice */
1861 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1862 offset = ip->i_imap.im_boffset +
1863 offsetof(xfs_dinode_t, di_next_unlinked);
1864 xfs_trans_inode_buf(tp, ibp);
1865 xfs_trans_log_buf(tp, ibp, offset,
1866 (offset + sizeof(xfs_agino_t) - 1));
1867 xfs_inobp_check(mp, ibp);
1871 * Point the bucket head pointer at the inode being inserted.
1874 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1875 offset = offsetof(xfs_agi_t, agi_unlinked) +
1876 (sizeof(xfs_agino_t) * bucket_index);
1877 xfs_trans_log_buf(tp, agibp, offset,
1878 (offset + sizeof(xfs_agino_t) - 1));
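/*
 * Worked example for the bucket selection above: the AGI carries
 * XFS_AGI_UNLINKED_BUCKETS (64) list heads, so AG inode number 12345
 * hashes to bucket 12345 % 64 = 57 and is spliced in at the head of
 * that bucket's list.
 */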
1883 * Pull the on-disk inode from the AGI unlinked list.
1896 xfs_agnumber_t agno;
1898 xfs_agino_t next_agino;
1899 xfs_buf_t *last_ibp;
1900 xfs_dinode_t *last_dip = NULL;
1902 int offset, last_offset = 0;
1906 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1909 * Get the agi buffer first. It ensures lock ordering on the list.
1912 error = xfs_read_agi(mp, tp, agno, &agibp);
1916 agi = XFS_BUF_TO_AGI(agibp);
1919 * Get the index into the agi hash table for the
1920 * list this inode will go on.
1922 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1924 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1925 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
1926 ASSERT(agi->agi_unlinked[bucket_index]);
1928 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1930 * We're at the head of the list. Get the inode's
1931 * on-disk buffer to see if there is anyone after us
1932 * on the list. Only modify our next pointer if it
1933 * is not already NULLAGINO. This saves us the overhead
1934 * of dealing with the buffer when there is no need to change it.
1937 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
1940 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
1941 error, mp->m_fsname);
1944 next_agino = be32_to_cpu(dip->di_next_unlinked);
1945 ASSERT(next_agino != 0);
1946 if (next_agino != NULLAGINO) {
1947 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1948 offset = ip->i_imap.im_boffset +
1949 offsetof(xfs_dinode_t, di_next_unlinked);
1950 xfs_trans_inode_buf(tp, ibp);
1951 xfs_trans_log_buf(tp, ibp, offset,
1952 (offset + sizeof(xfs_agino_t) - 1));
1953 xfs_inobp_check(mp, ibp);
1955 xfs_trans_brelse(tp, ibp);
1958 * Point the bucket head pointer at the next inode.
1960 ASSERT(next_agino != 0);
1961 ASSERT(next_agino != agino);
1962 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1963 offset = offsetof(xfs_agi_t, agi_unlinked) +
1964 (sizeof(xfs_agino_t) * bucket_index);
1965 xfs_trans_log_buf(tp, agibp, offset,
1966 (offset + sizeof(xfs_agino_t) - 1));
1969 * We need to search the list for the inode being freed.
1971 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1973 while (next_agino != agino) {
1975 * If the last inode wasn't the one pointing to
1976 * us, then release its buffer since we're not
1977 * going to do anything with it.
1979 if (last_ibp != NULL) {
1980 xfs_trans_brelse(tp, last_ibp);
1982 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1983 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
1984 &last_ibp, &last_offset, 0);
1987 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
1988 error, mp->m_fsname);
1991 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1992 ASSERT(next_agino != NULLAGINO);
1993 ASSERT(next_agino != 0);
1996 * Now last_ibp points to the buffer previous to us on
1997 * the unlinked list. Pull us from the list.
1999 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
2002 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2003 error, mp->m_fsname);
2006 next_agino = be32_to_cpu(dip->di_next_unlinked);
2007 ASSERT(next_agino != 0);
2008 ASSERT(next_agino != agino);
2009 if (next_agino != NULLAGINO) {
2010 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2011 offset = ip->i_imap.im_boffset +
2012 offsetof(xfs_dinode_t, di_next_unlinked);
2013 xfs_trans_inode_buf(tp, ibp);
2014 xfs_trans_log_buf(tp, ibp, offset,
2015 (offset + sizeof(xfs_agino_t) - 1));
2016 xfs_inobp_check(mp, ibp);
2018 xfs_trans_brelse(tp, ibp);
2021 * Point the previous inode on the list to the next inode.
2023 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2024 ASSERT(next_agino != 0);
2025 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2026 xfs_trans_inode_buf(tp, last_ibp);
2027 xfs_trans_log_buf(tp, last_ibp, offset,
2028 (offset + sizeof(xfs_agino_t) - 1));
2029 xfs_inobp_check(mp, last_ibp);
2036 xfs_inode_t *free_ip,
2040 xfs_mount_t *mp = free_ip->i_mount;
2041 int blks_per_cluster;
2044 int i, j, found, pre_flushed;
2047 xfs_inode_t *ip, **ip_found;
2048 xfs_inode_log_item_t *iip;
2049 xfs_log_item_t *lip;
2050 xfs_perag_t *pag = xfs_get_perag(mp, inum);
2052 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2053 blks_per_cluster = 1;
2054 ninodes = mp->m_sb.sb_inopblock;
2055 nbufs = XFS_IALLOC_BLOCKS(mp);
2057 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2058 mp->m_sb.sb_blocksize;
2059 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2060 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2063 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2065 for (j = 0; j < nbufs; j++, inum += ninodes) {
2066 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2067 XFS_INO_TO_AGBNO(mp, inum));
2071 * Look for each inode in memory and attempt to lock it;
2072 * we can be racing with flush and tail pushing here.
2073 * Any inode we get the locks on is added to an array of
2074 * inode items to process later.
2076 * If we get the buffer lock, we could beat a flush
2077 * or tail pushing thread to the lock here, in which
2078 * case they will go looking for the inode buffer
2079 * and fail; we need some other form of interlock here.
2083 for (i = 0; i < ninodes; i++) {
2084 read_lock(&pag->pag_ici_lock);
2085 ip = radix_tree_lookup(&pag->pag_ici_root,
2086 XFS_INO_TO_AGINO(mp, (inum + i)));
2088 /* Inode not in memory or we found it already, nothing to do. */
2091 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2092 read_unlock(&pag->pag_ici_lock);
2096 if (xfs_inode_clean(ip)) {
2097 read_unlock(&pag->pag_ici_lock);
2101 /* If we can get the locks then add it to the
2102 * list, otherwise by the time we get the bp lock
2103 * below it will already be attached to the inode buffer.
2107 /* This inode will already be locked - by us; let's keep it that way. */
2111 if (ip == free_ip) {
2112 if (xfs_iflock_nowait(ip)) {
2113 xfs_iflags_set(ip, XFS_ISTALE);
2114 if (xfs_inode_clean(ip)) {
2117 ip_found[found++] = ip;
2120 read_unlock(&pag->pag_ici_lock);
2124 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2125 if (xfs_iflock_nowait(ip)) {
2126 xfs_iflags_set(ip, XFS_ISTALE);
2128 if (xfs_inode_clean(ip)) {
2130 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2132 ip_found[found++] = ip;
2135 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2138 read_unlock(&pag->pag_ici_lock);
2141 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2142 mp->m_bsize * blks_per_cluster,
2146 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2148 if (lip->li_type == XFS_LI_INODE) {
2149 iip = (xfs_inode_log_item_t *)lip;
2150 ASSERT(iip->ili_logged == 1);
2151 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2152 xfs_trans_ail_copy_lsn(mp->m_ail,
2153 &iip->ili_flush_lsn,
2154 &iip->ili_item.li_lsn);
2155 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2158 lip = lip->li_bio_list;
2161 for (i = 0; i < found; i++) {
2166 ip->i_update_core = 0;
2168 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2172 iip->ili_last_fields = iip->ili_format.ilf_fields;
2173 iip->ili_format.ilf_fields = 0;
2174 iip->ili_logged = 1;
2175 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2176 &iip->ili_item.li_lsn);
2178 xfs_buf_attach_iodone(bp,
2179 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2180 xfs_istale_done, (xfs_log_item_t *)iip);
2181 if (ip != free_ip) {
2182 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2186 if (found || pre_flushed)
2187 xfs_trans_stale_inode_buf(tp, bp);
2188 xfs_trans_binval(tp, bp);
2191 kmem_free(ip_found);
2192 xfs_put_perag(mp, pag);
2196 * This is called to return an inode to the inode free list.
2197 * The inode should already be truncated to 0 length and have
2198 * no pages associated with it. This routine also assumes that
2199 * the inode is already a part of the transaction.
2201 * The on-disk copy of the inode will have been added to the list
2202 * of unlinked inodes in the AGI. We need to remove the inode from
2203 * that list atomically with respect to freeing it here.
2209 xfs_bmap_free_t *flist)
2213 xfs_ino_t first_ino;
2217 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2218 ASSERT(ip->i_transp == tp);
2219 ASSERT(ip->i_d.di_nlink == 0);
2220 ASSERT(ip->i_d.di_nextents == 0);
2221 ASSERT(ip->i_d.di_anextents == 0);
2222 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2223 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2224 ASSERT(ip->i_d.di_nblocks == 0);
2227 * Pull the on-disk inode from the AGI unlinked list.
2229 error = xfs_iunlink_remove(tp, ip);
2234 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2238 ip->i_d.di_mode = 0; /* mark incore inode as free */
2239 ip->i_d.di_flags = 0;
2240 ip->i_d.di_dmevmask = 0;
2241 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2242 ip->i_df.if_ext_max =
2243 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2244 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2245 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2247 * Bump the generation count so no one will be confused
2248 * by reincarnations of this inode.
2252 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2254 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
2259 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2260 * from picking up this inode when it is reclaimed (its incore state
2261	 * initialized but not flushed to disk yet). The in-core di_mode is
2262 * already cleared and a corresponding transaction logged.
2263 * The hack here just synchronizes the in-core to on-disk
2264 * di_mode value in advance before the actual inode sync to disk.
2265 * This is OK because the inode is already unlinked and would never
2266 * change its di_mode again for this inode generation.
2267	 * This is a temporary hack that would require a proper fix in the future.
2273 xfs_ifree_cluster(ip, tp, first_ino);
2280 * Reallocate the space for if_broot based on the number of records
2281 * being added or deleted as indicated in rec_diff. Move the records
2282 * and pointers in if_broot to fit the new size. When shrinking this
2283 * will eliminate holes between the records and pointers created by
2284 * the caller. When growing this will create holes to be filled in
2287 * The caller must not request to add more records than would fit in
2288 * the on-disk inode root. If the if_broot is currently NULL, then
2289 * one will be allocated if records are being added. The caller must also
2290 * not request that the number of records go below zero, although
2291 * it can go to zero.
2293 * ip -- the inode whose if_broot area is changing
2294 * ext_diff -- the change in the number of records, positive or negative,
2295 * requested for the if_broot array.
2303 struct xfs_mount *mp = ip->i_mount;
2306 struct xfs_btree_block *new_broot;
2313 * Handle the degenerate case quietly.
2315 if (rec_diff == 0) {
2319 ifp = XFS_IFORK_PTR(ip, whichfork);
2322 * If there wasn't any memory allocated before, just
2323 * allocate it now and get out.
2325 if (ifp->if_broot_bytes == 0) {
2326 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2327 ifp->if_broot = kmem_alloc(new_size, KM_SLEEP);
2328 ifp->if_broot_bytes = (int)new_size;
2333 * If there is already an existing if_broot, then we need
2334 * to realloc() it and shift the pointers to their new
2335 * location. The records don't change location because
2336 * they are kept butted up against the btree block header.
2338 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2339 new_max = cur_max + rec_diff;
2340 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2341 ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
2342				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ KM_SLEEP);
2344 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2345 ifp->if_broot_bytes);
2346		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, (int)new_size);
2348 ifp->if_broot_bytes = (int)new_size;
2349 ASSERT(ifp->if_broot_bytes <=
2350 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2351 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2356 * rec_diff is less than 0. In this case, we are shrinking the
2357 * if_broot buffer. It must already exist. If we go to zero
2358 * records, just get rid of the root and clear the status bit.
2360 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2361 cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
2362 new_max = cur_max + rec_diff;
2363 ASSERT(new_max >= 0);
2365 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2369 new_broot = kmem_alloc(new_size, KM_SLEEP);
2371 * First copy over the btree block header.
2373 memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
2376 ifp->if_flags &= ~XFS_IFBROOT;
2380 * Only copy the records and pointers if there are any.
2384 * First copy the records.
2386 op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
2387 np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
2388 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2391 * Then copy the pointers.
2393 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
2394 ifp->if_broot_bytes);
2395		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, (int)new_size);
2397 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2399 kmem_free(ifp->if_broot);
2400 ifp->if_broot = new_broot;
2401 ifp->if_broot_bytes = (int)new_size;
2402 ASSERT(ifp->if_broot_bytes <=
2403 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
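/*
 * Illustrative sketch (not part of the original source): the grow case
 * above, on a simplified "header + records + pointers" buffer.  The
 * records stay butted up against the header, so after the realloc only
 * the pointer array at the tail has to be shifted to its new offset.
 * All names and parameters here are hypothetical; sizes are in bytes.
 */
STATIC char *
example_grow_root(
	char		*buf,		/* existing root buffer */
	int		hdr,		/* block header size */
	int		old_max,	/* current record/pointer slots */
	int		new_max,	/* requested record/pointer slots */
	size_t		recsz,		/* bytes per record */
	size_t		ptrsz)		/* bytes per pointer */
{
	size_t	old_size = hdr + old_max * (recsz + ptrsz);
	size_t	new_size = hdr + new_max * (recsz + ptrsz);

	buf = kmem_realloc(buf, new_size, old_size, KM_SLEEP);
	/* slide the pointer array out; the hole left behind is new space */
	memmove(buf + hdr + new_max * recsz,	/* new pointer offset */
		buf + hdr + old_max * recsz,	/* old pointer offset */
		old_max * ptrsz);
	return buf;
}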
2409 * This is called when the amount of space needed for if_data
2410 * is increased or decreased. The change in size is indicated by
2411 * the number of bytes that need to be added or deleted in the
2412 * byte_diff parameter.
2414 * If the amount of space needed has decreased below the size of the
2415 * inline buffer, then switch to using the inline buffer. Otherwise,
2416 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2417 * to what is needed.
2419 * ip -- the inode whose if_data area is changing
2420 * byte_diff -- the change in the number of bytes, positive or negative,
2421 * requested for the if_data array.
2433 if (byte_diff == 0) {
2437 ifp = XFS_IFORK_PTR(ip, whichfork);
2438 new_size = (int)ifp->if_bytes + byte_diff;
2439 ASSERT(new_size >= 0);
2441 if (new_size == 0) {
2442 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2443 kmem_free(ifp->if_u1.if_data);
2445 ifp->if_u1.if_data = NULL;
2447 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2449 * If the valid extents/data can fit in if_inline_ext/data,
2450 * copy them from the malloc'd vector and free it.
2452 if (ifp->if_u1.if_data == NULL) {
2453 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2454 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2455 ASSERT(ifp->if_real_bytes != 0);
2456			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data, new_size);
2458 kmem_free(ifp->if_u1.if_data);
2459 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2464 * Stuck with malloc/realloc.
2465 * For inline data, the underlying buffer must be
2466 * a multiple of 4 bytes in size so that it can be
2467		 * logged and stay on word boundaries. We enforce that here.
2470 real_size = roundup(new_size, 4);
2471 if (ifp->if_u1.if_data == NULL) {
2472 ASSERT(ifp->if_real_bytes == 0);
2473 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2474 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2476 * Only do the realloc if the underlying size
2477 * is really changing.
2479 if (ifp->if_real_bytes != real_size) {
2480 ifp->if_u1.if_data =
2481					kmem_realloc(ifp->if_u1.if_data, real_size, ifp->if_real_bytes, KM_SLEEP);
2487 ASSERT(ifp->if_real_bytes == 0);
2488 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2489		memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, ifp->if_bytes);
2493 ifp->if_real_bytes = real_size;
2494 ifp->if_bytes = new_size;
2495 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
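/*
 * Illustrative sketch (not part of the original source): the small
 * buffer policy above, reduced to its core.  Data at or below the
 * inline capacity lives inside the structure itself; anything larger
 * is heap allocated, rounded up to a 4-byte multiple so it can be
 * logged on word boundaries.  All names here are hypothetical.
 */
#define EXAMPLE_INLINE_BYTES	32	/* hypothetical inline capacity */

struct example_fork {
	char	*data;				/* active buffer */
	char	inline_buf[EXAMPLE_INLINE_BYTES];
	int	real_bytes;			/* heap bytes, 0 if inline */
};

STATIC void
example_size_buffer(
	struct example_fork	*f,
	int			new_size)
{
	if (new_size <= EXAMPLE_INLINE_BYTES) {
		/* fits inline: copy back from the heap and free it */
		if (f->data != f->inline_buf) {
			memcpy(f->inline_buf, f->data, new_size);
			kmem_free(f->data);
		}
		f->data = f->inline_buf;
		f->real_bytes = 0;
	} else {
		int	real_size = roundup(new_size, 4);

		if (f->data == f->inline_buf) {
			/* inline -> heap: allocate, then preserve old bytes */
			f->data = kmem_alloc(real_size, KM_SLEEP);
			memcpy(f->data, f->inline_buf, EXAMPLE_INLINE_BYTES);
		} else if (f->real_bytes != real_size) {
			/* heap -> heap: realloc only on a real size change */
			f->data = kmem_realloc(f->data, real_size,
					       f->real_bytes, KM_SLEEP);
		}
		f->real_bytes = real_size;
	}
}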
2505 ifp = XFS_IFORK_PTR(ip, whichfork);
2506 if (ifp->if_broot != NULL) {
2507 kmem_free(ifp->if_broot);
2508 ifp->if_broot = NULL;
2512 * If the format is local, then we can't have an extents
2513 * array so just look for an inline data array. If we're
2514 * not local then we may or may not have an extents list,
2515 * so check and free it up if we do.
2517 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2518 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2519 (ifp->if_u1.if_data != NULL)) {
2520 ASSERT(ifp->if_real_bytes != 0);
2521 kmem_free(ifp->if_u1.if_data);
2522 ifp->if_u1.if_data = NULL;
2523 ifp->if_real_bytes = 0;
2525 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2526 ((ifp->if_flags & XFS_IFEXTIREC) ||
2527 ((ifp->if_u1.if_extents != NULL) &&
2528 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2529 ASSERT(ifp->if_real_bytes != 0);
2530 xfs_iext_destroy(ifp);
2532 ASSERT(ifp->if_u1.if_extents == NULL ||
2533 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2534 ASSERT(ifp->if_real_bytes == 0);
2535 if (whichfork == XFS_ATTR_FORK) {
2536 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2542 * This is called to free all the memory associated with an inode.
2543 * It must free the inode itself and any buffers allocated for
2544 * if_extents/if_data and if_broot. It must also free the lock
2545 * associated with the inode.
2547 * Note: because we don't initialise everything on reallocation out
2548 * of the zone, we must ensure we nullify everything correctly before
2549 * freeing the structure.
2555 switch (ip->i_d.di_mode & S_IFMT) {
2559 xfs_idestroy_fork(ip, XFS_DATA_FORK);
2563 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2565 #ifdef XFS_INODE_TRACE
2566 ktrace_free(ip->i_trace);
2568 #ifdef XFS_BMAP_TRACE
2569 ktrace_free(ip->i_xtrace);
2571 #ifdef XFS_BTREE_TRACE
2572 ktrace_free(ip->i_btrace);
2575 ktrace_free(ip->i_rwtrace);
2577 #ifdef XFS_ILOCK_TRACE
2578 ktrace_free(ip->i_lock_trace);
2580 #ifdef XFS_DIR2_TRACE
2581 ktrace_free(ip->i_dir_trace);
2585 * Only if we are shutting down the fs will we see an
2586 * inode still in the AIL. If it is there, we should remove
2587 * it to prevent a use-after-free from occurring.
2589 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2590 struct xfs_ail *ailp = lip->li_ailp;
2592 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2593 XFS_FORCED_SHUTDOWN(ip->i_mount));
2594 if (lip->li_flags & XFS_LI_IN_AIL) {
2595 spin_lock(&ailp->xa_lock);
2596 if (lip->li_flags & XFS_LI_IN_AIL)
2597 xfs_trans_ail_delete(ailp, lip);
2599 spin_unlock(&ailp->xa_lock);
2601 xfs_inode_item_destroy(ip);
2604 /* asserts to verify all state is correct here */
2605 ASSERT(atomic_read(&ip->i_iocount) == 0);
2606 ASSERT(atomic_read(&ip->i_pincount) == 0);
2607 ASSERT(!spin_is_locked(&ip->i_flags_lock));
2608 ASSERT(completion_done(&ip->i_flush));
2609 kmem_zone_free(xfs_inode_zone, ip);
2614 * Increment the pin count of the given inode.
2615 * The pin count is an atomic counter in the inode, so no extra locking is needed.
2621 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2623 atomic_inc(&ip->i_pincount);
2627 * Decrement the pin count of the given inode, and wake up
2628 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2629 * inode must have been previously pinned with a call to xfs_ipin().
2635 ASSERT(atomic_read(&ip->i_pincount) > 0);
2637 if (atomic_dec_and_test(&ip->i_pincount))
2638 wake_up(&ip->i_ipin_wait);
2642 * This is called to unpin an inode. It can be directed to wait or to return
2643 * immediately without waiting for the inode to be unpinned. The caller must
2644 * have the inode locked in at least shared mode so that the inode cannot be
2645 * subsequently pinned once someone is waiting for it to be unpinned.
2652 xfs_inode_log_item_t *iip = ip->i_itemp;
2654 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2655 if (atomic_read(&ip->i_pincount) == 0)
2658 /* Give the log a push to start the unpinning I/O */
2659 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
2660 iip->ili_last_lsn : 0, XFS_LOG_FORCE);
2662 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2669 __xfs_iunpin_wait(ip, 1);
2676 __xfs_iunpin_wait(ip, 0);
2681 * xfs_iextents_copy()
2683 * This is called to copy the REAL extents (as opposed to the delayed
2684 * allocation extents) from the inode into the given buffer. It
2685 * returns the number of bytes copied into the buffer.
2687 * If there are no delayed allocation extents, then we can just
2688 * memcpy() the extents into the buffer. Otherwise, we need to
2689 * examine each extent in turn and skip those which are delayed.
2701 xfs_fsblock_t start_block;
2703 ifp = XFS_IFORK_PTR(ip, whichfork);
2704 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2705 ASSERT(ifp->if_bytes > 0);
2707 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2708 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2712 * There are some delayed allocation extents in the
2713 * inode, so copy the extents one at a time and skip
2714 * the delayed ones. There must be at least one
2715 * non-delayed extent.
2718 for (i = 0; i < nrecs; i++) {
2719 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2720 start_block = xfs_bmbt_get_startblock(ep);
2721 if (ISNULLSTARTBLOCK(start_block)) {
2723 * It's a delayed allocation extent, so skip it.
2728 /* Translate to on disk format */
2729 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2730 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2734 ASSERT(copied != 0);
2735 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2737 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
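/*
 * Illustrative sketch (not part of the original source): the filter
 * that the copy loop above applies, in isolation.  A delayed
 * allocation extent still carries a "null" start block sentinel
 * instead of a real disk block, so it must never be written out.
 */
STATIC int
example_extent_is_real(
	xfs_bmbt_rec_host_t	*ep)	/* incore extent record */
{
	return !ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep));
}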
2741 * Each of the following cases stores data into the same region
2742 * of the on-disk inode, so only one of them can be valid at
2743 * any given time. While it is possible to have conflicting formats
2744 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2745 * in EXTENTS format, this can only happen when the fork has
2746 * changed formats after being modified but before being flushed.
2747 * In these cases, the format always takes precedence, because the
2748 * format indicates the current state of the fork.
2755 xfs_inode_log_item_t *iip,
2762 #ifdef XFS_TRANS_DEBUG
2765 static const short brootflag[2] =
2766 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2767 static const short dataflag[2] =
2768 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2769 static const short extflag[2] =
2770 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2774 ifp = XFS_IFORK_PTR(ip, whichfork);
2776 * This can happen if we gave up in iformat in an error path,
2777 * for the attribute fork.
2780 ASSERT(whichfork == XFS_ATTR_FORK);
2783 cp = XFS_DFORK_PTR(dip, whichfork);
2785 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2786 case XFS_DINODE_FMT_LOCAL:
2787 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2788 (ifp->if_bytes > 0)) {
2789 ASSERT(ifp->if_u1.if_data != NULL);
2790 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2791 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2795 case XFS_DINODE_FMT_EXTENTS:
2796 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2797 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2798 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2799 (ifp->if_bytes == 0));
2800 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2801 (ifp->if_bytes > 0));
2802 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2803 (ifp->if_bytes > 0)) {
2804 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2805 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2810 case XFS_DINODE_FMT_BTREE:
2811 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2812 (ifp->if_broot_bytes > 0)) {
2813 ASSERT(ifp->if_broot != NULL);
2814 ASSERT(ifp->if_broot_bytes <=
2815 (XFS_IFORK_SIZE(ip, whichfork) +
2816 XFS_BROOT_SIZE_ADJ));
2817 xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
2818 (xfs_bmdr_block_t *)cp,
2819 XFS_DFORK_SIZE(dip, mp, whichfork));
2823 case XFS_DINODE_FMT_DEV:
2824 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2825 ASSERT(whichfork == XFS_DATA_FORK);
2826 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
2830 case XFS_DINODE_FMT_UUID:
2831 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2832 ASSERT(whichfork == XFS_DATA_FORK);
2833 memcpy(XFS_DFORK_DPTR(dip),
2834 &ip->i_df.if_u2.if_uuid,
2850 xfs_mount_t *mp = ip->i_mount;
2851 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
2852 unsigned long first_index, mask;
2853 unsigned long inodes_per_cluster;
2855 xfs_inode_t **ilist;
2862 ASSERT(pag->pagi_inodeok);
2863 ASSERT(pag->pag_ici_init);
2865 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2866 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2867 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2871 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2872 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2873 read_lock(&pag->pag_ici_lock);
2874 /* really need a gang lookup range call here */
2875 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2876 first_index, inodes_per_cluster);
2880 for (i = 0; i < nr_found; i++) {
2884 /* if the inode lies outside this cluster, we're done. */
2885 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
2888 * Do an un-protected check to see if the inode is dirty and
2889 * is a candidate for flushing. These checks will be repeated
2890 * later after the appropriate locks are acquired.
2892 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2896 * Try to get locks. If any are unavailable or it is pinned,
2897 * then this inode cannot be flushed and is skipped.
2900 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2902 if (!xfs_iflock_nowait(iq)) {
2903 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2906 if (xfs_ipincount(iq)) {
2908 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2913		 * Arriving here means that this inode can be flushed. First
2914 * re-check that it's dirty before flushing.
2916 if (!xfs_inode_clean(iq)) {
2918 error = xfs_iflush_int(iq, bp);
2920 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2921 goto cluster_corrupt_out;
2927 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2931 XFS_STATS_INC(xs_icluster_flushcnt);
2932 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2936 read_unlock(&pag->pag_ici_lock);
2941 cluster_corrupt_out:
2943 * Corruption detected in the clustering loop. Invalidate the
2944 * inode buffer and shut down the filesystem.
2946 read_unlock(&pag->pag_ici_lock);
2948 * Clean up the buffer. If it was B_DELWRI, just release it --
2949 * brelse can handle it with no problems. If not, shut down the
2950 * filesystem before releasing the buffer.
2952 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
2956 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2958 if (!bufwasdelwri) {
2960 * Just like incore_relse: if we have b_iodone functions,
2961 * mark the buffer as an error and call them. Otherwise
2962 * mark it as stale and brelse.
2964 if (XFS_BUF_IODONE_FUNC(bp)) {
2965 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
2969 XFS_BUF_ERROR(bp,EIO);
2978 * Unlocks the flush lock
2980 xfs_iflush_abort(iq);
2982 return XFS_ERROR(EFSCORRUPTED);
2986 * xfs_iflush() will write a modified inode's changes out to the
2987 * inode's on disk home. The caller must have the inode lock held
2988 * in at least shared mode and the inode flush completion must be
2989 * active as well. The inode lock will still be held upon return from
2990 * the call and the caller is free to unlock it.
2991 * The inode flush will be completed when the inode reaches the disk.
2992 * The flags indicate how the inode's buffer should be written out.
2999 xfs_inode_log_item_t *iip;
3004 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
3005 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3007 XFS_STATS_INC(xs_iflush_count);
3009 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3010 ASSERT(!completion_done(&ip->i_flush));
3011 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3012 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3018 * If the inode isn't dirty, then just release the inode
3019 * flush lock and do nothing.
3021 if (xfs_inode_clean(ip)) {
3027 * We can't flush the inode until it is unpinned, so wait for it if we
3028	 * are allowed to block. We know no one new can pin it, because we are
3029 * holding the inode lock shared and you need to hold it exclusively to
3032 * If we are not allowed to block, force the log out asynchronously so
3033 * that when we come back the inode will be unpinned. If other inodes
3034 * in the same cluster are dirty, they will probably write the inode
3035 * out for us if they occur after the log force completes.
3037 if (noblock && xfs_ipincount(ip)) {
3038 xfs_iunpin_nowait(ip);
3042 xfs_iunpin_wait(ip);
3045 * This may have been unpinned because the filesystem is shutting
3046 * down forcibly. If that's the case we must not write this inode
3047 * to disk, because the log record didn't make it to disk!
3049 if (XFS_FORCED_SHUTDOWN(mp)) {
3050 ip->i_update_core = 0;
3052 iip->ili_format.ilf_fields = 0;
3054 return XFS_ERROR(EIO);
3058	 * Decide how the buffer will be flushed out. This is done before
3059 * the call to xfs_iflush_int because this field is zeroed by it.
3061 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3063 * Flush out the inode buffer according to the directions
3064 * of the caller. In the cases where the caller has given
3065		 * us a choice, choose the non-delwri case. This is because
3066 * the inode is in the AIL and we need to get it out soon.
3069 case XFS_IFLUSH_SYNC:
3070 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3073 case XFS_IFLUSH_ASYNC_NOBLOCK:
3074 case XFS_IFLUSH_ASYNC:
3075 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3078 case XFS_IFLUSH_DELWRI:
3088 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3089 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3090 case XFS_IFLUSH_DELWRI:
3093 case XFS_IFLUSH_ASYNC_NOBLOCK:
3094 case XFS_IFLUSH_ASYNC:
3097 case XFS_IFLUSH_SYNC:
3108 * Get the buffer containing the on-disk inode.
3110 error = xfs_itobp(mp, NULL, ip, &dip, &bp,
3111 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
3118 * First flush out the inode that xfs_iflush was called with.
3120 error = xfs_iflush_int(ip, bp);
3125 * If the buffer is pinned then push on the log now so we won't
3126 * get stuck waiting in the write for too long.
3128 if (XFS_BUF_ISPINNED(bp))
3129 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3133 * see if other inodes can be gathered into this write
3135 error = xfs_iflush_cluster(ip, bp);
3137 goto cluster_corrupt_out;
3139 if (flags & INT_DELWRI) {
3140 xfs_bdwrite(mp, bp);
3141 } else if (flags & INT_ASYNC) {
3142 error = xfs_bawrite(mp, bp);
3144 error = xfs_bwrite(mp, bp);
3150 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3151 cluster_corrupt_out:
3153 * Unlocks the flush lock
3155 xfs_iflush_abort(ip);
3156 return XFS_ERROR(EFSCORRUPTED);
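/*
 * Illustrative caller sketch (not part of the original source): one way
 * a hypothetical caller could drive a synchronous flush under the
 * locking contract described above.  Real callers (sync, reclaim) add
 * further dirty/pin checks; error handling is elided here.
 */
STATIC int
example_flush_inode_sync(
	xfs_inode_t	*ip)
{
	int		error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);	/* at least shared ilock */
	xfs_iflock(ip);			/* flush lock; xfs_iflush releases it */
	error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);	/* ilock is still ours to drop */
	return error;
}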
3165 xfs_inode_log_item_t *iip;
3168 #ifdef XFS_TRANS_DEBUG
3172 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3173 ASSERT(!completion_done(&ip->i_flush));
3174 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3175 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3182 * If the inode isn't dirty, then just release the inode
3183 * flush lock and do nothing.
3185 if (xfs_inode_clean(ip)) {
3190 /* set *dip = inode's place in the buffer */
3191 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
3194 * Clear i_update_core before copying out the data.
3195 * This is for coordination with our timestamp updates
3196 * that don't hold the inode lock. They will always
3197 * update the timestamps BEFORE setting i_update_core,
3198 * so if we clear i_update_core after they set it we
3199 * are guaranteed to see their updates to the timestamps.
3200 * I believe that this depends on strongly ordered memory
3201 * semantics, but we have that. We use the SYNCHRONIZE
3202 * macro to make sure that the compiler does not reorder
3203 * the i_update_core access below the data copy below.
3205 ip->i_update_core = 0;
3209 * Make sure to get the latest atime from the Linux inode.
3211 xfs_synchronize_atime(ip);
3213 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC,
3214 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3215 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3216 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3217 ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3220 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3221 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3222 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3223 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3224 ip->i_ino, ip, ip->i_d.di_magic);
3227 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3229 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3230 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3231 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3232 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3233 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3237 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3239 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3240 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3241 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3242 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3243 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3244 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3249 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3250 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3251 XFS_RANDOM_IFLUSH_5)) {
3252 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3253 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3255 ip->i_d.di_nextents + ip->i_d.di_anextents,
3260 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3261 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3262 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3263 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3264 ip->i_ino, ip->i_d.di_forkoff, ip);
3268 * bump the flush iteration count, used to detect flushes which
3269 * postdate a log record during recovery.
3272 ip->i_d.di_flushiter++;
3275 * Copy the dirty parts of the inode into the on-disk
3276 * inode. We always copy out the core of the inode,
3277 * because if the inode is dirty at all the core must
3280 xfs_dinode_to_disk(dip, &ip->i_d);
3282 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3283 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3284 ip->i_d.di_flushiter = 0;
3287 * If this is really an old format inode and the superblock version
3288 * has not been updated to support only new format inodes, then
3289 * convert back to the old inode format. If the superblock version
3290 * has been updated, then make the conversion permanent.
3292 ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
3293 if (ip->i_d.di_version == 1) {
3294 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
3298 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3299 dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3302 * The superblock version has already been bumped,
3303 * so just make the conversion to the new inode
3306 ip->i_d.di_version = 2;
3307 dip->di_version = 2;
3308 ip->i_d.di_onlink = 0;
3310 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3311 memset(&(dip->di_pad[0]), 0,
3312 sizeof(dip->di_pad));
3313 ASSERT(ip->i_d.di_projid == 0);
3317 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
3318 if (XFS_IFORK_Q(ip))
3319 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3320 xfs_inobp_check(mp, bp);
3323 * We've recorded everything logged in the inode, so we'd
3324 * like to clear the ilf_fields bits so we don't log and
3325 * flush things unnecessarily. However, we can't stop
3326 * logging all this information until the data we've copied
3327 * into the disk buffer is written to disk. If we did we might
3328 * overwrite the copy of the inode in the log with all the
3329 * data after re-logging only part of it, and in the face of
3330 * a crash we wouldn't have all the data we need to recover.
3332 * What we do is move the bits to the ili_last_fields field.
3333 * When logging the inode, these bits are moved back to the
3334 * ilf_fields field. In the xfs_iflush_done() routine we
3335 * clear ili_last_fields, since we know that the information
3336 * those bits represent is permanently on disk. As long as
3337 * the flush completes before the inode is logged again, then
3338 * both ilf_fields and ili_last_fields will be cleared.
3340 * We can play with the ilf_fields bits here, because the inode
3341 * lock must be held exclusively in order to set bits there
3342 * and the flush lock protects the ili_last_fields bits.
3343 * Set ili_logged so the flush done
3344 * routine can tell whether or not to look in the AIL.
3345 * Also, store the current LSN of the inode so that we can tell
3346 * whether the item has moved in the AIL from xfs_iflush_done().
3347 * In order to read the lsn we need the AIL lock, because
3348 * it is a 64 bit value that cannot be read atomically.
3350 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3351 iip->ili_last_fields = iip->ili_format.ilf_fields;
3352 iip->ili_format.ilf_fields = 0;
3353 iip->ili_logged = 1;
3355 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3356 &iip->ili_item.li_lsn);
3359 * Attach the function xfs_iflush_done to the inode's
3360 * buffer. This will remove the inode from the AIL
3361 * and unlock the inode's flush lock when the inode is
3362 * completely written to disk.
3364 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3365 xfs_iflush_done, (xfs_log_item_t *)iip);
3367 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3368 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3371 * We're flushing an inode which is not in the AIL and has
3372 * not been logged but has i_update_core set. For this
3373 * case we can use a B_DELWRI flush and immediately drop
3374 * the inode flush lock because we can avoid the whole
3375 * AIL state thing. It's OK to drop the flush lock now,
3376 * because we've already locked the buffer and to do anything
3377 * you really need both.
3380 ASSERT(iip->ili_logged == 0);
3381 ASSERT(iip->ili_last_fields == 0);
3382 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3390 return XFS_ERROR(EFSCORRUPTED);
3395 #ifdef XFS_ILOCK_TRACE
3396 ktrace_t *xfs_ilock_trace_buf;
3399 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3401 ktrace_enter(ip->i_lock_trace,
3403 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3404 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3405 (void *)ra, /* caller of ilock */
3406 (void *)(unsigned long)current_cpu(),
3407 (void *)(unsigned long)current_pid(),
3408 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3413 * Return a pointer to the extent record at file index idx.
3415 xfs_bmbt_rec_host_t *
3417 xfs_ifork_t *ifp, /* inode fork pointer */
3418 xfs_extnum_t idx) /* index of target extent */
3421 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3422 return ifp->if_u1.if_ext_irec->er_extbuf;
3423 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3424 xfs_ext_irec_t *erp; /* irec pointer */
3425 int erp_idx = 0; /* irec index */
3426 xfs_extnum_t page_idx = idx; /* ext index in target list */
3428 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3429 return &erp->er_extbuf[page_idx];
3430 } else if (ifp->if_bytes) {
3431 return &ifp->if_u1.if_extents[idx];
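/*
 * Illustrative sketch (not part of the original source): walking every
 * incore extent through xfs_iext_get_ext(), which hides whether the
 * fork currently uses the inline buffer, a direct list, or the
 * indirection array.  Assumes the extents are already read in
 * (XFS_IFEXTENTS is set).
 */
STATIC void
example_walk_extents(
	xfs_ifork_t	*ifp)	/* inode fork pointer */
{
	xfs_bmbt_irec_t	irec;
	xfs_extnum_t	nextents;
	xfs_extnum_t	i;

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (i = 0; i < nextents; i++) {
		/* decode the packed record into host format */
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, i), &irec);
		/* irec.br_startoff/br_startblock/br_blockcount are usable */
	}
}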
3438 * Insert new item(s) into the extent records for incore inode
3439 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3443 xfs_ifork_t *ifp, /* inode fork pointer */
3444 xfs_extnum_t idx, /* starting index of new items */
3445 xfs_extnum_t count, /* number of inserted items */
3446 xfs_bmbt_irec_t *new) /* items to insert */
3448 xfs_extnum_t i; /* extent record index */
3450 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3451 xfs_iext_add(ifp, idx, count);
3452 for (i = idx; i < idx + count; i++, new++)
3453 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3457 * This is called when the amount of space required for incore file
3458 * extents needs to be increased. The ext_diff parameter stores the
3459 * number of new extents being added and the idx parameter contains
3460 * the extent index where the new extents will be added. If the new
3461 * extents are being appended, then we just need to (re)allocate and
3462 * initialize the space. Otherwise, if the new extents are being
3463 * inserted into the middle of the existing entries, a bit more work
3464 * is required to make room for the new extents to be inserted. The
3465 * caller is responsible for filling in the new extent entries upon return.
3470 xfs_ifork_t *ifp, /* inode fork pointer */
3471 xfs_extnum_t idx, /* index to begin adding exts */
3472 int ext_diff) /* number of extents to add */
3474 int byte_diff; /* new bytes being added */
3475 int new_size; /* size of extents after adding */
3476 xfs_extnum_t nextents; /* number of extents in file */
3478 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3479 ASSERT((idx >= 0) && (idx <= nextents));
3480 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3481 new_size = ifp->if_bytes + byte_diff;
3483 * If the new number of extents (nextents + ext_diff)
3484 * fits inside the inode, then continue to use the inline
3487 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3488 if (idx < nextents) {
3489 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3490 &ifp->if_u2.if_inline_ext[idx],
3491 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3492 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3494 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3495 ifp->if_real_bytes = 0;
3496 ifp->if_lastex = nextents + ext_diff;
3499 * Otherwise use a linear (direct) extent list.
3500 * If the extents are currently inside the inode,
3501 * xfs_iext_realloc_direct will switch us from
3502 * inline to direct extent allocation mode.
3504 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3505 xfs_iext_realloc_direct(ifp, new_size);
3506 if (idx < nextents) {
3507 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3508 &ifp->if_u1.if_extents[idx],
3509 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3510 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3513 /* Indirection array */
3515 xfs_ext_irec_t *erp;
3519 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3520 if (ifp->if_flags & XFS_IFEXTIREC) {
3521 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3523 xfs_iext_irec_init(ifp);
3524 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3525 erp = ifp->if_u1.if_ext_irec;
3527 /* Extents fit in target extent page */
3528 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3529 if (page_idx < erp->er_extcount) {
3530 memmove(&erp->er_extbuf[page_idx + ext_diff],
3531 &erp->er_extbuf[page_idx],
3532 (erp->er_extcount - page_idx) *
3533 sizeof(xfs_bmbt_rec_t));
3534 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3536 erp->er_extcount += ext_diff;
3537 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3539 /* Insert a new extent page */
3541 xfs_iext_add_indirect_multi(ifp,
3542 erp_idx, page_idx, ext_diff);
3545 * If extent(s) are being appended to the last page in
3546 * the indirection array and the new extent(s) don't fit
3547 * in the page, then erp is NULL and erp_idx is set to
3548 * the next index needed in the indirection array.
3551 int count = ext_diff;
3554 erp = xfs_iext_irec_new(ifp, erp_idx);
3555 erp->er_extcount = count;
3556 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3563 ifp->if_bytes = new_size;
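/*
 * Illustrative sketch (not part of the original source): the core move
 * that the inline and direct cases above both make.  Open a zeroed
 * hole with memmove()/memset() and leave it to the caller to fill in
 * the new records.  The function name is hypothetical.
 */
STATIC void
example_open_hole(
	xfs_bmbt_rec_host_t	*base,		/* incore extent array */
	xfs_extnum_t		nextents,	/* records currently in use */
	xfs_extnum_t		idx,		/* insertion index */
	int			ext_diff)	/* records being inserted */
{
	if (idx < nextents)	/* inserting into the middle of the list */
		memmove(&base[idx + ext_diff], &base[idx],
			(nextents - idx) * sizeof(xfs_bmbt_rec_host_t));
	/* zero the hole so stale record bits can never leak through */
	memset(&base[idx], 0, ext_diff * sizeof(xfs_bmbt_rec_host_t));
}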
3567 * This is called when incore extents are being added to the indirection
3568 * array and the new extents do not fit in the target extent list. The
3569 * erp_idx parameter contains the irec index for the target extent list
3570 * in the indirection array, and the idx parameter contains the extent
3571 * index within the list. The number of extents being added is stored
3572 * in the count parameter.
3574 * |-------|   |-------|
3575 * |       |   |       |    idx - number of extents before idx
3576 * |  idx  |   | count |
3577 * |       |   |       |    count - number of extents being inserted at idx
3578 * |-------|   |-------|
3579 * | count |   | nex2  |    nex2 - number of extents after idx + count
3580 * |-------|   |-------|
3583 xfs_iext_add_indirect_multi(
3584 xfs_ifork_t *ifp, /* inode fork pointer */
3585 int erp_idx, /* target extent irec index */
3586 xfs_extnum_t idx, /* index within target list */
3587 int count) /* new extents being added */
3589 int byte_diff; /* new bytes being added */
3590 xfs_ext_irec_t *erp; /* pointer to irec entry */
3591 xfs_extnum_t ext_diff; /* number of extents to add */
3592 xfs_extnum_t ext_cnt; /* new extents still needed */
3593 xfs_extnum_t nex2; /* extents after idx + count */
3594 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3595 int nlists; /* number of irec's (lists) */
3597 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3598 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3599 nex2 = erp->er_extcount - idx;
3600 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3603 * Save second part of target extent list
3604	 * (all extents from index idx onward). */
3606 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3607 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3608 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3609 erp->er_extcount -= nex2;
3610 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3611 memset(&erp->er_extbuf[idx], 0, byte_diff);
3615 * Add the new extents to the end of the target
3616 * list, then allocate new irec record(s) and
3617 * extent buffer(s) as needed to store the rest
3618 * of the new extents.
3621 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3623 erp->er_extcount += ext_diff;
3624 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3625 ext_cnt -= ext_diff;
3629 erp = xfs_iext_irec_new(ifp, erp_idx);
3630 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3631 erp->er_extcount = ext_diff;
3632 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3633 ext_cnt -= ext_diff;
3636 /* Add nex2 extents back to indirection array */
3638 xfs_extnum_t ext_avail;
3641 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3642 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3645 * If nex2 extents fit in the current page, append
3646 * nex2_ep after the new extents.
3648 if (nex2 <= ext_avail) {
3649 i = erp->er_extcount;
3652		 * Otherwise, check if space is available in the next indirection array slot. */
3655 else if ((erp_idx < nlists - 1) &&
3656 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3657 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3660 /* Create a hole for nex2 extents */
3661 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3662 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3665		 * Final choice: create a new extent page for the nex2 extents. */
3670 erp = xfs_iext_irec_new(ifp, erp_idx);
3672 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3674 erp->er_extcount += nex2;
3675 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
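/*
 * Illustrative sketch (not part of the original source): the clamping
 * pattern the loops above use to dole a run of new extents out page by
 * page.  Each extent page holds at most XFS_LINEAR_EXTS records, so
 * every pass places MIN(what is left, what fits in one page).
 */
STATIC void
example_split_across_pages(
	int		ext_cnt)	/* extents still to place */
{
	int		ext_diff;	/* extents placed this pass */

	while (ext_cnt) {
		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
		/* ... allocate or extend a page for ext_diff records ... */
		ext_cnt -= ext_diff;
	}
}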
3680 * This is called when the amount of space required for incore file
3681 * extents needs to be decreased. The ext_diff parameter stores the
3682 * number of extents to be removed and the idx parameter contains
3683 * the extent index where the extents will be removed from.
3685 * If the amount of space needed has decreased below the linear
3686 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3687 * extent array. Otherwise, use kmem_realloc() to adjust the
3688 * size to what is needed.
3692 xfs_ifork_t *ifp, /* inode fork pointer */
3693 xfs_extnum_t idx, /* index to begin removing exts */
3694 int ext_diff) /* number of extents to remove */
3696 xfs_extnum_t nextents; /* number of extents in file */
3697 int new_size; /* size of extents after removal */
3699 ASSERT(ext_diff > 0);
3700 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3701 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3703 if (new_size == 0) {
3704 xfs_iext_destroy(ifp);
3705 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3706 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3707 } else if (ifp->if_real_bytes) {
3708 xfs_iext_remove_direct(ifp, idx, ext_diff);
3710 xfs_iext_remove_inline(ifp, idx, ext_diff);
3712 ifp->if_bytes = new_size;
3716 * This removes ext_diff extents from the inline buffer, beginning
3717 * at extent index idx.
3720 xfs_iext_remove_inline(
3721 xfs_ifork_t *ifp, /* inode fork pointer */
3722 xfs_extnum_t idx, /* index to begin removing exts */
3723 int ext_diff) /* number of extents to remove */
3725 int nextents; /* number of extents in file */
3727 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3728 ASSERT(idx < XFS_INLINE_EXTS);
3729 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3730 ASSERT(((nextents - ext_diff) > 0) &&
3731 (nextents - ext_diff) < XFS_INLINE_EXTS);
3733 if (idx + ext_diff < nextents) {
3734 memmove(&ifp->if_u2.if_inline_ext[idx],
3735 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3736 (nextents - (idx + ext_diff)) *
3737 sizeof(xfs_bmbt_rec_t));
3738 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3739 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3741 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3742 ext_diff * sizeof(xfs_bmbt_rec_t));
3747 * This removes ext_diff extents from a linear (direct) extent list,
3748 * beginning at extent index idx. If the extents are being removed
3749 * from the end of the list (i.e. truncate) then we just need to re-
3750 * allocate the list to remove the extra space. Otherwise, if the
3751 * extents are being removed from the middle of the existing extent
3752 * entries, then we first need to move the extent records beginning
3753 * at idx + ext_diff up in the list to overwrite the records being
3754 * removed, then remove the extra space via kmem_realloc.
3757 xfs_iext_remove_direct(
3758 xfs_ifork_t *ifp, /* inode fork pointer */
3759 xfs_extnum_t idx, /* index to begin removing exts */
3760 int ext_diff) /* number of extents to remove */
3762 xfs_extnum_t nextents; /* number of extents in file */
3763 int new_size; /* size of extents after removal */
3765 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3766 new_size = ifp->if_bytes -
3767 (ext_diff * sizeof(xfs_bmbt_rec_t));
3768 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3770 if (new_size == 0) {
3771 xfs_iext_destroy(ifp);
3774 /* Move extents up in the list (if needed) */
3775 if (idx + ext_diff < nextents) {
3776 memmove(&ifp->if_u1.if_extents[idx],
3777 &ifp->if_u1.if_extents[idx + ext_diff],
3778 (nextents - (idx + ext_diff)) *
3779 sizeof(xfs_bmbt_rec_t));
3781 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3782 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3784 * Reallocate the direct extent list. If the extents
3785 * will fit inside the inode then xfs_iext_realloc_direct
3786 * will switch from direct to inline extent allocation
3789 xfs_iext_realloc_direct(ifp, new_size);
3790 ifp->if_bytes = new_size;
3794 * This is called when incore extents are being removed from the
3795 * indirection array and the extents being removed span multiple extent
3796 * buffers. The idx parameter contains the file extent index where we
3797 * want to begin removing extents, and the count parameter contains
3798 * how many extents need to be removed.
3800 * |-------|   |-------|
3801 * | nex1  |   |       |    nex1 - number of extents before idx
3802 * |-------|   | count |
3803 * |       |   |       |    count - number of extents being removed at idx
3804 * | count |   |-------|
3805 * |       |   | nex2  |    nex2 - number of extents after idx + count
3806 * |-------|   |-------|
3809 xfs_iext_remove_indirect(
3810 xfs_ifork_t *ifp, /* inode fork pointer */
3811 xfs_extnum_t idx, /* index to begin removing extents */
3812 int count) /* number of extents to remove */
3814 xfs_ext_irec_t *erp; /* indirection array pointer */
3815 int erp_idx = 0; /* indirection array index */
3816 xfs_extnum_t ext_cnt; /* extents left to remove */
3817 xfs_extnum_t ext_diff; /* extents to remove in current list */
3818 xfs_extnum_t nex1; /* number of extents before idx */
3819 xfs_extnum_t nex2; /* extents after idx + count */
3820 int nlists; /* entries in indirection array */
3821 int page_idx = idx; /* index in target extent list */
3823 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3824 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3825 ASSERT(erp != NULL);
3826 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3830 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3831 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3833 * Check for deletion of entire list;
3834 * xfs_iext_irec_remove() updates extent offsets.
3836 if (ext_diff == erp->er_extcount) {
3837 xfs_iext_irec_remove(ifp, erp_idx);
3838 ext_cnt -= ext_diff;
3841			ASSERT(erp_idx < ifp->if_real_bytes / XFS_IEXT_BUFSZ);
3843 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3850 /* Move extents up (if needed) */
3852 memmove(&erp->er_extbuf[nex1],
3853 &erp->er_extbuf[nex1 + ext_diff],
3854 nex2 * sizeof(xfs_bmbt_rec_t));
3856 /* Zero out rest of page */
3857 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3858 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3859 /* Update remaining counters */
3860 erp->er_extcount -= ext_diff;
3861 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3862 ext_cnt -= ext_diff;
3867 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3868 xfs_iext_irec_compact(ifp);
3872 * Create, destroy, or resize a linear (direct) block of extents.
3875 xfs_iext_realloc_direct(
3876 xfs_ifork_t *ifp, /* inode fork pointer */
3877 int new_size) /* new size of extents */
3879 int rnew_size; /* real new size of extents */
3881 rnew_size = new_size;
3883 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3884 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3885 (new_size != ifp->if_real_bytes)));
3887 /* Free extent records */
3888 if (new_size == 0) {
3889 xfs_iext_destroy(ifp);
3891 /* Resize direct extent list and zero any new bytes */
3892 else if (ifp->if_real_bytes) {
3893 /* Check if extents will fit inside the inode */
3894 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3895 xfs_iext_direct_to_inline(ifp, new_size /
3896 (uint)sizeof(xfs_bmbt_rec_t));
3897 ifp->if_bytes = new_size;
3900		if (!is_power_of_2(new_size)) {
3901 rnew_size = roundup_pow_of_two(new_size);
3903 if (rnew_size != ifp->if_real_bytes) {
3904 ifp->if_u1.if_extents =
3905				kmem_realloc(ifp->if_u1.if_extents, rnew_size,
3907 ifp->if_real_bytes, KM_NOFS);
3909 if (rnew_size > ifp->if_real_bytes) {
3910 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
3911 (uint)sizeof(xfs_bmbt_rec_t)], 0,
3912 rnew_size - ifp->if_real_bytes);
3916 * Switch from the inline extent buffer to a direct
3917 * extent list. Be sure to include the inline extent
3918 * bytes in new_size.
3921 new_size += ifp->if_bytes;
3922 if (!is_power_of_2(new_size)) {
3923 rnew_size = roundup_pow_of_two(new_size);
3925 xfs_iext_inline_to_direct(ifp, rnew_size);
3927 ifp->if_real_bytes = rnew_size;
3928 ifp->if_bytes = new_size;
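/*
 * Illustrative sketch (not part of the original source): the sizing
 * policy used above.  Rounding each requested size up to a power of
 * two means a run of small extensions keeps landing inside the same
 * allocation instead of forcing a realloc on every step.
 */
STATIC int
example_alloc_size(
	int		new_size)	/* bytes actually needed */
{
	if (!is_power_of_2(new_size))
		return roundup_pow_of_two(new_size);
	return new_size;
}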
3932 * Switch from linear (direct) extent records to inline buffer.
3935 xfs_iext_direct_to_inline(
3936 xfs_ifork_t *ifp, /* inode fork pointer */
3937 xfs_extnum_t nextents) /* number of extents in file */
3939 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3940 ASSERT(nextents <= XFS_INLINE_EXTS);
3942 * The inline buffer was zeroed when we switched
3943 * from inline to direct extent allocation mode,
3944 * so we don't need to clear it here.
3946 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
3947 nextents * sizeof(xfs_bmbt_rec_t));
3948 kmem_free(ifp->if_u1.if_extents);
3949 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3950 ifp->if_real_bytes = 0;
3954 * Switch from inline buffer to linear (direct) extent records.
3955 * new_size should already be rounded up to the next power of 2
3956 * by the caller (when appropriate), so use new_size as it is.
3957 * However, since new_size may be rounded up, we can't update
3958 * if_bytes here. It is the caller's responsibility to update
3959 * if_bytes upon return.
3962 xfs_iext_inline_to_direct(
3963 xfs_ifork_t *ifp, /* inode fork pointer */
3964 int new_size) /* number of extents in file */
3966 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
3967 memset(ifp->if_u1.if_extents, 0, new_size);
3968 if (ifp->if_bytes) {
3969		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, ifp->if_bytes);
3971 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
3972 sizeof(xfs_bmbt_rec_t));
3974 ifp->if_real_bytes = new_size;
3978 * Resize an extent indirection array to new_size bytes.
3981 xfs_iext_realloc_indirect(
3982 xfs_ifork_t *ifp, /* inode fork pointer */
3983 int new_size) /* new indirection array size */
3985 int nlists; /* number of irec's (ex lists) */
3986 int size; /* current indirection array size */
3988 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3989 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3990 size = nlists * sizeof(xfs_ext_irec_t);
3991 ASSERT(ifp->if_real_bytes);
3992 ASSERT((new_size >= 0) && (new_size != size));
3993 if (new_size == 0) {
3994 xfs_iext_destroy(ifp);
3996 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
3997 kmem_realloc(ifp->if_u1.if_ext_irec,
3998 new_size, size, KM_NOFS);
4003 * Switch from indirection array to linear (direct) extent allocations.
4006 xfs_iext_indirect_to_direct(
4007 xfs_ifork_t *ifp) /* inode fork pointer */
4009 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
4010 xfs_extnum_t nextents; /* number of extents in file */
4011 int size; /* size of file extents */
4013 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4014 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4015 ASSERT(nextents <= XFS_LINEAR_EXTS);
4016 size = nextents * sizeof(xfs_bmbt_rec_t);
4018 xfs_iext_irec_compact_pages(ifp);
4019 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4021 ep = ifp->if_u1.if_ext_irec->er_extbuf;
4022 kmem_free(ifp->if_u1.if_ext_irec);
4023 ifp->if_flags &= ~XFS_IFEXTIREC;
4024 ifp->if_u1.if_extents = ep;
4025 ifp->if_bytes = size;
4026 if (nextents < XFS_LINEAR_EXTS) {
4027 xfs_iext_realloc_direct(ifp, size);
4032 * Free incore file extents.
4036 xfs_ifork_t *ifp) /* inode fork pointer */
4038 if (ifp->if_flags & XFS_IFEXTIREC) {
4042 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4043 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4044 xfs_iext_irec_remove(ifp, erp_idx);
4046 ifp->if_flags &= ~XFS_IFEXTIREC;
4047 } else if (ifp->if_real_bytes) {
4048 kmem_free(ifp->if_u1.if_extents);
4049 } else if (ifp->if_bytes) {
4050 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4051 sizeof(xfs_bmbt_rec_t));
4053 ifp->if_u1.if_extents = NULL;
4054 ifp->if_real_bytes = 0;
4059 * Return a pointer to the extent record for file system block bno.
4061 xfs_bmbt_rec_host_t * /* pointer to found extent record */
4062 xfs_iext_bno_to_ext(
4063 xfs_ifork_t *ifp, /* inode fork pointer */
4064 xfs_fileoff_t bno, /* block number to search for */
4065 xfs_extnum_t *idxp) /* index of target extent */
4067 xfs_bmbt_rec_host_t *base; /* pointer to first extent */
4068 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
4069 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
4070 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4071 int high; /* upper boundary in search */
4072 xfs_extnum_t idx = 0; /* index of target extent */
4073 int low; /* lower boundary in search */
4074 xfs_extnum_t nextents; /* number of file extents */
4075 xfs_fileoff_t startoff = 0; /* start offset of extent */
4077 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4078 if (nextents == 0) {
4083 if (ifp->if_flags & XFS_IFEXTIREC) {
4084 /* Find target extent list */
4086 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4087 base = erp->er_extbuf;
4088 high = erp->er_extcount - 1;
4090 base = ifp->if_u1.if_extents;
4091 high = nextents - 1;
4093 /* Binary search extent records */
4094 while (low <= high) {
4095 idx = (low + high) >> 1;
4097 startoff = xfs_bmbt_get_startoff(ep);
4098 blockcount = xfs_bmbt_get_blockcount(ep);
4099 if (bno < startoff) {
4101 } else if (bno >= startoff + blockcount) {
4104 /* Convert back to file-based extent index */
4105 if (ifp->if_flags & XFS_IFEXTIREC) {
4106 idx += erp->er_extoff;
4112 /* Convert back to file-based extent index */
4113 if (ifp->if_flags & XFS_IFEXTIREC) {
4114 idx += erp->er_extoff;
4116 if (bno >= startoff + blockcount) {
4117 if (++idx == nextents) {
4120 ep = xfs_iext_get_ext(ifp, idx);
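/*
 * Illustrative sketch (not part of the original source): the range
 * binary search above, over a plain array of extent records.  Each
 * record covers [startoff, startoff + blockcount); the loop narrows
 * until bno falls inside the probed record or the range is exhausted,
 * after which the caller nudges the index forward as needed.
 */
STATIC xfs_extnum_t
example_bno_search(
	xfs_bmbt_rec_host_t	*base,		/* extent record array */
	xfs_extnum_t		nextents,	/* records in the array */
	xfs_fileoff_t		bno)		/* block number to look up */
{
	xfs_extnum_t	idx = 0;
	int		low = 0;
	int		high = nextents - 1;

	while (low <= high) {
		idx = (low + high) >> 1;
		if (bno < xfs_bmbt_get_startoff(&base[idx]))
			high = idx - 1;		/* search the left half */
		else if (bno >= xfs_bmbt_get_startoff(&base[idx]) +
				xfs_bmbt_get_blockcount(&base[idx]))
			low = idx + 1;		/* search the right half */
		else
			break;			/* bno lies inside base[idx] */
	}
	return idx;
}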
4128 * Return a pointer to the indirection array entry containing the
4129 * extent record for filesystem block bno. Store the index of the
4130 * target irec in *erp_idxp.
4132 xfs_ext_irec_t * /* pointer to found extent record */
4133 xfs_iext_bno_to_irec(
4134 xfs_ifork_t *ifp, /* inode fork pointer */
4135 xfs_fileoff_t bno, /* block number to search for */
4136 int *erp_idxp) /* irec index of target ext list */
4138 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4139 xfs_ext_irec_t *erp_next; /* next indirection array entry */
4140 int erp_idx; /* indirection array index */
4141 int nlists; /* number of extent irec's (lists) */
4142 int high; /* binary search upper limit */
4143 int low; /* binary search lower limit */
4145 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4146 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4150 while (low <= high) {
4151 erp_idx = (low + high) >> 1;
4152 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4153 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4154 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4156 } else if (erp_next && bno >=
4157 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4163 *erp_idxp = erp_idx;
4168 * Return a pointer to the indirection array entry containing the
4169 * extent record at file extent index *idxp. Store the index of the
4170 * target irec in *erp_idxp and store the page index of the target
4171 * extent record in *idxp.
4174 xfs_iext_idx_to_irec(
4175 xfs_ifork_t *ifp, /* inode fork pointer */
4176 xfs_extnum_t *idxp, /* extent index (file -> page) */
4177 int *erp_idxp, /* pointer to target irec */
4178 int realloc) /* new bytes were just added */
4180 xfs_ext_irec_t *prev; /* pointer to previous irec */
4181 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
4182 int erp_idx; /* indirection array index */
4183 int nlists; /* number of irec's (ex lists) */
4184 int high; /* binary search upper limit */
4185 int low; /* binary search lower limit */
4186 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
4188 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4189 ASSERT(page_idx >= 0 && page_idx <=
4190 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4191 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4196 /* Binary search extent irec's */
4197 while (low <= high) {
4198 erp_idx = (low + high) >> 1;
4199 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4200 prev = erp_idx > 0 ? erp - 1 : NULL;
4201 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4202 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4204 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
4205 (page_idx == erp->er_extoff + erp->er_extcount &&
4208 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
4209 erp->er_extcount == XFS_LINEAR_EXTS) {
4213 erp = erp_idx < nlists ? erp + 1 : NULL;
4216 page_idx -= erp->er_extoff;
4221 *erp_idxp = erp_idx;
/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}

	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}
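
/*
 * At this point the fork still owns exactly one extent buffer; the
 * freshly allocated single-entry indirection array merely points at
 * it.  Only when a later insertion overflows that page (presumably via
 * the xfs_iext_add() path) does xfs_iext_irec_new() below grow the
 * array.
 */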
/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
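
/*
 * The new page is inserted *at* erp_idx: entries from erp_idx onward
 * slide down one slot, and the new entry inherits an er_extoff equal
 * to the end of its predecessor, so file-relative offsets stay
 * contiguous.  Since the new page starts out with er_extcount == 0,
 * callers that then add extents to it are the ones expected to fix up
 * the following entries via xfs_iext_irec_update_extoffs().
 */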
/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
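
/*
 * Passing a NULL er_extbuf (as xfs_iext_irec_compact_pages() below
 * does) skips both the er_extoff fixup and the free, which lets a
 * caller that has already merged and freed the page reuse this
 * function purely to close the hole in the indirection array.
 */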
/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible. The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
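
/*
 * Example of the 50% threshold: with the usual 4k XFS_IEXT_BUFSZ and
 * 16-byte extent records, XFS_LINEAR_EXTS is 256, so four pages can
 * hold 1024 extents.  If deletions bring the count below 512 (but
 * above 256, which would instead trigger the switch back to a direct
 * list), xfs_iext_irec_compact_pages() runs to merge neighboring
 * pages.
 */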
/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
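
/*
 * The merge is greedy and only folds a whole page into its left
 * neighbor; it never splits a page.  E.g. (hypothetical counts,
 * assuming 256-extent pages) pages holding 200, 40 and 100 extents
 * become 240 and 100 after one pass: the 40 fits into 200's free
 * space, but 100 no longer fits into 240's remaining 16 slots.
 */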
/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
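
/*
 * Note that ext_diff may be negative: xfs_iext_irec_remove() above
 * passes -er_extcount so that every following page slides left by the
 * number of extents that just disappeared.
 */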