xfs: prevent lockdep false positive in xfs_iget_cache_miss
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 2d03fe1..3530025 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
 #include "xfs_fsops.h"
 #include "xfs_utils.h"
 
-STATIC int     xfs_mount_log_sb(xfs_mount_t *, __int64_t);
 STATIC int     xfs_uuid_mount(xfs_mount_t *);
-STATIC void    xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void    xfs_unmountfs_wait(xfs_mount_t *);
 
 
 #ifdef HAVE_PERCPU_SB
-STATIC void    xfs_icsb_destroy_counters(xfs_mount_t *);
 STATIC void    xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
-                                               int, int);
-STATIC void    xfs_icsb_sync_counters(xfs_mount_t *);
+                                               int);
+STATIC void    xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
+                                               int);
 STATIC int     xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
                                                int64_t, int);
 STATIC void    xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 
 #else
 
-#define xfs_icsb_destroy_counters(mp)                  do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b, c)          do { } while (0)
-#define xfs_icsb_sync_counters(mp)                     do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b)             do { } while (0)
+#define xfs_icsb_balance_counter_locked(mp, a, b)      do { } while (0)
 #define xfs_icsb_modify_counters(mp, a, b, c)          do { } while (0)
 
 #endif
@@ -125,34 +122,12 @@ static const struct {
 };
 
 /*
- * Return a pointer to an initialized xfs_mount structure.
- */
-xfs_mount_t *
-xfs_mount_init(void)
-{
-       xfs_mount_t *mp;
-
-       mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
-
-       if (xfs_icsb_init_counters(mp)) {
-               mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
-       }
-
-       spin_lock_init(&mp->m_sb_lock);
-       mutex_init(&mp->m_ilock);
-       mutex_init(&mp->m_growlock);
-       atomic_set(&mp->m_active_trans, 0);
-
-       return mp;
-}
-
-/*
  * Free up the resources associated with a mount structure.  Assume that
  * the structure was initially zeroed, so we can tell which fields got
  * initialized.
  */
-void
-xfs_mount_free(
+STATIC void
+xfs_free_perag(
        xfs_mount_t     *mp)
 {
        if (mp->m_perag) {
@@ -160,28 +135,9 @@ xfs_mount_free(
 
                for (agno = 0; agno < mp->m_maxagi; agno++)
                        if (mp->m_perag[agno].pagb_list)
-                               kmem_free(mp->m_perag[agno].pagb_list,
-                                               sizeof(xfs_perag_busy_t) *
-                                                       XFS_PAGB_NUM_SLOTS);
-               kmem_free(mp->m_perag,
-                         sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
+                               kmem_free(mp->m_perag[agno].pagb_list);
+               kmem_free(mp->m_perag);
        }
-
-       spinlock_destroy(&mp->m_ail_lock);
-       spinlock_destroy(&mp->m_sb_lock);
-       mutex_destroy(&mp->m_ilock);
-       mutex_destroy(&mp->m_growlock);
-       if (mp->m_quotainfo)
-               XFS_QM_DONE(mp);
-
-       if (mp->m_fsname != NULL)
-               kmem_free(mp->m_fsname, mp->m_fsname_len);
-       if (mp->m_rtname != NULL)
-               kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
-       if (mp->m_logname != NULL)
-               kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);
-
-       xfs_icsb_destroy_counters(mp);
 }
 
 /*
@@ -287,6 +243,19 @@ xfs_mount_validate_sb(
                return XFS_ERROR(EFSCORRUPTED);
        }
 
+       /*
+        * Until this is fixed only page-sized or smaller data blocks work.
+        */
+       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
+               xfs_fs_mount_cmn_err(flags,
+                       "file system with blocksize %d bytes",
+                       sbp->sb_blocksize);
+               xfs_fs_mount_cmn_err(flags,
+                       "only pagesize (%ld) or less will currently work.",
+                       PAGE_SIZE);
+               return XFS_ERROR(ENOSYS);
+       }
+
        if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
            xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
                xfs_fs_mount_cmn_err(flags,
@@ -308,19 +277,6 @@ xfs_mount_validate_sb(
                return XFS_ERROR(ENOSYS);
        }
 
-       /*
-        * Until this is fixed only page-sized or smaller data blocks work.
-        */
-       if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
-               xfs_fs_mount_cmn_err(flags,
-                       "file system with blocksize %d bytes",
-                       sbp->sb_blocksize);
-               xfs_fs_mount_cmn_err(flags,
-                       "only pagesize (%ld) or less will currently work.",
-                       PAGE_SIZE);
-               return XFS_ERROR(ENOSYS);
-       }
-
        return 0;
 }
 
@@ -610,8 +566,6 @@ xfs_readsb(xfs_mount_t *mp, int flags)
 STATIC void
 xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
 {
-       int     i;
-
        mp->m_agfrotor = mp->m_agirotor = 0;
        spin_lock_init(&mp->m_agirotor_lock);
        mp->m_maxagi = mp->m_sb.sb_agcount;
@@ -620,12 +574,10 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
        mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
        mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
        mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
-       mp->m_litino = sbp->sb_inodesize -
-               ((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
+       mp->m_litino = sbp->sb_inodesize - sizeof(struct xfs_dinode);
        mp->m_blockmask = sbp->sb_blocksize - 1;
        mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
        mp->m_blockwmask = mp->m_blockwsize - 1;
-       INIT_LIST_HEAD(&mp->m_del_inodes);
 
        /*
         * Setup for attributes, in case they get created.
@@ -648,24 +600,20 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
        }
        ASSERT(mp->m_attroffset < XFS_LITINO(mp));
 
-       for (i = 0; i < 2; i++) {
-               mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
-                       xfs_alloc, i == 0);
-               mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
-                       xfs_alloc, i == 0);
-       }
-       for (i = 0; i < 2; i++) {
-               mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
-                       xfs_bmbt, i == 0);
-               mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
-                       xfs_bmbt, i == 0);
-       }
-       for (i = 0; i < 2; i++) {
-               mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
-                       xfs_inobt, i == 0);
-               mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
-                       xfs_inobt, i == 0);
-       }
+       mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
+       mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
+
+       mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
+       mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
+
+       mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
+       mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
+       mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
+       mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
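The open-coded XFS_BTREE_BLOCK_MAXRECS/MINRECS loops give way to per-btree helpers, with each minimum record count taken as half of the corresponding maximum. A minimal sketch of the arithmetic such a maxrecs helper performs (the record, key and pointer layouts below are illustrative stand-ins, not the real xfs_alloc_rec_t and friends, and the real helpers also account for the btree block header before dividing):

#include <stdint.h>

/* illustrative on-disk layouts, sized like small big-endian fields */
struct demo_rec  { uint32_t startblock; uint32_t blockcount; };
struct demo_key  { uint32_t startblock; };
typedef uint32_t demo_ptr_t;

/* leaf blocks hold whole records; node blocks hold key/pointer pairs */
static int
demo_btree_maxrecs(int blocklen, int leaf)
{
        if (leaf)
                return blocklen / sizeof(struct demo_rec);
        return blocklen / (sizeof(struct demo_key) + sizeof(demo_ptr_t));
}

Taking mnr as mxr / 2 keeps the usual B-tree invariant that every block stays at least half full.
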
 
        mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
        mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
@@ -733,11 +681,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
  * Update alignment values based on mount options and sb values
  */
 STATIC int
-xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
+xfs_update_alignment(xfs_mount_t *mp)
 {
        xfs_sb_t        *sbp = &(mp->m_sb);
 
-       if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
+       if (mp->m_dalign) {
                /*
                 * If stripe unit and stripe width are not multiples
                 * of the fs blocksize turn off alignment.
@@ -787,11 +735,11 @@ xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags)
                if (xfs_sb_version_hasdalign(sbp)) {
                        if (sbp->sb_unit != mp->m_dalign) {
                                sbp->sb_unit = mp->m_dalign;
-                               *update_flags |= XFS_SB_UNIT;
+                               mp->m_update_flags |= XFS_SB_UNIT;
                        }
                        if (sbp->sb_width != mp->m_swidth) {
                                sbp->sb_width = mp->m_swidth;
-                               *update_flags |= XFS_SB_WIDTH;
+                               mp->m_update_flags |= XFS_SB_WIDTH;
                        }
                }
        } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
@@ -893,7 +841,7 @@ xfs_set_inoalignment(xfs_mount_t *mp)
  * Check that the data (and log if separate) are an ok size.
  */
 STATIC int
-xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
+xfs_check_sizes(xfs_mount_t *mp)
 {
        xfs_buf_t       *bp;
        xfs_daddr_t     d;
@@ -916,8 +864,7 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
                return error;
        }
 
-       if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
-           mp->m_logdev_targp != mp->m_ddev_targp) {
+       if (mp->m_logdev_targp != mp->m_ddev_targp) {
                d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
                if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
                        cmn_err(CE_WARN, "XFS: size check 3 failed");
@@ -952,15 +899,12 @@ xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags)
  */
 int
 xfs_mountfs(
-       xfs_mount_t     *mp,
-       int             mfsi_flags)
+       xfs_mount_t     *mp)
 {
        xfs_sb_t        *sbp = &(mp->m_sb);
        xfs_inode_t     *rip;
        __uint64_t      resblks;
-       __int64_t       update_flags = 0LL;
        uint            quotamount, quotaflags;
-       int             agno;
        int             uuid_mounted = 0;
        int             error = 0;
 
@@ -987,15 +931,25 @@ xfs_mountfs(
                        "XFS: correcting sb_features alignment problem");
                sbp->sb_features2 |= sbp->sb_bad_features2;
                sbp->sb_bad_features2 = sbp->sb_features2;
-               update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
+               mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
 
                /*
                 * Re-check for ATTR2 in case it was found in bad_features2
                 * slot.
                 */
-               if (xfs_sb_version_hasattr2(&mp->m_sb))
+               if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+                  !(mp->m_flags & XFS_MOUNT_NOATTR2))
                        mp->m_flags |= XFS_MOUNT_ATTR2;
+       }
 
+       if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+          (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+               xfs_sb_version_removeattr2(&mp->m_sb);
+               mp->m_update_flags |= XFS_SB_FEATURES2;
+
+               /* update sb_versionnum for the clearing of the morebits */
+               if (!sbp->sb_features2)
+                       mp->m_update_flags |= XFS_SB_VERSIONNUM;
        }
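For context, xfs_sb_version_removeattr2() clears the ATTR2 feature bit and, when that leaves sb_features2 empty, also drops the MOREBITS flag from sb_versionnum, which is why XFS_SB_VERSIONNUM may need to be logged as well. Approximately (bit names as in xfs_sb.h of this era):

static inline void
xfs_sb_version_removeattr2(xfs_sb_t *sbp)
{
        sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
        /* no feature-2 bits left: the MOREBITS version flag can go too */
        if (!sbp->sb_features2)
                sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
}
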
 
        /*
@@ -1004,7 +958,7 @@ xfs_mountfs(
         * allocator alignment is within an ag, therefore ag has
         * to be aligned at stripe boundary.
         */
-       error = xfs_update_alignment(mp, mfsi_flags, &update_flags);
+       error = xfs_update_alignment(mp);
        if (error)
                goto error1;
 
@@ -1023,8 +977,7 @@ xfs_mountfs(
         * since a single partition filesystem is identical to a single
         * partition volume/filesystem.
         */
-       if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
-           (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
+       if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
                if (xfs_uuid_mount(mp)) {
                        error = XFS_ERROR(EINVAL);
                        goto error1;
@@ -1052,7 +1005,7 @@ xfs_mountfs(
        /*
         * Check that the data (and log if separate) are an ok size.
         */
-       error = xfs_check_sizes(mp, mfsi_flags);
+       error = xfs_check_sizes(mp);
        if (error)
                goto error1;
 
@@ -1066,13 +1019,6 @@ xfs_mountfs(
        }
 
        /*
-        * For client case we are done now
-        */
-       if (mfsi_flags & XFS_MFSI_CLIENT) {
-               return 0;
-       }
-
-       /*
         *  Copies the low order bits of the timestamp and the randomly
         *  set "sequence" number out of a UUID.
         */
@@ -1096,8 +1042,10 @@ xfs_mountfs(
         * Allocate and initialize the per-ag data.
         */
        init_rwsem(&mp->m_peraglock);
-       mp->m_perag =
-               kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);
+       mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
+                                 KM_MAYFAIL);
+       if (!mp->m_perag)
+               goto error1;
 
        mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
 
@@ -1187,10 +1135,12 @@ xfs_mountfs(
        }
 
        /*
-        * If fs is not mounted readonly, then update the superblock changes.
+        * If this is a read-only mount defer the superblock updates until
+        * the next remount into writeable mode.  Otherwise we would never
+        * perform the update e.g. for the root filesystem.
         */
-       if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
-               error = xfs_mount_log_sb(mp, update_flags);
+       if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               error = xfs_mount_log_sb(mp, mp->m_update_flags);
                if (error) {
                        cmn_err(CE_WARN, "XFS: failed to write sb changes");
                        goto error4;
@@ -1209,7 +1159,7 @@ xfs_mountfs(
         * delayed until after the root and real-time bitmap inodes
         * were consistently read in.
         */
-       error = xfs_log_mount_finish(mp, mfsi_flags);
+       error = xfs_log_mount_finish(mp);
        if (error) {
                cmn_err(CE_WARN, "XFS: log mount finish failed");
                goto error4;
@@ -1218,7 +1168,7 @@ xfs_mountfs(
        /*
         * Complete the quota initialisation, post-log-replay component.
         */
-       error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags);
+       error = XFS_QM_MOUNT(mp, quotamount, quotaflags);
        if (error)
                goto error4;
 
@@ -1252,31 +1202,35 @@ xfs_mountfs(
  error3:
        xfs_log_unmount_dealloc(mp);
  error2:
-       for (agno = 0; agno < sbp->sb_agcount; agno++)
-               if (mp->m_perag[agno].pagb_list)
-                       kmem_free(mp->m_perag[agno].pagb_list,
-                         sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
-       kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
-       mp->m_perag = NULL;
-       /* FALLTHROUGH */
+       xfs_free_perag(mp);
  error1:
        if (uuid_mounted)
-               xfs_uuid_unmount(mp);
-       xfs_freesb(mp);
+               uuid_table_remove(&mp->m_sb.sb_uuid);
        return error;
 }
 
 /*
- * xfs_unmountfs
- *
  * This flushes out the inodes,dquots and the superblock, unmounts the
  * log and makes sure that incore structures are freed.
  */
-int
-xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
+void
+xfs_unmountfs(
+       struct xfs_mount        *mp)
 {
-       __uint64_t      resblks;
-       int             error = 0;
+       __uint64_t              resblks;
+       int                     error;
+
+       /*
+        * Release dquot that rootinode, rbmino and rsumino might be holding,
+        * and release the quota inodes.
+        */
+       XFS_QM_UNMOUNT(mp);
+
+       if (mp->m_rbmip)
+               IRELE(mp->m_rbmip);
+       if (mp->m_rsumip)
+               IRELE(mp->m_rsumip);
+       IRELE(mp->m_rootip);
 
        /*
         * We can potentially deadlock here if we have an inode cluster
@@ -1289,10 +1243,13 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
         * need to force the log first.
         */
        xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
-       xfs_iflush_all(mp);
+       xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_ASYNC);
 
        XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
 
+       if (mp->m_quotainfo)
+               XFS_QM_DONE(mp);
+
        /*
         * Flush out the log synchronously so that we know for sure
         * that nothing is pinned.  This is important because bflush()
@@ -1333,32 +1290,13 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
        xfs_unmountfs_wait(mp);                 /* wait for async bufs */
        xfs_log_unmount(mp);                    /* Done! No more fs ops. */
 
-       xfs_freesb(mp);
-
-       /*
-        * All inodes from this mount point should be freed.
-        */
-       ASSERT(mp->m_inodes == NULL);
-
-       xfs_unmountfs_close(mp, cr);
        if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
-               xfs_uuid_unmount(mp);
+               uuid_table_remove(&mp->m_sb.sb_uuid);
 
-#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
+#if defined(DEBUG)
        xfs_errortag_clearall(mp, 0);
 #endif
-       xfs_mount_free(mp);
-       return 0;
-}
-
-void
-xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
-{
-       if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
-               xfs_free_buftarg(mp->m_logdev_targp, 1);
-       if (mp->m_rtdev_targp)
-               xfs_free_buftarg(mp->m_rtdev_targp, 1);
-       xfs_free_buftarg(mp->m_ddev_targp, 0);
+       xfs_free_perag(mp);
 }
 
 STATIC void
@@ -1400,7 +1338,7 @@ xfs_log_sbcount(
        if (!xfs_fs_writable(mp))
                return 0;
 
-       xfs_icsb_sync_counters(mp);
+       xfs_icsb_sync_counters(mp, 0);
 
        /*
         * we don't need to do this if we are updating the superblock
@@ -1424,24 +1362,6 @@ xfs_log_sbcount(
        return error;
 }
 
-STATIC void
-xfs_mark_shared_ro(
-       xfs_mount_t     *mp,
-       xfs_buf_t       *bp)
-{
-       xfs_dsb_t       *sb = XFS_BUF_TO_SBP(bp);
-       __uint16_t      version;
-
-       if (!(sb->sb_flags & XFS_SBF_READONLY))
-               sb->sb_flags |= XFS_SBF_READONLY;
-
-       version = be16_to_cpu(sb->sb_versionnum);
-       if ((version & XFS_SB_VERSION_NUMBITS) != XFS_SB_VERSION_4 ||
-           !(version & XFS_SB_VERSION_SHAREDBIT))
-               version |= XFS_SB_VERSION_SHAREDBIT;
-       sb->sb_versionnum = cpu_to_be16(version);
-}
-
 int
 xfs_unmountfs_writesb(xfs_mount_t *mp)
 {
@@ -1457,12 +1377,6 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
 
                sbp = xfs_getsb(mp, 0);
 
-               /*
-                * mark shared-readonly if desired
-                */
-               if (mp->m_mk_sharedro)
-                       xfs_mark_shared_ro(mp, sbp);
-
                XFS_BUF_UNDONE(sbp);
                XFS_BUF_UNREAD(sbp);
                XFS_BUF_UNDELAYWRITE(sbp);
@@ -1470,13 +1384,10 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
                XFS_BUF_UNASYNC(sbp);
                ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
                xfsbdstrat(mp, sbp);
-               /* Nevermind errors we might get here. */
                error = xfs_iowait(sbp);
                if (error)
                        xfs_ioerror_alert("xfs_unmountfs_writesb",
                                          mp, sbp, XFS_BUF_ADDR(sbp));
-               if (error && mp->m_mk_sharedro)
-                       xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting.  Filesystem may not be marked shared readonly");
                xfs_buf_relse(sbp);
        }
        return error;
@@ -1905,21 +1816,11 @@ xfs_uuid_mount(
 }
 
 /*
- * Remove filesystem from the UUID table.
- */
-STATIC void
-xfs_uuid_unmount(
-       xfs_mount_t     *mp)
-{
-       uuid_table_remove(&mp->m_sb.sb_uuid);
-}
-
-/*
  * Used to log changes to the superblock unit and width fields which could
  * be altered by the mount options, as well as any potential sb_features2
  * fixup. Only the first superblock is updated.
  */
-STATIC int
+int
 xfs_mount_log_sb(
        xfs_mount_t     *mp,
        __int64_t       fields)
@@ -1928,7 +1829,8 @@ xfs_mount_log_sb(
        int             error;
 
        ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
-                        XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2));
+                        XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
+                        XFS_SB_VERSIONNUM));
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
        error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
@@ -2027,9 +1929,9 @@ xfs_icsb_cpu_notify(
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                xfs_icsb_lock(mp);
-               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
                xfs_icsb_unlock(mp);
                break;
        case CPU_DEAD:
@@ -2049,12 +1951,9 @@ xfs_icsb_cpu_notify(
 
                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 
-               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
-                                        XFS_ICSB_SB_LOCKED, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
-                                        XFS_ICSB_SB_LOCKED, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
-                                        XFS_ICSB_SB_LOCKED, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
                spin_unlock(&mp->m_sb_lock);
                xfs_icsb_unlock(mp);
                break;
@@ -2106,13 +2005,13 @@ xfs_icsb_reinit_counters(
         * initial balance kicks us off correctly
         */
        mp->m_icsb_counters = -1;
-       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
        xfs_icsb_unlock(mp);
 }
 
-STATIC void
+void
 xfs_icsb_destroy_counters(
        xfs_mount_t     *mp)
 {
@@ -2224,7 +2123,7 @@ xfs_icsb_disable_counter(
        if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
                /* drain back to superblock */
 
-               xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
+               xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
                switch(field) {
                case XFS_SBS_ICOUNT:
                        mp->m_sb.sb_icount = cnt.icsb_icount;
@@ -2279,38 +2178,33 @@ xfs_icsb_enable_counter(
 }
 
 void
-xfs_icsb_sync_counters_flags(
+xfs_icsb_sync_counters_locked(
        xfs_mount_t     *mp,
        int             flags)
 {
        xfs_icsb_cnts_t cnt;
 
-       /* Pass 1: lock all counters */
-       if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-               spin_lock(&mp->m_sb_lock);
-
        xfs_icsb_count(mp, &cnt, flags);
 
-       /* Step 3: update mp->m_sb fields */
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
                mp->m_sb.sb_icount = cnt.icsb_icount;
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
                mp->m_sb.sb_ifree = cnt.icsb_ifree;
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
                mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-
-       if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-               spin_unlock(&mp->m_sb_lock);
 }
 
 /*
  * Accurate update of per-cpu counters to incore superblock
  */
-STATIC void
+void
 xfs_icsb_sync_counters(
-       xfs_mount_t     *mp)
+       xfs_mount_t     *mp,
+       int             flags)
 {
-       xfs_icsb_sync_counters_flags(mp, 0);
+       spin_lock(&mp->m_sb_lock);
+       xfs_icsb_sync_counters_locked(mp, flags);
+       spin_unlock(&mp->m_sb_lock);
 }
 
 /*
@@ -2333,19 +2227,15 @@ xfs_icsb_sync_counters(
 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
                (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
 STATIC void
-xfs_icsb_balance_counter(
+xfs_icsb_balance_counter_locked(
        xfs_mount_t     *mp,
        xfs_sb_field_t  field,
-       int             flags,
        int             min_per_cpu)
 {
        uint64_t        count, resid;
        int             weight = num_online_cpus();
        uint64_t        min = (uint64_t)min_per_cpu;
 
-       if (!(flags & XFS_ICSB_SB_LOCKED))
-               spin_lock(&mp->m_sb_lock);
-
        /* disable counter and sync counter */
        xfs_icsb_disable_counter(mp, field);
 
@@ -2355,19 +2245,19 @@ xfs_icsb_balance_counter(
                count = mp->m_sb.sb_icount;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-                       goto out;
+                       return;
                break;
        case XFS_SBS_IFREE:
                count = mp->m_sb.sb_ifree;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-                       goto out;
+                       return;
                break;
        case XFS_SBS_FDBLOCKS:
                count = mp->m_sb.sb_fdblocks;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
-                       goto out;
+                       return;
                break;
        default:
                BUG();
@@ -2376,9 +2266,17 @@ xfs_icsb_balance_counter(
        }
 
        xfs_icsb_enable_counter(mp, field, count, resid);
-out:
-       if (!(flags & XFS_ICSB_SB_LOCKED))
-               spin_unlock(&mp->m_sb_lock);
+}
+
+STATIC void
+xfs_icsb_balance_counter(
+       xfs_mount_t     *mp,
+       xfs_sb_field_t  fields,
+       int             min_per_cpu)
+{
+       spin_lock(&mp->m_sb_lock);
+       xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
+       spin_unlock(&mp->m_sb_lock);
 }
 
 STATIC int
@@ -2485,7 +2383,7 @@ slow_path:
         * we are done.
         */
        if (ret != ENOSPC)
-               xfs_icsb_balance_counter(mp, field, 0, 0);
+               xfs_icsb_balance_counter(mp, field, 0);
        xfs_icsb_unlock(mp);
        return ret;
 
@@ -2509,7 +2407,7 @@ balance_counter:
         * will either succeed through the fast path or slow path without
         * another balance operation being required.
         */
-       xfs_icsb_balance_counter(mp, field, 0, delta);
+       xfs_icsb_balance_counter(mp, field, delta);
        xfs_icsb_unlock(mp);
        goto again;
 }
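
Both xfs_icsb_sync_counters() and xfs_icsb_balance_counter() now follow the same locked/unlocked split: the _locked variant assumes m_sb_lock is already held (as in the CPU_DEAD notifier above), and the plain variant is a thin wrapper that takes the lock itself, replacing the old XFS_ICSB_SB_LOCKED flag plumbing. A generic sketch of that shape, with placeholder names rather than real XFS functions:

#include <linux/spinlock.h>
#include <linux/types.h>

/* placeholder state guarded by a lock, standing in for the in-core sb */
struct demo_counters {
        spinlock_t      lock;
        u64             icount;
};

/* caller must already hold ->lock (e.g. called from a hotplug notifier) */
static void demo_balance_locked(struct demo_counters *c, u64 delta)
{
        c->icount += delta;
}

/* convenience wrapper for callers that do not hold ->lock */
static void demo_balance(struct demo_counters *c, u64 delta)
{
        spin_lock(&c->lock);
        demo_balance_locked(c, delta);
        spin_unlock(&c->lock);
}

Making the caller's locking responsibility explicit in the function name is what lets the CPU-hotplug path above call the _locked variants directly while already holding m_sb_lock.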