xfs: rework AIL push interfaces (xfs_trans_ail_push/update/delete take struct xfs_ail; rework xfsaild_push timeouts and xfsbufd wakeup) — NOTE(review): original scraped title "fs/xfs/quota: Add missing mutex_unlock" does not match this diff for fs/xfs/xfs_trans_ail.c; verify against upstream git history
[safe/jmp/linux-2.6] / fs / xfs / xfs_trans_ail.c
index 0cd47a7..e799824 100644 (file)
@@ -79,23 +79,23 @@ xfs_trans_ail_tail(
  * the push is run asynchronously in a separate thread, so we return the tail
  * of the log right now instead of the tail after the push. This means we will
  * either continue right away, or we will sleep waiting on the async thread to
- * do it's work.
+ * do its work.
  *
  * We do this unlocked - we only need to know whether there is anything in the
  * AIL at the time we are called. We don't need to access the contents of
  * any of the objects, so the lock is not needed.
  */
 void
-xfs_trans_push_ail(
-       xfs_mount_t             *mp,
-       xfs_lsn_t               threshold_lsn)
+xfs_trans_ail_push(
+       struct xfs_ail  *ailp,
+       xfs_lsn_t       threshold_lsn)
 {
-       xfs_log_item_t          *lip;
+       xfs_log_item_t  *lip;
 
-       lip = xfs_ail_min(mp->m_ail);
-       if (lip && !XFS_FORCED_SHUTDOWN(mp)) {
-               if (XFS_LSN_CMP(threshold_lsn, mp->m_ail->xa_target) > 0)
-                       xfsaild_wakeup(mp->m_ail, threshold_lsn);
+       lip = xfs_ail_min(ailp);
+       if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
+               if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
+                       xfsaild_wakeup(ailp, threshold_lsn);
        }
 }
 
@@ -160,7 +160,7 @@ xfs_trans_ail_cursor_next(
 /*
  * Now that the traversal is complete, we need to remove the cursor
  * from the list of traversing cursors. Avoid removing the embedded
- * push cursor, but use the fact it is alway present to make the
+ * push cursor, but use the fact it is always present to make the
  * list deletion simple.
  */
 void
@@ -228,7 +228,7 @@ xfs_trans_ail_cursor_first(
 
        list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
                if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
-                       break;
+                       goto out;
        }
        lip = NULL;
 out:
@@ -237,14 +237,15 @@ out:
 }
 
 /*
- * Function that does the work of pushing on the AIL
+ * xfsaild_push does the work of pushing on the AIL.  Returning a timeout of
+ * zero indicates that the caller should sleep until woken.
  */
 long
 xfsaild_push(
        struct xfs_ail  *ailp,
        xfs_lsn_t       *last_lsn)
 {
-       long            tout = 1000; /* milliseconds */
+       long            tout = 0;
        xfs_lsn_t       last_pushed_lsn = *last_lsn;
        xfs_lsn_t       target =  ailp->xa_target;
        xfs_lsn_t       lsn;
@@ -252,6 +253,7 @@ xfsaild_push(
        int             flush_log, count, stuck;
        xfs_mount_t     *mp = ailp->xa_mount;
        struct xfs_ail_cursor   *cur = &ailp->xa_cursors;
+       int             push_xfsbufd = 0;
 
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_cursor_init(ailp, cur);
@@ -262,7 +264,7 @@ xfsaild_push(
                 */
                xfs_trans_ail_cursor_done(ailp, cur);
                spin_unlock(&ailp->xa_lock);
-               last_pushed_lsn = 0;
+               *last_lsn = 0;
                return tout;
        }
 
@@ -279,7 +281,6 @@ xfsaild_push(
         * prevents use from spinning when we can't do anything or there is
         * lots of contention on the AIL lists.
         */
-       tout = 10;
        lsn = lip->li_lsn;
        flush_log = stuck = count = 0;
        while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
@@ -308,6 +309,7 @@ xfsaild_push(
                        XFS_STATS_INC(xs_push_ail_pushbuf);
                        IOP_PUSHBUF(lip);
                        last_pushed_lsn = lsn;
+                       push_xfsbufd = 1;
                        break;
 
                case XFS_ITEM_PINNED:
@@ -322,12 +324,6 @@ xfsaild_push(
                        stuck++;
                        break;
 
-               case XFS_ITEM_FLUSHING:
-                       XFS_STATS_INC(xs_push_ail_flushing);
-                       last_pushed_lsn = lsn;
-                       stuck++;
-                       break;
-
                default:
                        ASSERT(0);
                        break;
@@ -371,19 +367,24 @@ xfsaild_push(
                 * move forward in the AIL.
                 */
                XFS_STATS_INC(xs_push_ail_flush);
-               xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+               xfs_log_force(mp, 0);
+       }
+
+       if (push_xfsbufd) {
+               /* we've got delayed write buffers to flush */
+               wake_up_process(mp->m_ddev_targp->bt_task);
        }
 
        if (!count) {
                /* We're past our target or empty, so idle */
-               tout = 1000;
+               last_pushed_lsn = 0;
        } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                /*
                 * We reached the target so wait a bit longer for I/O to
                 * complete and remove pushed items from the AIL before we
                 * start the next scan from the start of the AIL.
                 */
-               tout += 20;
+               tout = 50;
                last_pushed_lsn = 0;
        } else if ((stuck * 100) / count > 90) {
                /*
@@ -395,11 +396,14 @@ xfsaild_push(
                 * Backoff a bit more to allow some I/O to complete before
                 * continuing from where we were.
                 */
-               tout += 10;
+               tout = 20;
+       } else {
+               /* more to do, but wait a short while before continuing */
+               tout = 10;
        }
        *last_lsn = last_pushed_lsn;
        return tout;
-}      /* xfsaild_push */
+}
 
 
 /*
@@ -412,7 +416,7 @@ xfsaild_push(
  */
 void
 xfs_trans_unlocked_item(
-       xfs_mount_t     *mp,
+       struct xfs_ail  *ailp,
        xfs_log_item_t  *lip)
 {
        xfs_log_item_t  *min_lip;
@@ -424,7 +428,7 @@ xfs_trans_unlocked_item(
         * over some potentially valid data.
         */
        if (!(lip->li_flags & XFS_LI_IN_AIL) ||
-           XFS_FORCED_SHUTDOWN(mp)) {
+           XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
                return;
        }
 
@@ -440,10 +444,10 @@ xfs_trans_unlocked_item(
         * the call to xfs_log_move_tail() doesn't do anything if there's
         * not enough free space to wake people up so we're safe calling it.
         */
-       min_lip = xfs_ail_min(mp->m_ail);
+       min_lip = xfs_ail_min(ailp);
 
        if (min_lip == lip)
-               xfs_log_move_tail(mp, 1);
+               xfs_log_move_tail(ailp->xa_mount, 1);
 }      /* xfs_trans_unlocked_item */
 
 
@@ -460,14 +464,14 @@ xfs_trans_unlocked_item(
  * is dropped before returning.
  */
 void
-xfs_trans_update_ail(
-       xfs_mount_t     *mp,
+xfs_trans_ail_update(
+       struct xfs_ail  *ailp,
        xfs_log_item_t  *lip,
        xfs_lsn_t       lsn) __releases(ailp->xa_lock)
 {
-       struct xfs_ail          *ailp = mp->m_ail;
        xfs_log_item_t          *dlip = NULL;
        xfs_log_item_t          *mlip;  /* ptr to minimum lip */
+       xfs_lsn_t               tail_lsn;
 
        mlip = xfs_ail_min(ailp);
 
@@ -484,8 +488,16 @@ xfs_trans_update_ail(
 
        if (mlip == dlip) {
                mlip = xfs_ail_min(ailp);
+               /*
+                * It is not safe to access mlip after the AIL lock is
+                * dropped, so we must get a copy of li_lsn before we do
+                * so.  This is especially important on 32-bit platforms
+                * where accessing and updating 64-bit values like li_lsn
+                * is not atomic.
+                */
+               tail_lsn = mlip->li_lsn;
                spin_unlock(&ailp->xa_lock);
-               xfs_log_move_tail(mp, mlip->li_lsn);
+               xfs_log_move_tail(ailp->xa_mount, tail_lsn);
        } else {
                spin_unlock(&ailp->xa_lock);
        }
@@ -509,13 +521,13 @@ xfs_trans_update_ail(
  * is dropped before returning.
  */
 void
-xfs_trans_delete_ail(
-       xfs_mount_t     *mp,
+xfs_trans_ail_delete(
+       struct xfs_ail  *ailp,
        xfs_log_item_t  *lip) __releases(ailp->xa_lock)
 {
-       struct xfs_ail          *ailp = mp->m_ail;
        xfs_log_item_t          *dlip;
        xfs_log_item_t          *mlip;
+       xfs_lsn_t               tail_lsn;
 
        if (lip->li_flags & XFS_LI_IN_AIL) {
                mlip = xfs_ail_min(ailp);
@@ -529,8 +541,16 @@ xfs_trans_delete_ail(
 
                if (mlip == dlip) {
                        mlip = xfs_ail_min(ailp);
+                       /*
+                        * It is not safe to access mlip after the AIL lock
+                        * is dropped, so we must get a copy of li_lsn
+                        * before we do so.  This is especially important
+                        * on 32-bit platforms where accessing and updating
+                        * 64-bit values like li_lsn is not atomic.
+                        */
+                       tail_lsn = mlip ? mlip->li_lsn : 0;
                        spin_unlock(&ailp->xa_lock);
-                       xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0));
+                       xfs_log_move_tail(ailp->xa_mount, tail_lsn);
                } else {
                        spin_unlock(&ailp->xa_lock);
                }
@@ -540,6 +560,8 @@ xfs_trans_delete_ail(
                 * If the file system is not being shutdown, we are in
                 * serious trouble if we get to this stage.
                 */
+               struct xfs_mount        *mp = ailp->xa_mount;
+
                spin_unlock(&ailp->xa_lock);
                if (!XFS_FORCED_SHUTDOWN(mp)) {
                        xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,