diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff42e97..4c0b7b3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -89,7 +89,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void inline __sched mutex_lock(struct mutex *lock)
+void __sched mutex_lock(struct mutex *lock)
 {
        might_sleep();
        /*
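
For context on the down() comparison in the comment above, a minimal sketch of mutex usage in the semaphore's place; io_mutex and do_io() are made-up names, and the key "not equivalent" point is that a mutex must be unlocked by the task that locked it:

    /* Sketch: a mutex used where a binary semaphore might have been. */
    static DEFINE_MUTEX(io_mutex);

    static void do_io(void)
    {
            mutex_lock(&io_mutex);          /* may sleep, like down() */
            /* ... critical section ... */
            mutex_unlock(&io_mutex);        /* must be the locking task */
    }
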
@@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
        preempt_disable();
        mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        /*
         * Optimistic spinning.
         *
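
The compound preprocessor test is folded into one config symbol. In the tree this is derived in Kconfig; the preprocessor sketch below only illustrates the same derivation:

    #if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
    # define CONFIG_MUTEX_SPIN_ON_OWNER 1
    #endif
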
@@ -171,9 +172,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                struct thread_info *owner;
 
                /*
-                * If there are pending waiters, join them.
+                * If we own the BKL, then don't spin. The owner of
+                * the mutex might be waiting on us to release the BKL.
                 */
-               if (!list_empty(&lock->wait_list))
+               if (unlikely(current->lock_depth >= 0))
                        break;
 
                /*
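
Why the BKL test replaces the wait-list test: a spinner that holds the BKL can wait forever on an owner that is itself blocked on the BKL. A sketch of the interleaving (CPU labels are illustrative; lock_depth >= 0 means current holds the BKL):

    /*
     *   CPU0                          CPU1
     *   lock_kernel();                mutex_lock(m);   // now owns m
     *   mutex_lock(m);                lock_kernel();   // blocks: CPU0 has BKL
     *     -> spins on m's owner ...
     *
     * CPU1 cannot release m until CPU0 drops the BKL, so CPU0 must
     * stop spinning and fall back to the sleeping slowpath.
     */
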
@@ -184,6 +186,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;
 
+               if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+                       lock_acquired(&lock->dep_map, ip);
+                       mutex_set_owner(lock);
+                       preempt_enable();
+                       return 0;
+               }
+
                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
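
The cmpxchg above is the spin loop's trylock, now attempted before the owner checks rather than after them. It relies on the mutex counter convention (1 unlocked, 0 locked, negative locked with possible waiters), so swapping 1 for 0 can only succeed on an unlocked mutex and never clobbers waiter state. A minimal userspace analogue, as a sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch only: 1 = unlocked, 0 = locked. The CAS takes the lock
     * iff it observes the unlocked value, in one atomic step. */
    static bool try_take(atomic_int *count)
    {
            int unlocked = 1;
            return atomic_compare_exchange_strong(count, &unlocked, 0);
    }
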
@@ -193,13 +202,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                if (!owner && (need_resched() || rt_task(task)))
                        break;
 
-               if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
-                       lock_acquired(&lock->dep_map, ip);
-                       mutex_set_owner(lock);
-                       preempt_enable();
-                       return 0;
-               }
-
                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
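
The comment above leans on cpu_relax() being a compiler barrier. On x86 it is roughly the PAUSE instruction plus a "memory" clobber; a sketch of the idea, not the exact kernel macro:

    #define cpu_relax() __asm__ __volatile__("rep; nop" ::: "memory")

    /* The "memory" clobber forces the compiler to reload lock->owner,
     * lock->count, etc. on every pass through the spin loop, while
     * "rep; nop" (PAUSE) eases pressure on a sibling hyperthread. */
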
@@ -254,7 +256,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
-               __schedule();
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
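
The slowpath runs with preemption disabled (see the preempt_disable() at its head), which the bare __schedule() call relied on. The replacement makes the sleep explicit about that invariant; the same three lines, annotated, as a sketch:

    preempt_enable_no_resched();    /* drop the preempt count without a
                                       resched check: we are about to
                                       schedule deliberately anyway */
    schedule();                     /* block until woken by the unlocker */
    preempt_disable();              /* re-establish the slowpath invariant */
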
 
@@ -476,5 +480,28 @@ int __sched mutex_trylock(struct mutex *lock)
 
        return ret;
 }
-
 EXPORT_SYMBOL(mutex_trylock);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+       /* dec if we can't possibly hit 0 */
+       if (atomic_add_unless(cnt, -1, 1))
+               return 0;
+       /* we might hit 0, so take the lock */
+       mutex_lock(lock);
+       if (!atomic_dec_and_test(cnt)) {
+               /* when we actually did the dec, we didn't hit 0 */
+               mutex_unlock(lock);
+               return 0;
+       }
+       /* we hit 0, and we hold the lock */
+       return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
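
A typical caller pattern for the new helper, as a sketch: drop a reference, and take the mutex only when this put may be the final one. All names here (my_obj, my_obj_put, obj_lock, the node field) are hypothetical, not from the patch:

    /* Sketch: free a shared object on its final put. */
    static void my_obj_put(struct my_obj *obj)
    {
            if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj_lock))
                    return;                 /* not last ref: lock not taken */

            /* refcount hit 0 and obj_lock is held */
            list_del(&obj->node);
            mutex_unlock(&obj_lock);
            kfree(obj);
    }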