diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7eb9606..7043db2 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -81,15 +81,10 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 void fastcall __sched mutex_lock(struct mutex *lock)
 {
+       might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
-        *
-        * NOTE: if asm/mutex.h is included, then some architectures
-        * rely on mutex_lock() having _no other code_ here but this
-        * fastpath. That allows the assembly fastpath to do
-        * tail-merging optimizations. (If you want to put testcode
-        * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 }
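
[Annotation, not part of the patch] The might_sleep() added above is the behavioural change in this hunk: a caller in atomic context is now flagged even when the fastpath would have taken the lock without blocking. A hypothetical caller this annotation is meant to catch (demo_spinlock, demo_mutex and buggy_caller() are invented for this note):

#include <linux/spinlock.h>
#include <linux/mutex.h>

/*
 * Hypothetical illustration only: with CONFIG_DEBUG_SPINLOCK_SLEEP,
 * might_sleep() warns about taking a mutex in atomic context even if
 * the fastpath would not actually sleep.
 */
static DEFINE_SPINLOCK(demo_spinlock);
static DEFINE_MUTEX(demo_mutex);

static void buggy_caller(void)
{
	spin_lock(&demo_spinlock);	/* atomic context begins here */
	mutex_lock(&demo_mutex);	/* might_sleep() now warns unconditionally */
	mutex_unlock(&demo_mutex);
	spin_unlock(&demo_spinlock);
}
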
@@ -115,8 +110,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock)
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
-        *
-        * NOTE: no other code must be here - see mutex_lock() .
         */
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
@@ -132,10 +125,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned int old_val;
+       unsigned long flags;
 
        debug_mutex_init_waiter(&waiter);
 
-       spin_lock_mutex(&lock->wait_lock);
+       spin_lock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
 
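
[Annotation, not part of the patch] Every spin_lock_mutex()/spin_unlock_mutex() call site now passes a flags word. The helpers themselves live in kernel/mutex.h and kernel/mutex-debug.h; a minimal sketch of the shape implied by these call sites, assuming the debug variant is the one that saves and restores interrupt state, is:

/*
 * Sketch, not the exact tree contents: the debug flavour of the
 * wait_lock helpers can disable interrupts around the lock and
 * restore the caller's IRQ state afterwards, while the non-debug
 * flavour simply ignores the extra argument.
 */
#ifdef CONFIG_DEBUG_MUTEXES
# define spin_lock_mutex(lock, flags)		\
	do {					\
		local_irq_save(flags);		\
		spin_lock(lock);		\
	} while (0)
# define spin_unlock_mutex(lock, flags)		\
	do {					\
		spin_unlock(lock);		\
		local_irq_restore(flags);	\
	} while (0)
#else
# define spin_lock_mutex(lock, flags)		\
	do { spin_lock(lock); (void)(flags); } while (0)
# define spin_unlock_mutex(lock, flags)		\
	do { spin_unlock(lock); (void)(flags); } while (0)
#endif
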
@@ -164,7 +158,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
                if (unlikely(state == TASK_INTERRUPTIBLE &&
                                                signal_pending(task))) {
                        mutex_remove_waiter(lock, &waiter, task->thread_info);
-                       spin_unlock_mutex(&lock->wait_lock);
+                       spin_unlock_mutex(&lock->wait_lock, flags);
 
                        debug_mutex_free_waiter(&waiter);
                        return -EINTR;
@@ -172,9 +166,9 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
                __set_task_state(task, state);
 
                /* didnt get the lock, go to sleep: */
-               spin_unlock_mutex(&lock->wait_lock);
+               spin_unlock_mutex(&lock->wait_lock, flags);
                schedule();
-               spin_lock_mutex(&lock->wait_lock);
+               spin_lock_mutex(&lock->wait_lock, flags);
        }
 
        /* got the lock - rejoice! */
@@ -185,7 +179,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
 
-       spin_unlock_mutex(&lock->wait_lock);
+       spin_unlock_mutex(&lock->wait_lock, flags);
 
        debug_mutex_free_waiter(&waiter);
 
@@ -209,11 +203,12 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
 static fastcall noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 {
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
+       struct mutex *lock = container_of(lock_count, struct mutex, count);
+       unsigned long flags;
 
        DEBUG_WARN_ON(lock->owner != current_thread_info());
 
-       spin_lock_mutex(&lock->wait_lock);
+       spin_lock_mutex(&lock->wait_lock, flags);
 
        /*
         * some architectures leave the lock unlocked in the fastpath failure
@@ -238,7 +233,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 
        debug_mutex_clear_owner(lock);
 
-       spin_unlock_mutex(&lock->wait_lock);
+       spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
 /*
@@ -261,7 +256,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
  */
 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 {
-       /* NOTE: no other code must be here - see mutex_lock() */
+       might_sleep();
        return __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
 }
@@ -283,9 +278,10 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
 static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 {
        struct mutex *lock = container_of(lock_count, struct mutex, count);
+       unsigned long flags;
        int prev;
 
-       spin_lock_mutex(&lock->wait_lock);
+       spin_lock_mutex(&lock->wait_lock, flags);
 
        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1))
@@ -294,7 +290,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
 
-       spin_unlock_mutex(&lock->wait_lock);
+       spin_unlock_mutex(&lock->wait_lock, flags);
 
        return prev == 1;
 }
@@ -320,6 +316,3 @@ int fastcall mutex_trylock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_trylock);
-
-
-
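
[Annotation, not part of the patch] For context on the NOTE comments removed above: __mutex_fastpath_lock() is supplied by the architecture via asm/mutex.h, and a typical generic implementation just decrements the counter and calls the supplied slowpath on contention. A rough, simplified sketch of that style of fastpath:

/*
 * Rough sketch only, not the exact asm-generic header: decrement the
 * count and punt to the slowpath function if the mutex was contended.
 * The removed NOTEs guaranteed that mutex_lock()/mutex_unlock()
 * contained nothing but this fastpath call so some architectures could
 * tail-merge it; with might_sleep() placed in front of it, that
 * guarantee no longer holds, which is why the comments were dropped.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}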