tty.h: remove dead define [safe/jmp/linux-2.6] / kernel/mutex.c
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 3aad0b7..d7fe50c 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -39,17 +39,19 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
+void
+__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
         atomic_set(&lock->count, 1);
         spin_lock_init(&lock->wait_lock);
         INIT_LIST_HEAD(&lock->wait_list);
 
-        debug_mutex_init(lock, name);
+        debug_mutex_init(lock, name, key);
 }
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -91,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -121,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+                unsigned long ip)
 {
         struct task_struct *task = current;
         struct mutex_waiter waiter;
@@ -131,12 +135,19 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
         spin_lock_mutex(&lock->wait_lock, flags);
 
         debug_mutex_lock_common(lock, &waiter);
-        debug_mutex_add_waiter(lock, &waiter, task->thread_info);
+        mutex_acquire(&lock->dep_map, subclass, 0, ip);
+        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
         /* add waiting tasks to the end of the waitqueue (FIFO): */
         list_add_tail(&waiter.list, &lock->wait_list);
         waiter.task = task;
 
+        old_val = atomic_xchg(&lock->count, -1);
+        if (old_val == 1)
+                goto done;
+
+        lock_contended(&lock->dep_map, ip);
+
         for (;;) {
                 /*
                  * Lets try to take the lock again - this is needed even if
@@ -157,7 +168,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
                  */
                 if (unlikely(state == TASK_INTERRUPTIBLE &&
                                         signal_pending(task))) {
-                        mutex_remove_waiter(lock, &waiter, task->thread_info);
+                        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+                        mutex_release(&lock->dep_map, 1, ip);
                         spin_unlock_mutex(&lock->wait_lock, flags);
 
                         debug_mutex_free_waiter(&waiter);
@@ -171,9 +183,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
                 spin_lock_mutex(&lock->wait_lock, flags);
         }
 
+done:
+        lock_acquired(&lock->dep_map);
         /* got the lock - rejoice! */
-        mutex_remove_waiter(lock, &waiter, task->thread_info);
-        debug_mutex_set_owner(lock, task->thread_info);
+        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+        debug_mutex_set_owner(lock, task_thread_info(task));
 
         /* set it to 0 if there are no waiters left: */
         if (likely(list_empty(&lock->wait_list)))
@@ -186,24 +200,37 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
         return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched
+mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
-        struct mutex *lock = container_of(lock_count, struct mutex, count);
+        might_sleep();
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
+}
+
+EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
-        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
+int __sched
+mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
+{
+        might_sleep();
+        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
+EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
+#endif
+
 /*
  * Release the lock, slowpath:
  */
 static fastcall inline void
-__mutex_unlock_common_slowpath(atomic_t *lock_count)
+__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
         struct mutex *lock = container_of(lock_count, struct mutex, count);
         unsigned long flags;
 
         spin_lock_mutex(&lock->wait_lock, flags);
+        mutex_release(&lock->dep_map, nested, _RET_IP_);
         debug_mutex_unlock(lock);
 
         /*
@@ -236,9 +263,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count)
 static fastcall noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
-        __mutex_unlock_common_slowpath(lock_count);
+        __mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -266,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+        struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
         struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
 
+#endif
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
@@ -287,9 +324,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
         spin_lock_mutex(&lock->wait_lock, flags);
 
         prev = atomic_xchg(&lock->count, -1);
-        if (likely(prev == 1))
+        if (likely(prev == 1)) {
                 debug_mutex_set_owner(lock, current_thread_info());
-
+                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+        }
         /* Set it back to 0 if there are no waiters: */
         if (likely(list_empty(&lock->wait_list)))
                 atomic_set(&lock->count, 0);
@@ -313,7 +351,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall mutex_trylock(struct mutex *lock)
+int fastcall __sched mutex_trylock(struct mutex *lock)
 {
         return __mutex_fastpath_trylock(&lock->count,
                                         __mutex_trylock_slowpath);
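
The nested-lock entry points added above, mutex_lock_nested() and mutex_lock_interruptible_nested(), are only built when CONFIG_DEBUG_LOCK_ALLOC is set; they let a caller that legitimately holds two mutexes of the same lock class tell lockdep the intended nesting instead of triggering a false recursive-locking report. A minimal sketch of how a caller might use this API is shown below; the struct foo type and the parent/child locking order are hypothetical and only for illustration, and SINGLE_DEPTH_NESTING (subclass 1) is assumed to come from linux/lockdep.h as in mainline kernels.

#include <linux/mutex.h>

/* Hypothetical object type: every instance's lock shares one lock class. */
struct foo {
        struct mutex lock;
};

static void foo_move(struct foo *parent, struct foo *child)
{
        mutex_lock(&parent->lock);
        /*
         * child->lock is in the same lock class as parent->lock, so a plain
         * mutex_lock() here would look like a self-deadlock to lockdep.
         * Annotate the nesting level instead; with CONFIG_DEBUG_LOCK_ALLOC
         * disabled this is expected to fall back to a plain mutex_lock().
         */
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

        /* ... move state from parent to child ... */

        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
}

When lock debugging is disabled, the #ifndef CONFIG_DEBUG_LOCK_ALLOC regions in the diff keep the original fastpath wrappers, so the annotations add no overhead to production builds.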