X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=lib%2Frwsem-spinlock.c;h=ccf95bff798439c7afead0834370864ff41a735c;hb=e0c8233622cbd49d171bc57b60e725f2fb748750;hp=03b6097eb04e6ecbf39057799ed09f0e9f573fca;hpb=c4e05116a2c4d8187127dbf77ab790aa57a47388;p=safe%2Fjmp%2Flinux-2.6

diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 03b6097..ccf95bf 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,15 +17,37 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
+int rwsem_is_locked(struct rw_semaphore *sem)
+{
+	int ret = 1;
+	unsigned long flags;
+
+	if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+		ret = (sem->activity != 0);
+		spin_unlock_irqrestore(&sem->wait_lock, flags);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rwsem_is_locked);
+
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 }
+EXPORT_SYMBOL(__init_rwsem);
 
 /*
  * handle the lock release when processes blocked on it that can now run
@@ -117,7 +139,7 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 /*
  * get a read lock on the semaphore
  */
-void fastcall __sched __down_read(struct rw_semaphore *sem)
+void __sched __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -160,7 +182,7 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-int fastcall __down_read_trylock(struct rw_semaphore *sem)
+int __down_read_trylock(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -183,7 +205,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -223,10 +245,15 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	;
 }
 
+void __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-int fastcall __down_write_trylock(struct rw_semaphore *sem)
+int __down_write_trylock(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -247,7 +274,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
 /*
  * release a read lock on the semaphore
  */
-void fastcall __up_read(struct rw_semaphore *sem)
+void __up_read(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
@@ -262,7 +289,7 @@ void fastcall __up_read(struct rw_semaphore *sem)
 /*
  * release a write lock on the semaphore
  */
-void fastcall __up_write(struct rw_semaphore *sem)
+void __up_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
@@ -279,7 +306,7 @@ void fastcall __up_write(struct rw_semaphore *sem)
  * downgrade a write lock into a read lock
  * - just wake up any readers at the front of the queue
  */
-void fastcall __downgrade_write(struct rw_semaphore *sem)
+void __downgrade_write(struct rw_semaphore *sem)
 {
 	unsigned long flags;
 
@@ -292,11 +319,3 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
-EXPORT_SYMBOL(init_rwsem);
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
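
Usage sketch (not part of the patch): the diff above drops the obsolete
fastcall attribute, moves the EXPORT_SYMBOLs next to their definitions,
adds a trylock-based rwsem_is_locked(), and splits __down_write() into a
lockdep-aware __down_write_nested(). The snippet below is a minimal,
hypothetical illustration of how callers reach this code through the
usual <linux/rwsem.h> wrappers; the names my_object, data_sem, and the
helper functions are invented for illustration only.

#include <linux/rwsem.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>

struct my_object {
	struct rw_semaphore data_sem;	/* guards "data" below */
	int data;
};

static void my_object_setup(struct my_object *obj)
{
	/*
	 * init_rwsem() is a macro that supplies a static
	 * lock_class_key to __init_rwsem(), letting lockdep
	 * classify every semaphore initialised at this site.
	 */
	init_rwsem(&obj->data_sem);
	obj->data = 0;
}

static void my_object_copy(struct my_object *dst, struct my_object *src)
{
	down_write(&src->data_sem);
	/*
	 * Both semaphores share a lock class, so a plain second
	 * down_write() would trip lockdep.  The _nested() variant
	 * (backed by __down_write_nested() in the patch) annotates
	 * the ordering the caller has already guaranteed.
	 */
	down_write_nested(&dst->data_sem, SINGLE_DEPTH_NESTING);

	dst->data = src->data;

	up_write(&dst->data_sem);
	up_write(&src->data_sem);
}

static void my_object_assert_held(struct my_object *obj)
{
	/*
	 * rwsem_is_locked() is advisory: it trylocks wait_lock and
	 * samples sem->activity, so it suits WARN_ON-style checks
	 * rather than synchronisation decisions.
	 */
	WARN_ON(!rwsem_is_locked(&obj->data_sem));
}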