include/asm-x86/rwsem.h
/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
 *
 * Written by David Howells (dhowells@redhat.com).
 *
 * Derived from asm-x86/semaphore.h
 *
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and possibly readers) waiting, in which case the
 * reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
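
/*
 * Some example count values, derived from the bias definitions below
 * (illustrative only; MSW = negated number of active writers plus waiting
 * lockers, LSW = number of active lockers):
 *
 *	0x00000000	unlocked, nobody waiting
 *	0x00000001	one active reader
 *	0x00000003	three active readers
 *	0xffff0001	one active writer (WAITING_BIAS + ACTIVE_BIAS), or
 *			one active reader with one locker queued behind it
 *	0xfffe0001	one active writer with one locker waiting
 *	0xffff0000	nobody active, one locker queued (transient while it
 *			is being woken)
 */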

#ifndef _I386_RWSEM_H
#define _I386_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct rwsem_waiter;

extern asmregparm struct rw_semaphore *
 rwsem_down_read_failed(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
 rwsem_down_write_failed(struct rw_semaphore *sem);
extern asmregparm struct rw_semaphore *
 rwsem_wake(struct rw_semaphore *);
extern asmregparm struct rw_semaphore *
 rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
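
/*
 * Usage sketch (illustrative only, not part of this header): callers take the
 * lock through the wrappers declared in <linux/rwsem.h> rather than the
 * __-prefixed primitives below.  'example_lock' and 'example_counter' are
 * made-up names.
 */
#if 0
static DECLARE_RWSEM(example_lock);
static int example_counter;

static int example_get(void)
{
	int v;

	down_read(&example_lock);	/* shared: concurrent readers allowed */
	v = example_counter;
	up_read(&example_lock);
	return v;
}

static void example_set(int v)
{
	down_write(&example_lock);	/* exclusive: blocks readers and writers */
	example_counter = v;
	up_write(&example_lock);
}
#endif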

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	__asm__ __volatile__(
		"# beginning down_read\n\t"
LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001; sign flag reflects the new value */
		"  jns        1f\n"
		"  call call_rwsem_down_read_failed\n"
		"1:\n\t"
		"# ending down_read\n\t"
		: "+m" (sem->count)
		: "a" (sem)
		: "memory", "cc");
}
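
/*
 * For reference, a portable C sketch of the fast path implemented by the
 * assembly above (illustrative only; uses the GCC __atomic builtins rather
 * than a LOCK-prefixed incl):
 */
#if 0
static inline void sketch_down_read(struct rw_semaphore *sem)
{
	/* add ACTIVE_READ_BIAS; a negative result means a writer is active
	 * or lockers are queued, so fall back to the out-of-line slow path */
	if (__atomic_add_fetch(&sem->count, RWSEM_ACTIVE_READ_BIAS,
			       __ATOMIC_ACQUIRE) < 0)
		rwsem_down_read_failed(sem);
}
#endif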

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	__s32 result, tmp;
	__asm__ __volatile__(
		"# beginning __down_read_trylock\n\t"
		"  movl      %0,%1\n\t"
		"1:\n\t"
		"  movl      %1,%2\n\t"
		"  addl      %3,%2\n\t"
		"  jle       2f\n\t"
LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
		"  jnz       1b\n\t"
		"2:\n\t"
		"# ending __down_read_trylock\n\t"
		: "+m" (sem->count), "=&a" (result), "=&r" (tmp)
		: "i" (RWSEM_ACTIVE_READ_BIAS)
		: "memory", "cc");
	return result >= 0 ? 1 : 0;
}
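
/*
 * Equivalent logic of the cmpxchg loop above in portable C (illustrative
 * sketch only):
 */
#if 0
static inline int sketch_down_read_trylock(struct rw_semaphore *sem)
{
	signed long old = sem->count, new;

	do {
		new = old + RWSEM_ACTIVE_READ_BIAS;
		if (new <= 0)
			return 0;	/* a writer is active or queued */
		/* on failure, 'old' is reloaded with the current count */
	} while (!__atomic_compare_exchange_n(&sem->count, &old, new, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return 1;
}
#endif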

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	int tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		"# beginning down_write\n\t"
LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
		"  jz        1f\n"
		"  call call_rwsem_down_write_failed\n"
		"1:\n"
		"# ending down_write"
		: "+m" (sem->count), "=d" (tmp)
		: "a" (sem), "1" (tmp)
		: "memory", "cc");
}
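
/*
 * The write-lock fast path above in portable C (illustrative sketch only):
 * the previous count must have been zero for the lock to have been free.
 */
#if 0
static inline void sketch_down_write(struct rw_semaphore *sem)
{
	signed long old;

	old = __atomic_fetch_add(&sem->count, RWSEM_ACTIVE_WRITE_BIAS,
				 __ATOMIC_ACQUIRE);
	if (old != 0)	/* somebody already active or queued */
		rwsem_down_write_failed(sem);
}
#endif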

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long ret = cmpxchg(&sem->count,
				  RWSEM_UNLOCKED_VALUE,
				  RWSEM_ACTIVE_WRITE_BIAS);
	if (ret == RWSEM_UNLOCKED_VALUE)
		return 1;
	return 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
	__asm__ __volatile__(
		"# beginning __up_read\n\t"
LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
		"  jns        1f\n\t"
		"  call call_rwsem_wake\n"
		"1:\n"
		"# ending __up_read\n"
		: "+m" (sem->count), "=d" (tmp)
		: "a" (sem), "1" (tmp)
		: "memory", "cc");
}
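
/*
 * Read-unlock fast path above in portable C (illustrative sketch only): a
 * negative result means lockers are queued, and the out-of-line helper
 * decides whether any of them can now be woken.
 */
#if 0
static inline void sketch_up_read(struct rw_semaphore *sem)
{
	if (__atomic_add_fetch(&sem->count, -RWSEM_ACTIVE_READ_BIAS,
			       __ATOMIC_RELEASE) < 0)
		rwsem_wake(sem);	/* stand-in for call_rwsem_wake */
}
#endif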

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	__asm__ __volatile__(
		"# beginning __up_write\n\t"
		"  movl      %2,%%edx\n\t"
LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
		"  jz       1f\n"
		"  call call_rwsem_wake\n"
		"1:\n\t"
		"# ending __up_write\n"
		: "+m" (sem->count)
		: "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
		: "memory", "cc", "edx");
}
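
/*
 * Write-unlock fast path above in portable C (illustrative sketch only):
 * dropping the write bias takes an uncontended count of 0xffff0001 back to
 * zero; anything left over means lockers are queued.
 */
#if 0
static inline void sketch_up_write(struct rw_semaphore *sem)
{
	if (__atomic_add_fetch(&sem->count, -RWSEM_ACTIVE_WRITE_BIAS,
			       __ATOMIC_RELEASE) != 0)
		rwsem_wake(sem);	/* stand-in for call_rwsem_wake */
}
#endif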

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	__asm__ __volatile__(
		"# beginning __downgrade_write\n\t"
LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
		"  jns       1f\n\t"
		"  call call_rwsem_downgrade_wake\n"
		"1:\n\t"
		"# ending __downgrade_write\n"
		: "+m" (sem->count)
		: "a" (sem), "i" (-RWSEM_WAITING_BIAS)
		: "memory", "cc");
}
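
/*
 * Worked example of the downgrade arithmetic above (illustrative): with an
 * uncontended write lock the count is 0xffff0001; adding -RWSEM_WAITING_BIAS
 * (i.e. +0x00010000) leaves 0x00000001, one active reader.  If lockers were
 * queued, e.g. a count of 0xfffe0001, the result 0xffff0001 is still
 * negative, so the out-of-line helper runs and can wake waiting readers.
 */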

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	__asm__ __volatile__(
LOCK_PREFIX	"addl %1,%0"
		: "+m" (sem->count)
		: "ir" (delta));
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	int tmp = delta;

	__asm__ __volatile__(
LOCK_PREFIX	"xadd %0,%1"
		: "+r" (tmp), "+m" (sem->count)
		: : "memory");

	return tmp + delta;
}
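
/*
 * rwsem_atomic_update() behaves like an atomic add that returns the new
 * value; an equivalent portable sketch (illustrative only):
 *
 *	return __atomic_add_fetch(&sem->count, delta, __ATOMIC_SEQ_CST);
 */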

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _I386_RWSEM_H */