/* lib/rwsem-spinlock.c (generic spinlock-based R/W semaphore implementation) */
1 /* rwsem-spinlock.c: R/W semaphores: contention handling functions for
2  * generic spinlock implementation
3  *
4  * Copyright (c) 2001   David Howells (dhowells@redhat.com).
5  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
6  * - Derived also from comments by Linus
7  */
8 #include <linux/rwsem.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11
/*
 * One entry on a semaphore's wait_list: the sleeping task plus a flag
 * saying whether it is queued for a read or a write grant.
 */
struct rwsem_waiter {
	struct list_head list;		/* link in sem->wait_list (FIFO) */
	struct task_struct *task;	/* waiter; NULLed when lock is granted */
	unsigned int flags;		/* one of the RWSEM_WAITING_* values */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
19
20 int rwsem_is_locked(struct rw_semaphore *sem)
21 {
22         int ret = 1;
23         unsigned long flags;
24
25         if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
26                 ret = (sem->activity != 0);
27                 spin_unlock_irqrestore(&sem->wait_lock, flags);
28         }
29         return ret;
30 }
31 EXPORT_SYMBOL(rwsem_is_locked);
32
/*
 * initialise the semaphore
 * - puts it in the unlocked state: no active holders, empty wait queue
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 * (checked before the fields below are overwritten)
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;		/* 0 = unlocked, >0 readers, -1 writer */
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
51
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		/* not allowed to wake writers: if a writer heads the queue,
		 * wake nobody at all; otherwise go wake the leading readers */
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;	/* mark exclusively held */
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed: clearing
		 * ->task is the grant signal, and the waiter's on-stack
		 * rwsem_waiter may vanish as soon as it observes NULL */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);	/* drop the ref taken at queue time */
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();	/* order list_del vs ->task = NULL, as above */
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	/* one active reader per waiter granted above */
	sem->activity += woken;

 out:
	return sem;
}
116
/*
 * wake a single writer
 * - called with wait_lock held; callers check the queue is non-empty,
 *   and the head waiter is granted exclusive ownership
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;	/* exclusively held from this point on */

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();		/* don't touch waiter after ->task is NULLed */
	waiter->task = NULL;	/* grant signal; waiter's stack may vanish now */
	wake_up_process(tsk);
	put_task_struct(tsk);	/* drop the ref taken when the waiter queued */
	return sem;
}
138
/*
 * get a read lock on the semaphore
 * - sleeps uninterruptibly until granted if a writer holds the lock or
 *   anyone is already queued
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted: no writer holds it and nobody is queued ahead */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);	/* ref is dropped by whoever wakes us */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock: the waker signals the grant by
	 * clearing waiter.task (see __rwsem_do_wake) */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
181
182 /*
183  * trylock for reading -- returns 1 if successful, 0 if contention
184  */
185 int __down_read_trylock(struct rw_semaphore *sem)
186 {
187         unsigned long flags;
188         int ret = 0;
189
190
191         spin_lock_irqsave(&sem->wait_lock, flags);
192
193         if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
194                 /* granted */
195                 sem->activity++;
196                 ret = 1;
197         }
198
199         spin_unlock_irqrestore(&sem->wait_lock, flags);
200
201         return ret;
202 }
203
/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 * - the subclass argument is accepted for API compatibility but is
 *   unused in this spinlock-based implementation
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted: nobody holds it and nobody is queued */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);	/* ref is dropped by whoever wakes us */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock: the waker signals the grant by
	 * clearing waiter.task (see __rwsem_do_wake) */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
247
/*
 * get a write lock on the semaphore (non-nested form)
 * - delegates to the nested variant with subclass 0
 */
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
252
253 /*
254  * trylock for writing -- returns 1 if successful, 0 if contention
255  */
256 int __down_write_trylock(struct rw_semaphore *sem)
257 {
258         unsigned long flags;
259         int ret = 0;
260
261         spin_lock_irqsave(&sem->wait_lock, flags);
262
263         if (sem->activity == 0 && list_empty(&sem->wait_list)) {
264                 /* granted */
265                 sem->activity = -1;
266                 ret = 1;
267         }
268
269         spin_unlock_irqrestore(&sem->wait_lock, flags);
270
271         return ret;
272 }
273
274 /*
275  * release a read lock on the semaphore
276  */
277 void __up_read(struct rw_semaphore *sem)
278 {
279         unsigned long flags;
280
281         spin_lock_irqsave(&sem->wait_lock, flags);
282
283         if (--sem->activity == 0 && !list_empty(&sem->wait_list))
284                 sem = __rwsem_wake_one_writer(sem);
285
286         spin_unlock_irqrestore(&sem->wait_lock, flags);
287 }
288
289 /*
290  * release a write lock on the semaphore
291  */
292 void __up_write(struct rw_semaphore *sem)
293 {
294         unsigned long flags;
295
296         spin_lock_irqsave(&sem->wait_lock, flags);
297
298         sem->activity = 0;
299         if (!list_empty(&sem->wait_list))
300                 sem = __rwsem_do_wake(sem, 1);
301
302         spin_unlock_irqrestore(&sem->wait_lock, flags);
303 }
304
305 /*
306  * downgrade a write lock into a read lock
307  * - just wake up any readers at the front of the queue
308  */
309 void __downgrade_write(struct rw_semaphore *sem)
310 {
311         unsigned long flags;
312
313         spin_lock_irqsave(&sem->wait_lock, flags);
314
315         sem->activity = 1;
316         if (!list_empty(&sem->wait_list))
317                 sem = __rwsem_do_wake(sem, 0);
318
319         spin_unlock_irqrestore(&sem->wait_lock, flags);
320 }
321