#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))

void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
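
/*
 * The _foo() declarations above are the out-of-line entry points
 * (built in kernel/spinlock.c); the __foo() inlines further down are
 * the shared implementations.  Each CONFIG_INLINE_* option below
 * redirects the corresponding _foo() to its __foo() body, trading
 * kernel text size for the cost of a function call.
 */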
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK
#define _read_lock(lock) __read_lock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK
#define _write_lock(lock) __write_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_BH
#define _read_lock_bh(lock) __read_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_BH
#define _write_lock_bh(lock) __write_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQ
#define _read_lock_irq(lock) __read_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
#define _write_lock_irq(lock) __write_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_READ_TRYLOCK
#define _read_trylock(lock) __read_trylock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_TRYLOCK
#define _write_trylock(lock) __write_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK
#define _read_unlock(lock) __read_unlock(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK
#define _write_unlock(lock) __write_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_BH
#define _read_unlock_bh(lock) __read_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
#define _write_unlock_bh(lock) __write_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
#define _read_unlock_irq(lock) __read_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
#define _write_unlock_irq(lock) __write_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
#endif

#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
#endif

static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
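
/*
 * Illustrative caller pattern (a sketch, not part of this header):
 * users reach the trylock variants above through the spin_trylock(),
 * read_trylock() and write_trylock() wrappers in <linux/spinlock.h>.
 * "dev_lock" below is a hypothetical lock:
 *
 *	static DEFINE_SPINLOCK(dev_lock);
 *
 *	if (spin_trylock(&dev_lock)) {
 *		... short critical section ...
 *		spin_unlock(&dev_lock);
 *	} else {
 *		... lock is contended: back off instead of spinning ...
 *	}
 */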

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline void __read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
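
/*
 * Note that __spin_lock_irqsave() returns the saved flags instead of
 * taking a flags argument; the spin_lock_irqsave() macro in
 * <linux/spinlock.h> assigns the return value to the caller's flags
 * variable.  A sketch of the caller side ("my_lock" is hypothetical):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts disabled ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */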

static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}

static inline void __read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline void __read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}

static inline void __write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
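
/*
 * The unlock helpers below all follow the same pattern: tell lockdep
 * the lock is being released, do the architecture-level unlock, and
 * only then re-enable preemption (and, in the _irq/_irqrestore/_bh
 * variants, interrupts or bottom halves), so the lock is already free
 * before any preemption point is crossed.
 */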
static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}

static inline void __read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}

static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
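
/*
 * The _bh unlock variants use preempt_enable_no_resched() rather than
 * preempt_enable(): the resched check is deferred to
 * local_bh_enable_ip(), which re-enables bottom halves, runs any
 * softirqs that became pending meanwhile and then checks for
 * preemption in one place.  The _ip variant passes the caller's
 * return address so lockdep and the tracer attribute the enable to
 * the real call site.
 */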
static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline void __write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
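
/*
 * On failure, __spin_trylock_bh() unwinds in the reverse order of its
 * setup: the preempt count is dropped without a resched check and
 * bottom halves are re-enabled last, mirroring __spin_unlock_bh().
 */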

#endif /* __LINUX_SPINLOCK_API_SMP_H */