[GFS2] Shrink gfs2_inode memory by half
fs/gfs2/glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <asm/uaccess.h>
24
25 #include "gfs2.h"
26 #include "incore.h"
27 #include "glock.h"
28 #include "glops.h"
29 #include "inode.h"
30 #include "lm.h"
31 #include "lops.h"
32 #include "meta_io.h"
33 #include "quota.h"
34 #include "super.h"
35 #include "util.h"
36
37 struct greedy {
38         struct gfs2_holder gr_gh;
39         struct delayed_work gr_work;
40 };
41
42 struct gfs2_gl_hash_bucket {
43         struct hlist_head hb_list;
44 };
45
46 typedef void (*glock_examiner) (struct gfs2_glock * gl);
47
48 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
49 static int dump_glock(struct gfs2_glock *gl);
50 static int dump_inode(struct gfs2_inode *ip);
51
52 #define GFS2_GL_HASH_SHIFT      15
53 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
54 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
55
56 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
57
58 /*
59  * Despite what you might think, the numbers below are not arbitrary :-)
60  * They are taken from the ipv4 routing hash code, which is well tested
61  * and thus should be nearly optimal. Later on we might tweak the numbers
62  * but for now this should be fine.
63  *
64  * The reason for putting the locks in a separate array from the list heads
65  * is that we can have fewer locks than list heads and save memory. We use
66  * the same hash function for both, but with a different hash mask.
67  */
68 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
69         defined(CONFIG_PROVE_LOCKING)
70
71 #ifdef CONFIG_LOCKDEP
72 # define GL_HASH_LOCK_SZ        256
73 #else
74 # if NR_CPUS >= 32
75 #  define GL_HASH_LOCK_SZ       4096
76 # elif NR_CPUS >= 16
77 #  define GL_HASH_LOCK_SZ       2048
78 # elif NR_CPUS >= 8
79 #  define GL_HASH_LOCK_SZ       1024
80 # elif NR_CPUS >= 4
81 #  define GL_HASH_LOCK_SZ       512
82 # else
83 #  define GL_HASH_LOCK_SZ       256
84 # endif
85 #endif
86
87 /* We never want more locks than chains */
88 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
89 # undef GL_HASH_LOCK_SZ
90 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
91 #endif
92
93 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
94
95 static inline rwlock_t *gl_lock_addr(unsigned int x)
96 {
97         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
98 }
99 #else /* not SMP, so no spinlocks required */
100 static inline rwlock_t *gl_lock_addr(unsigned int x)
101 {
102         return NULL;
103 }
104 #endif
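
/*
 * Illustration (a sketch, not part of the build): with GFS2_GL_HASH_SHIFT
 * of 15 and GL_HASH_LOCK_SZ of 256, buckets 0x1234 and 0x1334 have
 * distinct list heads but share one rwlock, since gl_lock_addr() masks
 * the hash with (GL_HASH_LOCK_SZ - 1):
 *
 *	read_lock(gl_lock_addr(hash));
 *	gl = search_bucket(hash, sdp, name);
 *	read_unlock(gl_lock_addr(hash));
 */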
105
106 /**
107  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
108  * @actual: the current state of the lock
109  * @requested: the lock state that was requested by the caller
110  * @flags: the modifier flags passed in by the caller
111  *
112  * Returns: 1 if the locks are compatible, 0 otherwise
113  */
114
115 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
116                                    int flags)
117 {
118         if (actual == requested)
119                 return 1;
120
121         if (flags & GL_EXACT)
122                 return 0;
123
124         if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
125                 return 1;
126
127         if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
128                 return 1;
129
130         return 0;
131 }
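
/*
 * For example (informative only): a request for LM_ST_SHARED is
 * satisfied by a glock already held in LM_ST_EXCLUSIVE unless GL_EXACT
 * is set, and a request carrying LM_FLAG_ANY is satisfied by any state
 * other than LM_ST_UNLOCKED:
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0);          -> 1
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT);   -> 0
 *	relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY); -> 1
 */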
132
133 /**
134  * gl_hash() - Turn glock number into hash bucket number
135  * @sdp: The GFS2 superblock
136  * @name: The lock name
137  * Returns: The number of the corresponding hash bucket
138  */
139
140 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
141                             const struct lm_lockname *name)
142 {
143         unsigned int h;
144
145         h = jhash(&name->ln_number, sizeof(u64), 0);
146         h = jhash(&name->ln_type, sizeof(unsigned int), h);
147         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
148         h &= GFS2_GL_HASH_MASK;
149
150         return h;
151 }
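
/*
 * Note that the superblock pointer is mixed into the hash as well, so
 * identical lock names from different filesystems sharing the global
 * gl_hash_table tend to land in different buckets; search_bucket()
 * still checks gl_sbd to disambiguate collisions.
 */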
152
153 /**
154  * glock_free() - Perform a few checks and then release struct gfs2_glock
155  * @gl: The glock to release
156  *
157  * Also calls lock module to release its internal structure for this glock.
158  *
159  */
160
161 static void glock_free(struct gfs2_glock *gl)
162 {
163         struct gfs2_sbd *sdp = gl->gl_sbd;
164         struct inode *aspace = gl->gl_aspace;
165
166         gfs2_lm_put_lock(sdp, gl->gl_lock);
167
168         if (aspace)
169                 gfs2_aspace_put(aspace);
170
171         kmem_cache_free(gfs2_glock_cachep, gl);
172 }
173
174 /**
175  * gfs2_glock_hold() - increment reference count on glock
176  * @gl: The glock to hold
177  *
178  */
179
180 void gfs2_glock_hold(struct gfs2_glock *gl)
181 {
182         atomic_inc(&gl->gl_ref);
183 }
184
185 /**
186  * gfs2_glock_put() - Decrement reference count on glock
187  * @gl: The glock to put
188  *
189  */
190
191 int gfs2_glock_put(struct gfs2_glock *gl)
192 {
193         int rv = 0;
194         struct gfs2_sbd *sdp = gl->gl_sbd;
195
196         write_lock(gl_lock_addr(gl->gl_hash));
197         if (atomic_dec_and_test(&gl->gl_ref)) {
198                 hlist_del(&gl->gl_list);
199                 write_unlock(gl_lock_addr(gl->gl_hash));
200                 BUG_ON(spin_is_locked(&gl->gl_spin));
201                 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
202                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
203                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
204                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
205                 gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
206                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
207                 glock_free(gl);
208                 rv = 1;
209                 goto out;
210         }
211         write_unlock(gl_lock_addr(gl->gl_hash));
212 out:
213         return rv;
214 }
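
/*
 * A minimal pairing sketch: every gfs2_glock_hold() must be balanced by
 * a gfs2_glock_put(), and the final put unhashes and frees the glock,
 * so it must not be touched afterwards:
 *
 *	gfs2_glock_hold(gl);
 *	... use gl ...
 *	gfs2_glock_put(gl);
 */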
215
216 /**
217  * queue_empty - check to see if a glock's queue is empty
218  * @gl: the glock
219  * @head: the head of the queue to check
220  *
221  * This function protects the list in the event that a process already
222  * has a holder on the list and is adding a second holder for itself.
223  * The glmutex lock is what generally prevents processes from working
224  * on the same glock at once, but the special case of adding a second
225  * holder for yourself ("recursive" locking) doesn't involve locking
226  * glmutex, making the spin lock necessary.
227  *
228  * Returns: 1 if the queue is empty
229  */
230
231 static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
232 {
233         int empty;
234         spin_lock(&gl->gl_spin);
235         empty = list_empty(head);
236         spin_unlock(&gl->gl_spin);
237         return empty;
238 }
239
240 /**
241  * search_bucket() - Find struct gfs2_glock by lock number
242  * @hash: the hash bucket index
243  * @sdp: the filesystem superblock
244  * @name: The lock name
245  * Returns: NULL, or the struct gfs2_glock with the requested number
246  */
247
248 static struct gfs2_glock *search_bucket(unsigned int hash,
249                                         const struct gfs2_sbd *sdp,
250                                         const struct lm_lockname *name)
251 {
252         struct gfs2_glock *gl;
253         struct hlist_node *h;
254
255         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
256                 if (!lm_name_equal(&gl->gl_name, name))
257                         continue;
258                 if (gl->gl_sbd != sdp)
259                         continue;
260
261                 atomic_inc(&gl->gl_ref);
262
263                 return gl;
264         }
265
266         return NULL;
267 }
268
269 /**
270  * gfs2_glock_find() - Find glock by lock number
271  * @sdp: The GFS2 superblock
272  * @name: The lock name
273  *
274  * Returns: NULL, or the struct gfs2_glock with the requested number
275  */
276
277 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
278                                           const struct lm_lockname *name)
279 {
280         unsigned int hash = gl_hash(sdp, name);
281         struct gfs2_glock *gl;
282
283         read_lock(gl_lock_addr(hash));
284         gl = search_bucket(hash, sdp, name);
285         read_unlock(gl_lock_addr(hash));
286
287         return gl;
288 }
289
290 /**
291  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
292  * @sdp: The GFS2 superblock
293  * @number: the lock number
294  * @glops: The glock_operations to use
295  * @create: If 0, don't create the glock if it doesn't exist
296  * @glp: the glock is returned here
297  *
298  * This does not lock a glock, just finds/creates structures for one.
299  *
300  * Returns: errno
301  */
302
303 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
304                    const struct gfs2_glock_operations *glops, int create,
305                    struct gfs2_glock **glp)
306 {
307         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
308         struct gfs2_glock *gl, *tmp;
309         unsigned int hash = gl_hash(sdp, &name);
310         int error;
311
312         read_lock(gl_lock_addr(hash));
313         gl = search_bucket(hash, sdp, &name);
314         read_unlock(gl_lock_addr(hash));
315
316         if (gl || !create) {
317                 *glp = gl;
318                 return 0;
319         }
320
321         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
322         if (!gl)
323                 return -ENOMEM;
324
325         gl->gl_flags = 0;
326         gl->gl_name = name;
327         atomic_set(&gl->gl_ref, 1);
328         gl->gl_state = LM_ST_UNLOCKED;
329         gl->gl_hash = hash;
330         gl->gl_owner = NULL;
331         gl->gl_ip = 0;
332         gl->gl_ops = glops;
333         gl->gl_req_gh = NULL;
334         gl->gl_req_bh = NULL;
335         gl->gl_vn = 0;
336         gl->gl_stamp = jiffies;
337         gl->gl_object = NULL;
338         gl->gl_sbd = sdp;
339         gl->gl_aspace = NULL;
340         lops_init_le(&gl->gl_le, &gfs2_glock_lops);
341
342         /* If this glock protects actual on-disk data or metadata blocks,
343            create a VFS inode to manage the pages/buffers holding them. */
344         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
345                 gl->gl_aspace = gfs2_aspace_get(sdp);
346                 if (!gl->gl_aspace) {
347                         error = -ENOMEM;
348                         goto fail;
349                 }
350         }
351
352         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
353         if (error)
354                 goto fail_aspace;
355
356         write_lock(gl_lock_addr(hash));
357         tmp = search_bucket(hash, sdp, &name);
358         if (tmp) {
359                 write_unlock(gl_lock_addr(hash));
360                 glock_free(gl);
361                 gl = tmp;
362         } else {
363                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
364                 write_unlock(gl_lock_addr(hash));
365         }
366
367         *glp = gl;
368
369         return 0;
370
371 fail_aspace:
372         if (gl->gl_aspace)
373                 gfs2_aspace_put(gl->gl_aspace);
374 fail:
375         kmem_cache_free(gfs2_glock_cachep, gl);
376         return error;
377 }
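
/*
 * Usage sketch for gfs2_glock_get() (assuming sdp and a block number
 * are in scope; error handling abbreviated):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (!error)
 *		gfs2_glock_put(gl);
 */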
378
379 /**
380  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
381  * @gl: the glock
382  * @state: the state we're requesting
383  * @flags: the modifier flags
384  * @gh: the holder structure
385  *
386  */
387
388 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
389                       struct gfs2_holder *gh)
390 {
391         INIT_LIST_HEAD(&gh->gh_list);
392         gh->gh_gl = gl;
393         gh->gh_ip = (unsigned long)__builtin_return_address(0);
394         gh->gh_owner = current;
395         gh->gh_state = state;
396         gh->gh_flags = flags;
397         gh->gh_error = 0;
398         gh->gh_iflags = 0;
399
400         if (gh->gh_state == LM_ST_EXCLUSIVE)
401                 gh->gh_flags |= GL_LOCAL_EXCL;
402
403         gfs2_glock_hold(gl);
404 }
405
406 /**
407  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
408  * @state: the state we're requesting
409  * @flags: the modifier flags
410  * @gh: the holder structure
411  *
412  * Don't mess with the glock.
413  *
414  */
415
416 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
417 {
418         gh->gh_state = state;
419         gh->gh_flags = flags;
420         if (gh->gh_state == LM_ST_EXCLUSIVE)
421                 gh->gh_flags |= GL_LOCAL_EXCL;
422
423         gh->gh_iflags &= 1 << HIF_ALLOCED;
424         gh->gh_ip = (unsigned long)__builtin_return_address(0);
425 }
426
427 /**
428  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
429  * @gh: the holder structure
430  *
431  */
432
433 void gfs2_holder_uninit(struct gfs2_holder *gh)
434 {
435         gfs2_glock_put(gh->gh_gl);
436         gh->gh_gl = NULL;
437         gh->gh_ip = 0;
438 }
439
440 /**
441  * gfs2_holder_get - get a struct gfs2_holder structure
442  * @gl: the glock
443  * @state: the state we're requesting
444  * @flags: the modifier flags
445  * @gfp_flags: the allocation flags for the new holder
446  *
447  * Figure out how big an impact this function has.  Either:
448  * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
449  * 2) Leave it like it is
450  *
451  * Returns: the holder structure, NULL on ENOMEM
452  */
453
454 static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
455                                            unsigned int state,
456                                            int flags, gfp_t gfp_flags)
457 {
458         struct gfs2_holder *gh;
459
460         gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
461         if (!gh)
462                 return NULL;
463
464         gfs2_holder_init(gl, state, flags, gh);
465         set_bit(HIF_ALLOCED, &gh->gh_iflags);
466         gh->gh_ip = (unsigned long)__builtin_return_address(0);
467         return gh;
468 }
469
470 /**
471  * gfs2_holder_put - get rid of a struct gfs2_holder structure
472  * @gh: the holder structure
473  *
474  */
475
476 static void gfs2_holder_put(struct gfs2_holder *gh)
477 {
478         gfs2_holder_uninit(gh);
479         kfree(gh);
480 }
481
482 static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
483 {
484         if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
485                 gfs2_holder_put(gh);
486                 return;
487         }
488         clear_bit(HIF_WAIT, &gh->gh_iflags);
489         smp_mb();
490         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
491 }
492
493 static int holder_wait(void *word)
494 {
495         schedule();
496         return 0;
497 }
498
499 static void wait_on_holder(struct gfs2_holder *gh)
500 {
501         might_sleep();
502         wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
503 }
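
/*
 * The HIF_WAIT protocol: the bit is set before the holder is queued and
 * cleared (followed by smp_mb() and wake_up_bit()) when the request is
 * finished, so wait_on_holder() sleeps until the completion side runs
 * gfs2_holder_dispose_or_wake() or the equivalent inline sequence.
 */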
504
505 /**
506  * rq_mutex - process a mutex request in the queue
507  * @gh: the glock holder
508  *
509  * Returns: 1 if the queue is blocked
510  */
511
512 static int rq_mutex(struct gfs2_holder *gh)
513 {
514         struct gfs2_glock *gl = gh->gh_gl;
515
516         list_del_init(&gh->gh_list);
517         /*  gh->gh_error never examined.  */
518         set_bit(GLF_LOCK, &gl->gl_flags);
519                 clear_bit(HIF_WAIT, &gh->gh_iflags);
520         smp_mb();
521         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
522
523         return 1;
524 }
525
526 /**
527  * rq_promote - process a promote request in the queue
528  * @gh: the glock holder
529  *
530  * Acquire a new inter-node lock, or change a lock state to more restrictive.
531  *
532  * Returns: 1 if the queue is blocked
533  */
534
535 static int rq_promote(struct gfs2_holder *gh)
536 {
537         struct gfs2_glock *gl = gh->gh_gl;
538         struct gfs2_sbd *sdp = gl->gl_sbd;
539         const struct gfs2_glock_operations *glops = gl->gl_ops;
540
541         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
542                 if (list_empty(&gl->gl_holders)) {
543                         gl->gl_req_gh = gh;
544                         set_bit(GLF_LOCK, &gl->gl_flags);
545                         spin_unlock(&gl->gl_spin);
546
547                         if (atomic_read(&sdp->sd_reclaim_count) >
548                             gfs2_tune_get(sdp, gt_reclaim_limit) &&
549                             !(gh->gh_flags & LM_FLAG_PRIORITY)) {
550                                 gfs2_reclaim_glock(sdp);
551                                 gfs2_reclaim_glock(sdp);
552                         }
553
554                         glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
555                         spin_lock(&gl->gl_spin);
556                 }
557                 return 1;
558         }
559
560         if (list_empty(&gl->gl_holders)) {
561                 set_bit(HIF_FIRST, &gh->gh_iflags);
562                 set_bit(GLF_LOCK, &gl->gl_flags);
563         } else {
564                 struct gfs2_holder *next_gh;
565                 if (gh->gh_flags & GL_LOCAL_EXCL)
566                         return 1;
567                 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
568                                      gh_list);
569                 if (next_gh->gh_flags & GL_LOCAL_EXCL)
570                          return 1;
571         }
572
573         list_move_tail(&gh->gh_list, &gl->gl_holders);
574         gh->gh_error = 0;
575         set_bit(HIF_HOLDER, &gh->gh_iflags);
576
577         gfs2_holder_dispose_or_wake(gh);
578
579         return 0;
580 }
581
582 /**
583  * rq_demote - process a demote request in the queue
584  * @gh: the glock holder
585  *
586  * Returns: 1 if the queue is blocked
587  */
588
589 static int rq_demote(struct gfs2_holder *gh)
590 {
591         struct gfs2_glock *gl = gh->gh_gl;
592         const struct gfs2_glock_operations *glops = gl->gl_ops;
593
594         if (!list_empty(&gl->gl_holders))
595                 return 1;
596
597         if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
598                 list_del_init(&gh->gh_list);
599                 gh->gh_error = 0;
600                 spin_unlock(&gl->gl_spin);
601                 gfs2_holder_dispose_or_wake(gh);
602                 spin_lock(&gl->gl_spin);
603         } else {
604                 gl->gl_req_gh = gh;
605                 set_bit(GLF_LOCK, &gl->gl_flags);
606                 spin_unlock(&gl->gl_spin);
607
608                 if (gh->gh_state == LM_ST_UNLOCKED ||
609                     gl->gl_state != LM_ST_EXCLUSIVE)
610                         glops->go_drop_th(gl);
611                 else
612                         glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
613
614                 spin_lock(&gl->gl_spin);
615         }
616
617         return 0;
618 }
619
620 /**
621  * rq_greedy - process a queued request to drop greedy status
622  * @gh: the glock holder
623  *
624  * Returns: 1 if the queue is blocked
625  */
626
627 static int rq_greedy(struct gfs2_holder *gh)
628 {
629         struct gfs2_glock *gl = gh->gh_gl;
630
631         list_del_init(&gh->gh_list);
632         /*  gh->gh_error never examined.  */
633         clear_bit(GLF_GREEDY, &gl->gl_flags);
634         spin_unlock(&gl->gl_spin);
635
636         gfs2_holder_uninit(gh);
637         kfree(container_of(gh, struct greedy, gr_gh));
638
639         spin_lock(&gl->gl_spin);
640
641         return 0;
642 }
643
644 /**
645  * run_queue - process holder structures on a glock
646  * @gl: the glock
647  *
648  */
649 static void run_queue(struct gfs2_glock *gl)
650 {
651         struct gfs2_holder *gh;
652         int blocked = 1;
653
654         for (;;) {
655                 if (test_bit(GLF_LOCK, &gl->gl_flags))
656                         break;
657
658                 if (!list_empty(&gl->gl_waiters1)) {
659                         gh = list_entry(gl->gl_waiters1.next,
660                                         struct gfs2_holder, gh_list);
661
662                         if (test_bit(HIF_MUTEX, &gh->gh_iflags))
663                                 blocked = rq_mutex(gh);
664                         else
665                                 gfs2_assert_warn(gl->gl_sbd, 0);
666
667                 } else if (!list_empty(&gl->gl_waiters2) &&
668                            !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
669                         gh = list_entry(gl->gl_waiters2.next,
670                                         struct gfs2_holder, gh_list);
671
672                         if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
673                                 blocked = rq_demote(gh);
674                         else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
675                                 blocked = rq_greedy(gh);
676                         else
677                                 gfs2_assert_warn(gl->gl_sbd, 0);
678
679                 } else if (!list_empty(&gl->gl_waiters3)) {
680                         gh = list_entry(gl->gl_waiters3.next,
681                                         struct gfs2_holder, gh_list);
682
683                         if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
684                                 blocked = rq_promote(gh);
685                         else
686                                 gfs2_assert_warn(gl->gl_sbd, 0);
687
688                 } else
689                         break;
690
691                 if (blocked)
692                         break;
693         }
694 }
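
/*
 * Note the strict priority order above: mutex requests on gl_waiters1
 * are served first, then demote/greedy requests on gl_waiters2 (unless
 * GLF_SKIP_WAITERS2 is set), and only then promote requests on
 * gl_waiters3; a blocked request stops the queue until it completes.
 */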
695
696 /**
697  * gfs2_glmutex_lock - acquire a local lock on a glock
698  * @gl: the glock
699  *
700  * Gives caller exclusive access to manipulate a glock structure.
701  */
702
703 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
704 {
705         struct gfs2_holder gh;
706
707         gfs2_holder_init(gl, 0, 0, &gh);
708         set_bit(HIF_MUTEX, &gh.gh_iflags);
709         if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
710                 BUG();
711
712         spin_lock(&gl->gl_spin);
713         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
714                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
715         } else {
716                 gl->gl_owner = current;
717                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
718                 clear_bit(HIF_WAIT, &gh.gh_iflags);
719                 smp_mb();
720                 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
721         }
722         spin_unlock(&gl->gl_spin);
723
724         wait_on_holder(&gh);
725         gfs2_holder_uninit(&gh);
726 }
727
728 /**
729  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
730  * @gl: the glock
731  *
732  * Returns: 1 if the glock is acquired
733  */
734
735 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
736 {
737         int acquired = 1;
738
739         spin_lock(&gl->gl_spin);
740         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
741                 acquired = 0;
742         } else {
743                 gl->gl_owner = current;
744                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
745         }
746         spin_unlock(&gl->gl_spin);
747
748         return acquired;
749 }
750
751 /**
752  * gfs2_glmutex_unlock - release a local lock on a glock
753  * @gl: the glock
754  *
755  */
756
757 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
758 {
759         spin_lock(&gl->gl_spin);
760         clear_bit(GLF_LOCK, &gl->gl_flags);
761         gl->gl_owner = NULL;
762         gl->gl_ip = 0;
763         run_queue(gl);
764         BUG_ON(!spin_is_locked(&gl->gl_spin));
765         spin_unlock(&gl->gl_spin);
766 }
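
/*
 * Sketch of the glmutex protocol (informative): gfs2_glmutex_lock()
 * blocks until it owns GLF_LOCK, gfs2_glmutex_trylock() does not:
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... examine or change glock state ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */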
767
768 /**
769  * handle_callback - add a demote request to a lock's queue
770  * @gl: the glock
771  * @state: the state the caller wants us to change to
772  *
773  * Note: This may fail silently if we are out of memory.
774  */
775
776 static void handle_callback(struct gfs2_glock *gl, unsigned int state)
777 {
778         struct gfs2_holder *gh, *new_gh = NULL;
779
780 restart:
781         spin_lock(&gl->gl_spin);
782
783         list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
784                 if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
785                     gl->gl_req_gh != gh) {
786                         if (gh->gh_state != state)
787                                 gh->gh_state = LM_ST_UNLOCKED;
788                         goto out;
789                 }
790         }
791
792         if (new_gh) {
793                 list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
794                 new_gh = NULL;
795         } else {
796                 spin_unlock(&gl->gl_spin);
797
798                 new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
799                 if (!new_gh)
800                         return;
801                 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
802                 set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
803                 set_bit(HIF_WAIT, &new_gh->gh_iflags);
804
805                 goto restart;
806         }
807
808 out:
809         spin_unlock(&gl->gl_spin);
810
811         if (new_gh)
812                 gfs2_holder_put(new_gh);
813 }
814
815 /**
816  * state_change - record that the glock is now in a different state
817  * @gl: the glock
818  * @new_state: the new state
819  *
820  */
821
822 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
823 {
824         int held1, held2;
825
826         held1 = (gl->gl_state != LM_ST_UNLOCKED);
827         held2 = (new_state != LM_ST_UNLOCKED);
828
829         if (held1 != held2) {
830                 if (held2)
831                         gfs2_glock_hold(gl);
832                 else
833                         gfs2_glock_put(gl);
834         }
835
836         gl->gl_state = new_state;
837 }
838
839 /**
840  * xmote_bh - Called after the lock module is done acquiring a lock
841  * @gl: The glock in question
842  * @ret: the int returned from the lock module
843  *
844  */
845
846 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
847 {
848         struct gfs2_sbd *sdp = gl->gl_sbd;
849         const struct gfs2_glock_operations *glops = gl->gl_ops;
850         struct gfs2_holder *gh = gl->gl_req_gh;
851         int prev_state = gl->gl_state;
852         int op_done = 1;
853
854         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
855         gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
856         gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
857
858         state_change(gl, ret & LM_OUT_ST_MASK);
859
860         if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
861                 if (glops->go_inval)
862                         glops->go_inval(gl, DIO_METADATA);
863         } else if (gl->gl_state == LM_ST_DEFERRED) {
864                 /* We might not want to do this here.
865                    Look at moving to the inode glops. */
866                 if (glops->go_inval)
867                         glops->go_inval(gl, 0);
868         }
869
870         /*  Deal with each possible exit condition  */
871
872         if (!gh)
873                 gl->gl_stamp = jiffies;
874         else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
875                 spin_lock(&gl->gl_spin);
876                 list_del_init(&gh->gh_list);
877                 gh->gh_error = -EIO;
878                 spin_unlock(&gl->gl_spin);
879         } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
880                 spin_lock(&gl->gl_spin);
881                 list_del_init(&gh->gh_list);
882                 if (gl->gl_state == gh->gh_state ||
883                     gl->gl_state == LM_ST_UNLOCKED) {
884                         gh->gh_error = 0;
885                 } else {
886                         if (gfs2_assert_warn(sdp, gh->gh_flags &
887                                         (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
888                                 fs_warn(sdp, "ret = 0x%.8X\n", ret);
889                         gh->gh_error = GLR_TRYFAILED;
890                 }
891                 spin_unlock(&gl->gl_spin);
892
893                 if (ret & LM_OUT_CANCELED)
894                         handle_callback(gl, LM_ST_UNLOCKED);
895
896         } else if (ret & LM_OUT_CANCELED) {
897                 spin_lock(&gl->gl_spin);
898                 list_del_init(&gh->gh_list);
899                 gh->gh_error = GLR_CANCELED;
900                 spin_unlock(&gl->gl_spin);
901
902         } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
903                 spin_lock(&gl->gl_spin);
904                 list_move_tail(&gh->gh_list, &gl->gl_holders);
905                 gh->gh_error = 0;
906                 set_bit(HIF_HOLDER, &gh->gh_iflags);
907                 spin_unlock(&gl->gl_spin);
908
909                 set_bit(HIF_FIRST, &gh->gh_iflags);
910
911                 op_done = 0;
912
913         } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
914                 spin_lock(&gl->gl_spin);
915                 list_del_init(&gh->gh_list);
916                 gh->gh_error = GLR_TRYFAILED;
917                 spin_unlock(&gl->gl_spin);
918
919         } else {
920                 if (gfs2_assert_withdraw(sdp, 0) == -1)
921                         fs_err(sdp, "ret = 0x%.8X\n", ret);
922         }
923
924         if (glops->go_xmote_bh)
925                 glops->go_xmote_bh(gl);
926
927         if (op_done) {
928                 spin_lock(&gl->gl_spin);
929                 gl->gl_req_gh = NULL;
930                 gl->gl_req_bh = NULL;
931                 clear_bit(GLF_LOCK, &gl->gl_flags);
932                 run_queue(gl);
933                 spin_unlock(&gl->gl_spin);
934         }
935
936         gfs2_glock_put(gl);
937
938         if (gh)
939                 gfs2_holder_dispose_or_wake(gh);
940 }
941
942 /**
943  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
944  * @gl: The glock in question
945  * @state: the requested state
946  * @flags: modifier flags to the lock call
947  *
948  */
949
950 void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
951 {
952         struct gfs2_sbd *sdp = gl->gl_sbd;
953         const struct gfs2_glock_operations *glops = gl->gl_ops;
954         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
955                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
956                                  LM_FLAG_PRIORITY);
957         unsigned int lck_ret;
958
959         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
960         gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
961         gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
962         gfs2_assert_warn(sdp, state != gl->gl_state);
963
964         if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
965                 glops->go_sync(gl);
966
967         gfs2_glock_hold(gl);
968         gl->gl_req_bh = xmote_bh;
969
970         lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
971
972         if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
973                 return;
974
975         if (lck_ret & LM_OUT_ASYNC)
976                 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
977         else
978                 xmote_bh(gl, lck_ret);
979 }
980
981 /**
982  * drop_bh - Called after a lock module unlock completes
983  * @gl: the glock
984  * @ret: the return status
985  *
986  * Wakes up (or frees) the process waiting on the struct gfs2_holder, if any,
987  * and drops the reference on the glock that the top half took out.
988  *
989  */
990
991 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
992 {
993         struct gfs2_sbd *sdp = gl->gl_sbd;
994         const struct gfs2_glock_operations *glops = gl->gl_ops;
995         struct gfs2_holder *gh = gl->gl_req_gh;
996
997         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
998         gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
999         gfs2_assert_warn(sdp, !ret);
1000
1001         state_change(gl, LM_ST_UNLOCKED);
1002
1003         if (glops->go_inval)
1004                 glops->go_inval(gl, DIO_METADATA);
1005
1006         if (gh) {
1007                 spin_lock(&gl->gl_spin);
1008                 list_del_init(&gh->gh_list);
1009                 gh->gh_error = 0;
1010                 spin_unlock(&gl->gl_spin);
1011         }
1012
1013         if (glops->go_drop_bh)
1014                 glops->go_drop_bh(gl);
1015
1016         spin_lock(&gl->gl_spin);
1017         gl->gl_req_gh = NULL;
1018         gl->gl_req_bh = NULL;
1019         clear_bit(GLF_LOCK, &gl->gl_flags);
1020         run_queue(gl);
1021         spin_unlock(&gl->gl_spin);
1022
1023         gfs2_glock_put(gl);
1024
1025         if (gh)
1026                 gfs2_holder_dispose_or_wake(gh);
1027 }
1028
1029 /**
1030  * gfs2_glock_drop_th - call into the lock module to unlock a lock
1031  * @gl: the glock
1032  *
1033  */
1034
1035 void gfs2_glock_drop_th(struct gfs2_glock *gl)
1036 {
1037         struct gfs2_sbd *sdp = gl->gl_sbd;
1038         const struct gfs2_glock_operations *glops = gl->gl_ops;
1039         unsigned int ret;
1040
1041         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1042         gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
1043         gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1044
1045         if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1046                 glops->go_sync(gl);
1047
1048         gfs2_glock_hold(gl);
1049         gl->gl_req_bh = drop_bh;
1050
1051         ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
1052
1053         if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
1054                 return;
1055
1056         if (!ret)
1057                 drop_bh(gl, ret);
1058         else
1059                 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
1060 }
1061
1062 /**
1063  * do_cancels - cancel requests for locks stuck waiting on an expire flag
1064  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
1065  *
1066  * Don't cancel GL_NOCANCEL requests.
1067  */
1068
1069 static void do_cancels(struct gfs2_holder *gh)
1070 {
1071         struct gfs2_glock *gl = gh->gh_gl;
1072
1073         spin_lock(&gl->gl_spin);
1074
1075         while (gl->gl_req_gh != gh &&
1076                !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1077                !list_empty(&gh->gh_list)) {
1078                 if (gl->gl_req_bh && !(gl->gl_req_gh &&
1079                                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
1080                         spin_unlock(&gl->gl_spin);
1081                         gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
1082                         msleep(100);
1083                         spin_lock(&gl->gl_spin);
1084                 } else {
1085                         spin_unlock(&gl->gl_spin);
1086                         msleep(100);
1087                         spin_lock(&gl->gl_spin);
1088                 }
1089         }
1090
1091         spin_unlock(&gl->gl_spin);
1092 }
1093
1094 /**
1095  * glock_wait_internal - wait on a glock acquisition
1096  * @gh: the glock holder
1097  *
1098  * Returns: 0 on success
1099  */
1100
1101 static int glock_wait_internal(struct gfs2_holder *gh)
1102 {
1103         struct gfs2_glock *gl = gh->gh_gl;
1104         struct gfs2_sbd *sdp = gl->gl_sbd;
1105         const struct gfs2_glock_operations *glops = gl->gl_ops;
1106
1107         if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1108                 return -EIO;
1109
1110         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1111                 spin_lock(&gl->gl_spin);
1112                 if (gl->gl_req_gh != gh &&
1113                     !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1114                     !list_empty(&gh->gh_list)) {
1115                         list_del_init(&gh->gh_list);
1116                         gh->gh_error = GLR_TRYFAILED;
1117                         run_queue(gl);
1118                         spin_unlock(&gl->gl_spin);
1119                         return gh->gh_error;
1120                 }
1121                 spin_unlock(&gl->gl_spin);
1122         }
1123
1124         if (gh->gh_flags & LM_FLAG_PRIORITY)
1125                 do_cancels(gh);
1126
1127         wait_on_holder(gh);
1128         if (gh->gh_error)
1129                 return gh->gh_error;
1130
1131         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1132         gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1133                                                    gh->gh_flags));
1134
1135         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1136                 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1137
1138                 if (glops->go_lock) {
1139                         gh->gh_error = glops->go_lock(gh);
1140                         if (gh->gh_error) {
1141                                 spin_lock(&gl->gl_spin);
1142                                 list_del_init(&gh->gh_list);
1143                                 spin_unlock(&gl->gl_spin);
1144                         }
1145                 }
1146
1147                 spin_lock(&gl->gl_spin);
1148                 gl->gl_req_gh = NULL;
1149                 gl->gl_req_bh = NULL;
1150                 clear_bit(GLF_LOCK, &gl->gl_flags);
1151                 run_queue(gl);
1152                 spin_unlock(&gl->gl_spin);
1153         }
1154
1155         return gh->gh_error;
1156 }
1157
1158 static inline struct gfs2_holder *
1159 find_holder_by_owner(struct list_head *head, struct task_struct *owner)
1160 {
1161         struct gfs2_holder *gh;
1162
1163         list_for_each_entry(gh, head, gh_list) {
1164                 if (gh->gh_owner == owner)
1165                         return gh;
1166         }
1167
1168         return NULL;
1169 }
1170
1171 /**
1172  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1173  * @gh: the holder structure to add
1174  *
1175  */
1176
1177 static void add_to_queue(struct gfs2_holder *gh)
1178 {
1179         struct gfs2_glock *gl = gh->gh_gl;
1180         struct gfs2_holder *existing;
1181
1182         BUG_ON(!gh->gh_owner);
1183         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1184                 BUG();
1185
1186         existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1187         if (existing) {
1188                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1189                 printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
1190                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1191                                 existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
1192                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1193                 printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
1194                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1195                                 gl->gl_name.ln_type, gl->gl_state);
1196                 BUG();
1197         }
1198
1199         existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
1200         if (existing) {
1201                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1202                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1203                 BUG();
1204         }
1205
1206         if (gh->gh_flags & LM_FLAG_PRIORITY)
1207                 list_add(&gh->gh_list, &gl->gl_waiters3);
1208         else
1209                 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1210 }
1211
1212 /**
1213  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1214  * @gh: the holder structure
1215  *
1216  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1217  *
1218  * Returns: 0, GLR_TRYFAILED, or errno on failure
1219  */
1220
1221 int gfs2_glock_nq(struct gfs2_holder *gh)
1222 {
1223         struct gfs2_glock *gl = gh->gh_gl;
1224         struct gfs2_sbd *sdp = gl->gl_sbd;
1225         int error = 0;
1226
1227 restart:
1228         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1229                 set_bit(HIF_ABORTED, &gh->gh_iflags);
1230                 return -EIO;
1231         }
1232
1233         set_bit(HIF_PROMOTE, &gh->gh_iflags);
1234
1235         spin_lock(&gl->gl_spin);
1236         add_to_queue(gh);
1237         run_queue(gl);
1238         spin_unlock(&gl->gl_spin);
1239
1240         if (!(gh->gh_flags & GL_ASYNC)) {
1241                 error = glock_wait_internal(gh);
1242                 if (error == GLR_CANCELED) {
1243                         msleep(100);
1244                         goto restart;
1245                 }
1246         }
1247
1248         return error;
1249 }
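
/*
 * A typical enqueue/dequeue sequence (a sketch; LM_ST_SHARED chosen
 * arbitrarily).  Note that gfs2_holder_uninit() is still needed on
 * failure to drop the reference taken by gfs2_holder_init():
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error)
 *		gfs2_glock_dq_uninit(&gh);
 *	else
 *		gfs2_holder_uninit(&gh);
 */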
1250
1251 /**
1252  * gfs2_glock_poll - poll to see if an async request has been completed
1253  * @gh: the holder
1254  *
1255  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1256  */
1257
1258 int gfs2_glock_poll(struct gfs2_holder *gh)
1259 {
1260         struct gfs2_glock *gl = gh->gh_gl;
1261         int ready = 0;
1262
1263         spin_lock(&gl->gl_spin);
1264
1265         if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1266                 ready = 1;
1267         else if (list_empty(&gh->gh_list)) {
1268                 if (gh->gh_error == GLR_CANCELED) {
1269                         spin_unlock(&gl->gl_spin);
1270                         msleep(100);
1271                         if (gfs2_glock_nq(gh))
1272                                 return 1;
1273                         return 0;
1274                 } else
1275                         ready = 1;
1276         }
1277
1278         spin_unlock(&gl->gl_spin);
1279
1280         return ready;
1281 }
1282
1283 /**
1284  * gfs2_glock_wait - wait for a lock acquisition that was started with GL_ASYNC
1285  * @gh: the holder structure
1286  *
1287  * Returns: 0, GLR_TRYFAILED, or errno on failure
1288  */
1289
1290 int gfs2_glock_wait(struct gfs2_holder *gh)
1291 {
1292         int error;
1293
1294         error = glock_wait_internal(gh);
1295         if (error == GLR_CANCELED) {
1296                 msleep(100);
1297                 gh->gh_flags &= ~GL_ASYNC;
1298                 error = gfs2_glock_nq(gh);
1299         }
1300
1301         return error;
1302 }
1303
1304 /**
1305  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1306  * @gh: the glock holder
1307  *
1308  */
1309
1310 void gfs2_glock_dq(struct gfs2_holder *gh)
1311 {
1312         struct gfs2_glock *gl = gh->gh_gl;
1313         const struct gfs2_glock_operations *glops = gl->gl_ops;
1314
1315         if (gh->gh_flags & GL_NOCACHE)
1316                 handle_callback(gl, LM_ST_UNLOCKED);
1317
1318         gfs2_glmutex_lock(gl);
1319
1320         spin_lock(&gl->gl_spin);
1321         list_del_init(&gh->gh_list);
1322
1323         if (list_empty(&gl->gl_holders)) {
1324                 spin_unlock(&gl->gl_spin);
1325
1326                 if (glops->go_unlock)
1327                         glops->go_unlock(gh);
1328
1329                 gl->gl_stamp = jiffies;
1330
1331                 spin_lock(&gl->gl_spin);
1332         }
1333
1334         clear_bit(GLF_LOCK, &gl->gl_flags);
1335         run_queue(gl);
1336         spin_unlock(&gl->gl_spin);
1337 }
1338
1339 static void greedy_work(struct work_struct *work)
1340 {
1341         struct greedy *gr = container_of(work, struct greedy, gr_work.work);
1342         struct gfs2_holder *gh = &gr->gr_gh;
1343         struct gfs2_glock *gl = gh->gh_gl;
1344         const struct gfs2_glock_operations *glops = gl->gl_ops;
1345
1346         clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1347
1348         if (glops->go_greedy)
1349                 glops->go_greedy(gl);
1350
1351         spin_lock(&gl->gl_spin);
1352
1353         if (list_empty(&gl->gl_waiters2)) {
1354                 clear_bit(GLF_GREEDY, &gl->gl_flags);
1355                 spin_unlock(&gl->gl_spin);
1356                 gfs2_holder_uninit(gh);
1357                 kfree(gr);
1358         } else {
1359                 gfs2_glock_hold(gl);
1360                 list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1361                 run_queue(gl);
1362                 spin_unlock(&gl->gl_spin);
1363                 gfs2_glock_put(gl);
1364         }
1365 }
1366
1367 /**
1368  * gfs2_glock_be_greedy - temporarily defer demote requests on a glock
1369  * @gl: the glock
1370  * @time: the delay, in jiffies, before greedy status is dropped
1371  *
1372  * Returns: 0 if go_greedy will be called, 1 otherwise
1373  */
1374
1375 int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1376 {
1377         struct greedy *gr;
1378         struct gfs2_holder *gh;
1379
1380         if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
1381             test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1382                 return 1;
1383
1384         gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1385         if (!gr) {
1386                 clear_bit(GLF_GREEDY, &gl->gl_flags);
1387                 return 1;
1388         }
1389         gh = &gr->gr_gh;
1390
1391         gfs2_holder_init(gl, 0, 0, gh);
1392         set_bit(HIF_GREEDY, &gh->gh_iflags);
1393         INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
1394
1395         set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1396         schedule_delayed_work(&gr->gr_work, time);
1397
1398         return 0;
1399 }
1400
1401 /**
1402  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1403  * @gh: the holder structure
1404  *
1405  */
1406
1407 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1408 {
1409         gfs2_glock_dq(gh);
1410         gfs2_holder_uninit(gh);
1411 }
1412
1413 /**
1414  * gfs2_glock_nq_num - acquire a glock based on lock number
1415  * @sdp: the filesystem
1416  * @number: the lock number
1417  * @glops: the glock operations for the type of glock
1418  * @state: the state to acquire the glock in
1419  * @flags: modifier flags for the acquisition
1420  * @gh: the struct gfs2_holder
1421  *
1422  * Returns: errno
1423  */
1424
1425 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1426                       const struct gfs2_glock_operations *glops,
1427                       unsigned int state, int flags, struct gfs2_holder *gh)
1428 {
1429         struct gfs2_glock *gl;
1430         int error;
1431
1432         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1433         if (!error) {
1434                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1435                 gfs2_glock_put(gl);
1436         }
1437
1438         return error;
1439 }
1440
1441 /**
1442  * glock_compare - Compare two struct gfs2_glock structures for sorting
1443  * @arg_a: the first structure
1444  * @arg_b: the second structure
1445  *
1446  */
1447
1448 static int glock_compare(const void *arg_a, const void *arg_b)
1449 {
1450         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1451         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1452         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1453         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1454
1455         if (a->ln_number > b->ln_number)
1456                 return 1;
1457         if (a->ln_number < b->ln_number)
1458                 return -1;
1459         if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
1460                 return 1;
1461         if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
1462                 return 1;
1463         return 0;
1464 }
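
/*
 * Sorting holders this way gives every caller of nq_m_sync() the same
 * global acquisition order (by lock number, with exclusive and locally
 * exclusive requests first among equals), which is what makes the
 * multi-glock enqueue below deadlock free.
 */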
1465
1466 /**
1467  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1468  * @num_gh: the number of structures
1469  * @ghs: an array of struct gfs2_holder structures
1470  *
1471  * Returns: 0 on success (all glocks acquired),
1472  *          errno on failure (no glocks acquired)
1473  */
1474
1475 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1476                      struct gfs2_holder **p)
1477 {
1478         unsigned int x;
1479         int error = 0;
1480
1481         for (x = 0; x < num_gh; x++)
1482                 p[x] = &ghs[x];
1483
1484         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1485
1486         for (x = 0; x < num_gh; x++) {
1487                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1488
1489                 error = gfs2_glock_nq(p[x]);
1490                 if (error) {
1491                         while (x--)
1492                                 gfs2_glock_dq(p[x]);
1493                         break;
1494                 }
1495         }
1496
1497         return error;
1498 }
1499
1500 /**
1501  * gfs2_glock_nq_m - acquire multiple glocks
1502  * @num_gh: the number of structures
1503  * @ghs: an array of struct gfs2_holder structures
1504  *
1505  * Figure out how big an impact this function has.  Either:
1506  * 1) Replace this code with code that calls gfs2_glock_prefetch()
1507  * 2) Forget async stuff and just call nq_m_sync()
1508  * 3) Leave it like it is
1509  *
1510  * Returns: 0 on success (all glocks acquired),
1511  *          errno on failure (no glocks acquired)
1512  */
1513
1514 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1515 {
1516         int *e;
1517         unsigned int x;
1518         int borked = 0, serious = 0;
1519         int error = 0;
1520
1521         if (!num_gh)
1522                 return 0;
1523
1524         if (num_gh == 1) {
1525                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1526                 return gfs2_glock_nq(ghs);
1527         }
1528
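        /* The "e" array does double duty: it collects per-holder errnos
           here, and is later handed to nq_m_sync(), cast to
           (struct gfs2_holder **), as its sort scratch space -- hence
           the pointer-sized elements in the kcalloc() below. */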
1529         e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1530         if (!e)
1531                 return -ENOMEM;
1532
1533         for (x = 0; x < num_gh; x++) {
1534                 ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1535                 error = gfs2_glock_nq(&ghs[x]);
1536                 if (error) {
1537                         borked = 1;
1538                         serious = error;
1539                         num_gh = x;
1540                         break;
1541                 }
1542         }
1543
1544         for (x = 0; x < num_gh; x++) {
1545                 error = e[x] = glock_wait_internal(&ghs[x]);
1546                 if (error) {
1547                         borked = 1;
1548                         if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1549                                 serious = error;
1550                 }
1551         }
1552
1553         if (!borked) {
1554                 kfree(e);
1555                 return 0;
1556         }
1557
1558         for (x = 0; x < num_gh; x++)
1559                 if (!e[x])
1560                         gfs2_glock_dq(&ghs[x]);
1561
1562         if (serious)
1563                 error = serious;
1564         else {
1565                 for (x = 0; x < num_gh; x++)
1566                         gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1567                                           &ghs[x]);
1568                 error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1569         }
1570
1571         kfree(e);
1572
1573         return error;
1574 }
1575
1576 /**
1577  * gfs2_glock_dq_m - release multiple glocks
1578  * @num_gh: the number of structures
1579  * @ghs: an array of struct gfs2_holder structures
1580  *
1581  */
1582
1583 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1584 {
1585         unsigned int x;
1586
1587         for (x = 0; x < num_gh; x++)
1588                 gfs2_glock_dq(&ghs[x]);
1589 }
1590
1591 /**
1592  * gfs2_glock_dq_uninit_m - release multiple glocks
1593  * @num_gh: the number of structures
1594  * @ghs: an array of struct gfs2_holder structures
1595  *
1596  */
1597
1598 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1599 {
1600         unsigned int x;
1601
1602         for (x = 0; x < num_gh; x++)
1603                 gfs2_glock_dq_uninit(&ghs[x]);
1604 }
1605
1606 /**
1607  * gfs2_lvb_hold - attach a LVB to a glock
1608  * @gl: The glock in question
1609  *
1610  */
1611
1612 int gfs2_lvb_hold(struct gfs2_glock *gl)
1613 {
1614         int error;
1615
1616         gfs2_glmutex_lock(gl);
1617
1618         if (!atomic_read(&gl->gl_lvb_count)) {
1619                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1620                 if (error) {
1621                         gfs2_glmutex_unlock(gl);
1622                         return error;
1623                 }
1624                 gfs2_glock_hold(gl);
1625         }
1626         atomic_inc(&gl->gl_lvb_count);
1627
1628         gfs2_glmutex_unlock(gl);
1629
1630         return 0;
1631 }
1632
1633 /**
1634  * gfs2_lvb_unhold - detach a LVB from a glock
1635  * @gl: The glock in question
1636  *
1637  */
1638
1639 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1640 {
1641         gfs2_glock_hold(gl);
1642         gfs2_glmutex_lock(gl);
1643
1644         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1645         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1646                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1647                 gl->gl_lvb = NULL;
1648                 gfs2_glock_put(gl);
1649         }
1650
1651         gfs2_glmutex_unlock(gl);
1652         gfs2_glock_put(gl);
1653 }
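
/*
 * LVB attachment is reference counted, so calls must balance (a sketch;
 * error handling abbreviated):
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (!error) {
 *		... read or write the buffer at gl->gl_lvb ...
 *		gfs2_lvb_unhold(gl);
 *	}
 */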
1654
1655 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1656                         unsigned int state)
1657 {
1658         struct gfs2_glock *gl;
1659
1660         gl = gfs2_glock_find(sdp, name);
1661         if (!gl)
1662                 return;
1663
1664         if (gl->gl_ops->go_callback)
1665                 gl->gl_ops->go_callback(gl, state);
1666         handle_callback(gl, state);
1667
1668         spin_lock(&gl->gl_spin);
1669         run_queue(gl);
1670         spin_unlock(&gl->gl_spin);
1671
1672         gfs2_glock_put(gl);
1673 }
1674
1675 /**
1676  * gfs2_glock_cb - Callback used by locking module
1677  * @cb_data: Pointer to the superblock
1678  * @type: Type of callback
1679  * @data: Type dependent data pointer
1680  *
1681  * Called by the locking module when it wants to tell us something.
1682  * Either we need to drop a lock, one of our ASYNC requests completed, or
1683  * a journal from another client needs to be recovered.
1684  */
1685
1686 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1687 {
1688         struct gfs2_sbd *sdp = cb_data;
1689
1690         switch (type) {
1691         case LM_CB_NEED_E:
1692                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1693                 return;
1694
1695         case LM_CB_NEED_D:
1696                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1697                 return;
1698
1699         case LM_CB_NEED_S:
1700                 blocking_cb(sdp, data, LM_ST_SHARED);
1701                 return;
1702
1703         case LM_CB_ASYNC: {
1704                 struct lm_async_cb *async = data;
1705                 struct gfs2_glock *gl;
1706
1707                 gl = gfs2_glock_find(sdp, &async->lc_name);
1708                 if (gfs2_assert_warn(sdp, gl))
1709                         return;
1710                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1711                         gl->gl_req_bh(gl, async->lc_ret);
1712                 gfs2_glock_put(gl);
1713                 return;
1714         }
1715
1716         case LM_CB_NEED_RECOVERY:
1717                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1718                 if (sdp->sd_recoverd_process)
1719                         wake_up_process(sdp->sd_recoverd_process);
1720                 return;
1721
1722         case LM_CB_DROPLOCKS:
1723                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1724                 gfs2_quota_scan(sdp);
1725                 return;
1726
1727         default:
1728                 gfs2_assert_warn(sdp, 0);
1729                 return;
1730         }
1731 }
1732
1733 /**
1734  * demote_ok - Check to see if it's ok to unlock a glock
1735  * @gl: the glock
1736  *
1737  * Returns: 1 if it's ok
1738  */
1739
1740 static int demote_ok(struct gfs2_glock *gl)
1741 {
1742         const struct gfs2_glock_operations *glops = gl->gl_ops;
1743         int demote = 1;
1744
1745         if (test_bit(GLF_STICKY, &gl->gl_flags))
1746                 demote = 0;
1747         else if (glops->go_demote_ok)
1748                 demote = glops->go_demote_ok(gl);
1749
1750         return demote;
1751 }
1752
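/*
 * Sketch (illustrative only) of the go_demote_ok() hook consulted above;
 * "foo_demote_ok" is hypothetical, but a hook of this shape lets each
 * glock type veto demotion while it still has cached state:
 *
 *	static int foo_demote_ok(struct gfs2_glock *gl)
 *	{
 *		return !gl->gl_object;
 *	}
 */
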
1753 /**
1754  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1755  * @gl: the glock
1756  *
1757  */
1758
1759 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1760 {
1761         struct gfs2_sbd *sdp = gl->gl_sbd;
1762
1763         spin_lock(&sdp->sd_reclaim_lock);
1764         if (list_empty(&gl->gl_reclaim)) {
1765                 gfs2_glock_hold(gl);
1766                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1767                 atomic_inc(&sdp->sd_reclaim_count);
1768         }
1769         spin_unlock(&sdp->sd_reclaim_lock);
1770
1771         wake_up(&sdp->sd_reclaim_wq);
1772 }
1773
1774 /**
1775  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1776  * @sdp: the filesystem
1777  *
1778  * Called from the gfs2_glockd() glock reclaim daemon, or when promoting
1779  * a different glock and noticing that the reclaim list has grown
1780  * large.
1781  *
1782  */
1783
1784 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1785 {
1786         struct gfs2_glock *gl;
1787
1788         spin_lock(&sdp->sd_reclaim_lock);
1789         if (list_empty(&sdp->sd_reclaim_list)) {
1790                 spin_unlock(&sdp->sd_reclaim_lock);
1791                 return;
1792         }
1793         gl = list_entry(sdp->sd_reclaim_list.next,
1794                         struct gfs2_glock, gl_reclaim);
1795         list_del_init(&gl->gl_reclaim);
1796         spin_unlock(&sdp->sd_reclaim_lock);
1797
1798         atomic_dec(&sdp->sd_reclaim_count);
1799         atomic_inc(&sdp->sd_reclaimed);
1800
1801         if (gfs2_glmutex_trylock(gl)) {
1802                 if (queue_empty(gl, &gl->gl_holders) &&
1803                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1804                         handle_callback(gl, LM_ST_UNLOCKED);
1805                 gfs2_glmutex_unlock(gl);
1806         }
1807
1808         gfs2_glock_put(gl);
1809 }
1810
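/*
 * Caller sketch (illustrative only): a reclaim daemon would drain the
 * list one entry at a time, roughly:
 *
 *	while (atomic_read(&sdp->sd_reclaim_count))
 *		gfs2_reclaim_glock(sdp);
 */
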
1811 /**
1812  * examine_bucket - Call a function for each glock in a hash bucket
1813  * @examiner: the function to call
1814  * @sdp: the filesystem
1815  * @hash: the index of the hash bucket
1816  *
1817  * Returns: 1 if the bucket has entries
1818  */
1819
1820 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1821                           unsigned int hash)
1822 {
1823         struct gfs2_glock *gl, *prev = NULL;
1824         int has_entries = 0;
1825         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1826
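        /*
         * A reference on the current glock keeps it linked into the
         * chain while the bucket lock is dropped for the examiner, so
         * its next pointer is still valid once the lock is retaken;
         * the previous glock is only released after that point.
         */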
1827         read_lock(gl_lock_addr(hash));
1828         /* Can't use hlist_for_each_entry - don't want prefetch here */
1829         if (hlist_empty(head))
1830                 goto out;
1831         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1832         while (1) {
1833                 if (gl->gl_sbd == sdp) {
1834                         gfs2_glock_hold(gl);
1835                         read_unlock(gl_lock_addr(hash));
1836                         if (prev)
1837                                 gfs2_glock_put(prev);
1838                         prev = gl;
1839                         examiner(gl);
1840                         has_entries = 1;
1841                         read_lock(gl_lock_addr(hash));
1842                 }
1843                 if (gl->gl_list.next == NULL)
1844                         break;
1845                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1846         }
1847 out:
1848         read_unlock(gl_lock_addr(hash));
1849         if (prev)
1850                 gfs2_glock_put(prev);
1851         return has_entries;
1852 }
1853
1854 /**
1855  * scan_glock - look at a glock and see if we can reclaim it
1856  * @gl: the glock to look at
1857  *
1858  */
1859
1860 static void scan_glock(struct gfs2_glock *gl)
1861 {
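        /*
         * Inode glocks that still have a struct inode attached are
         * pinned by that inode and are reclaimed through the inode
         * cache instead.
         */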
1862         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1863                 return;
1864
1865         if (gfs2_glmutex_trylock(gl)) {
1866                 if (queue_empty(gl, &gl->gl_holders) &&
1867                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1868                         goto out_schedule;
1869                 gfs2_glmutex_unlock(gl);
1870         }
1871         return;
1872
1873 out_schedule:
1874         gfs2_glmutex_unlock(gl);
1875         gfs2_glock_schedule_for_reclaim(gl);
1876 }
1877
1878 /**
1879  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1880  * @sdp: the filesystem
1881  *
1882  */
1883
1884 void gfs2_scand_internal(struct gfs2_sbd *sdp)
1885 {
1886         unsigned int x;
1887
1888         for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1889                 examine_bucket(scan_glock, sdp, x);
1890 }
1891
1892 /**
1893  * clear_glock - look at a glock and see if we can free it from the glock cache
1894  * @gl: the glock to look at
1895  *
1896  */
1897
1898 static void clear_glock(struct gfs2_glock *gl)
1899 {
1900         struct gfs2_sbd *sdp = gl->gl_sbd;
1901         int released;
1902
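        /*
         * Drop the reclaim-list reference first; it cannot be the last
         * reference, since examine_bucket() holds one of its own.
         */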
1903         spin_lock(&sdp->sd_reclaim_lock);
1904         if (!list_empty(&gl->gl_reclaim)) {
1905                 list_del_init(&gl->gl_reclaim);
1906                 atomic_dec(&sdp->sd_reclaim_count);
1907                 spin_unlock(&sdp->sd_reclaim_lock);
1908                 released = gfs2_glock_put(gl);
1909                 gfs2_assert(sdp, !released);
1910         } else {
1911                 spin_unlock(&sdp->sd_reclaim_lock);
1912         }
1913
1914         if (gfs2_glmutex_trylock(gl)) {
1915                 if (queue_empty(gl, &gl->gl_holders) &&
1916                     gl->gl_state != LM_ST_UNLOCKED)
1917                         handle_callback(gl, LM_ST_UNLOCKED);
1918                 gfs2_glmutex_unlock(gl);
1919         }
1920 }
1921
1922 /**
1923  * gfs2_gl_hash_clear - Empty out the glock hash table
1924  * @sdp: the filesystem
1925  * @wait: wait until it's all gone
1926  *
1927  * Called when unmounting the filesystem, or when inter-node lock manager
1928  * requests DROPLOCKS because it is running out of capacity.
1929  */
1930
1931 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1932 {
1933         unsigned long t;
1934         unsigned int x;
1935         int cont;
1936
1937         t = jiffies;
1938
1939         for (;;) {
1940                 cont = 0;
1941                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1942                         if (examine_bucket(clear_glock, sdp, x))
1943                                 cont = 1;
1944                 }
1945
1946                 if (!wait || !cont)
1947                         break;
1948
1949                 if (time_after_eq(jiffies,
1950                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1951                         fs_warn(sdp, "Unmount seems to be stalled. "
1952                                      "Dumping lock state...\n");
1953                         gfs2_dump_lockstate(sdp);
1954                         t = jiffies;
1955                 }
1956
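                /*
                 * Unused inodes pin their glocks; evicting them here
                 * lets those glocks be freed on the next pass.
                 */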
1957                 invalidate_inodes(sdp->sd_vfs);
1958                 msleep(10);
1959         }
1960 }
1961
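/*
 * Caller sketch (illustrative only): NO_WAIT is what the DROPLOCKS
 * callback above passes, since it must not block; the unmount path is
 * assumed to pass the WAIT counterpart defined alongside it:
 *
 *	gfs2_gl_hash_clear(sdp, NO_WAIT);	lock module callback
 *	gfs2_gl_hash_clear(sdp, WAIT);		unmount
 */
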
1962 /*
1963  *  Diagnostic routines to help debug distributed deadlock
1964  */
1965
1966 /**
1967  * dump_holder - print information about a glock holder
1968  * @str: a string naming the type of holder
1969  * @gh: the glock holder
1970  *
1971  * Returns: 0 on success, -ENOBUFS when we run out of space
1972  */
1973
1974 static int dump_holder(char *str, struct gfs2_holder *gh)
1975 {
1976         unsigned int x;
1977         int error = -ENOBUFS;
1978
1979         printk(KERN_INFO "  %s\n", str);
1980         printk(KERN_INFO "    owner = %ld\n",
1981                    (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
1982         printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
1983         printk(KERN_INFO "    gh_flags =");
1984         for (x = 0; x < 32; x++)
1985                 if (gh->gh_flags & (1 << x))
1986                         printk(" %u", x);
1987         printk("\n");
1988         printk(KERN_INFO "    error = %d\n", gh->gh_error);
1989         printk(KERN_INFO "    gh_iflags =");
1990         for (x = 0; x < 32; x++)
1991                 if (test_bit(x, &gh->gh_iflags))
1992                         printk(" %u", x);
1993         printk("\n");
1994         print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);
1995
1996         error = 0;
1997
1998         return error;
1999 }
2000
2001 /**
2002  * dump_inode - print information about an inode
2003  * @ip: the inode
2004  *
2005  * Returns: 0 on success, -ENOBUFS when we run out of space
2006  */
2007
2008 static int dump_inode(struct gfs2_inode *ip)
2009 {
2010         unsigned int x;
2011         int error = -ENOBUFS;
2012
2013         printk(KERN_INFO "  Inode:\n");
2014         printk(KERN_INFO "    num = %llu %llu\n",
2015                     (unsigned long long)ip->i_num.no_formal_ino,
2016                     (unsigned long long)ip->i_num.no_addr);
2017         printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
2018         printk(KERN_INFO "    i_flags =");
2019         for (x = 0; x < 32; x++)
2020                 if (test_bit(x, &ip->i_flags))
2021                         printk(" %u", x);
2022         printk("\n");
2023
2024         error = 0;
2025
2026         return error;
2027 }
2028
2029 /**
2030  * dump_glock - print information about a glock
2031  * @gl: the glock
2033  *
2034  * Returns: 0 on success, -ENOBUFS when we run out of space
2035  */
2036
2037 static int dump_glock(struct gfs2_glock *gl)
2038 {
2039         struct gfs2_holder *gh;
2040         unsigned int x;
2041         int error = -ENOBUFS;
2042
2043         spin_lock(&gl->gl_spin);
2044
2045         printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
2046                (unsigned long long)gl->gl_name.ln_number);
2047         printk(KERN_INFO "  gl_flags =");
2048         for (x = 0; x < 32; x++) {
2049                 if (test_bit(x, &gl->gl_flags))
2050                         printk(" %u", x);
2051         }
2052         printk("\n");
2053         printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
2054         printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
2055         printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
2056         print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
2057         printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
2058         printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
2059         printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
2060         printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
2061         printk(KERN_INFO "  le = %s\n",
2062                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
2063         printk(KERN_INFO "  reclaim = %s\n",
2064                     (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2065         if (gl->gl_aspace)
2066                 printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
2067                        gl->gl_aspace->i_mapping->nrpages);
2068         else
2069                 printk(KERN_INFO "  aspace = no\n");
2070         printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
2071         if (gl->gl_req_gh) {
2072                 error = dump_holder("Request", gl->gl_req_gh);
2073                 if (error)
2074                         goto out;
2075         }
2076         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
2077                 error = dump_holder("Holder", gh);
2078                 if (error)
2079                         goto out;
2080         }
2081         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
2082                 error = dump_holder("Waiter1", gh);
2083                 if (error)
2084                         goto out;
2085         }
2086         list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
2087                 error = dump_holder("Waiter2", gh);
2088                 if (error)
2089                         goto out;
2090         }
2091         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
2092                 error = dump_holder("Waiter3", gh);
2093                 if (error)
2094                         goto out;
2095         }
2096         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
2097                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
2098                     list_empty(&gl->gl_holders)) {
2099                         error = dump_inode(gl->gl_object);
2100                         if (error)
2101                                 goto out;
2102                 } else {
2103                         error = -ENOBUFS;
2104                         printk(KERN_INFO "  Inode: busy\n");
2105                         goto out;
2106                 }
2107         }
2108         error = 0;
2109
2110 out:
2111         spin_unlock(&gl->gl_spin);
2112         return error;
2113 }
2114
2115 /**
2116  * gfs2_dump_lockstate - print out the current lockstate
2117  * @sdp: the filesystem
2118  * @ub: the buffer to copy the information into
2119  *
2120  * If @ub is NULL, dump the lockstate to the console.
2121  *
2122  */
2123
2124 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2125 {
2126         struct gfs2_glock *gl;
2127         struct hlist_node *h;
2128         unsigned int x;
2129         int error = 0;
2130
2131         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2133                 read_lock(gl_lock_addr(x));
2134
2135                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
2136                         if (gl->gl_sbd != sdp)
2137                                 continue;
2138
2139                         error = dump_glock(gl);
2140                         if (error)
2141                                 break;
2142                 }
2143
2144                 read_unlock(gl_lock_addr(x));
2145
2146                 if (error)
2147                         break;
2148         }
2149
2151         return error;
2152 }
2153
2154 int __init gfs2_glock_init(void)
2155 {
2156         unsigned i;
2157         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2158                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2159         }
2160 #ifdef GL_HASH_LOCK_SZ
2161         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
2162                 rwlock_init(&gl_hash_locks[i]);
2163         }
2164 #endif
2165         return 0;
2166 }
2167