[GFS2] Don't add glocks to the journal
[safe/jmp/linux-2.6] fs/gfs2/glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <linux/rwsem.h>
25 #include <asm/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
28 #include <linux/kthread.h>
29 #include <linux/freezer.h>
30 #include <linux/workqueue.h>
31 #include <linux/jiffies.h>
32
33 #include "gfs2.h"
34 #include "incore.h"
35 #include "glock.h"
36 #include "glops.h"
37 #include "inode.h"
38 #include "lm.h"
39 #include "lops.h"
40 #include "meta_io.h"
41 #include "quota.h"
42 #include "super.h"
43 #include "util.h"
44
45 struct gfs2_gl_hash_bucket {
46         struct hlist_head hb_list;
47 };
48
49 struct glock_iter {
50         int hash;                     /* hash bucket index         */
51         struct gfs2_sbd *sdp;         /* incore superblock         */
52         struct gfs2_glock *gl;        /* current glock struct      */
53         struct seq_file *seq;         /* sequence file for debugfs */
54         char string[512];             /* scratch space             */
55 };
56
57 typedef void (*glock_examiner) (struct gfs2_glock * gl);
58
59 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
60 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
61 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
62 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
63 static void run_queue(struct gfs2_glock *gl);
64
65 static DECLARE_RWSEM(gfs2_umount_flush_sem);
66 static struct dentry *gfs2_root;
67 static struct task_struct *scand_process;
68 static unsigned int scand_secs = 5;
69 static struct workqueue_struct *glock_workqueue;
70
71 #define GFS2_GL_HASH_SHIFT      15
72 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
73 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
74
75 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
77
78 /*
79  * Despite what you might think, the numbers below are not arbitrary :-)
80  * They are taken from the ipv4 routing hash code, which is well tested
81  * and thus should be nearly optimal. Later on we might tweek the numbers
82  * but for now this should be fine.
83  *
84  * The reason for putting the locks in a separate array from the list heads
85  * is that we can have fewer locks than list heads and save memory. We use
86  * the same hash function for both, but with a different hash mask.
87  */
88 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
89         defined(CONFIG_PROVE_LOCKING)
90
91 #ifdef CONFIG_LOCKDEP
92 # define GL_HASH_LOCK_SZ        256
93 #else
94 # if NR_CPUS >= 32
95 #  define GL_HASH_LOCK_SZ       4096
96 # elif NR_CPUS >= 16
97 #  define GL_HASH_LOCK_SZ       2048
98 # elif NR_CPUS >= 8
99 #  define GL_HASH_LOCK_SZ       1024
100 # elif NR_CPUS >= 4
101 #  define GL_HASH_LOCK_SZ       512
102 # else
103 #  define GL_HASH_LOCK_SZ       256
104 # endif
105 #endif
106
107 /* We never want more locks than chains */
108 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
109 # undef GL_HASH_LOCK_SZ
110 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
111 #endif
112
113 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
114
115 static inline rwlock_t *gl_lock_addr(unsigned int x)
116 {
117         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
118 }
119 #else /* not SMP, so no spinlocks required */
120 static inline rwlock_t *gl_lock_addr(unsigned int x)
121 {
122         return NULL;
123 }
124 #endif
125
126 /**
127  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
128  * @actual: the current state of the lock
129  * @requested: the lock state that was requested by the caller
130  * @flags: the modifier flags passed in by the caller
131  *
132  * Returns: 1 if the locks are compatible, 0 otherwise
133  */
134
135 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
136                                    int flags)
137 {
138         if (actual == requested)
139                 return 1;
140
141         if (flags & GL_EXACT)
142                 return 0;
143
144         if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
145                 return 1;
146
147         if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
148                 return 1;
149
150         return 0;
151 }
152
153 /**
154  * gl_hash() - Turn glock number into hash bucket number
155  * @name: The lock name
156  *
157  * Returns: The number of the corresponding hash bucket
158  */
159
160 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
161                             const struct lm_lockname *name)
162 {
163         unsigned int h;
164
165         h = jhash(&name->ln_number, sizeof(u64), 0);
166         h = jhash(&name->ln_type, sizeof(unsigned int), h);
167         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
168         h &= GFS2_GL_HASH_MASK;
169
170         return h;
171 }
172
173 /**
174  * glock_free() - Perform a few checks and then release struct gfs2_glock
175  * @gl: The glock to release
176  *
177  * Also calls lock module to release its internal structure for this glock.
178  *
179  */
180
181 static void glock_free(struct gfs2_glock *gl)
182 {
183         struct gfs2_sbd *sdp = gl->gl_sbd;
184         struct inode *aspace = gl->gl_aspace;
185
186         gfs2_lm_put_lock(sdp, gl->gl_lock);
187
188         if (aspace)
189                 gfs2_aspace_put(aspace);
190
191         kmem_cache_free(gfs2_glock_cachep, gl);
192 }
193
194 /**
195  * gfs2_glock_hold() - increment reference count on glock
196  * @gl: The glock to hold
197  *
198  */
199
200 void gfs2_glock_hold(struct gfs2_glock *gl)
201 {
202         atomic_inc(&gl->gl_ref);
203 }
204
205 /**
206  * gfs2_glock_put() - Decrement reference count on glock
207  * @gl: The glock to put
208  *
209  */
210
211 int gfs2_glock_put(struct gfs2_glock *gl)
212 {
213         int rv = 0;
214         struct gfs2_sbd *sdp = gl->gl_sbd;
215
216         write_lock(gl_lock_addr(gl->gl_hash));
217         if (atomic_dec_and_test(&gl->gl_ref)) {
218                 hlist_del(&gl->gl_list);
219                 write_unlock(gl_lock_addr(gl->gl_hash));
220                 BUG_ON(spin_is_locked(&gl->gl_spin));
221                 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
222                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
223                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
224                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
225                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
226                 glock_free(gl);
227                 rv = 1;
228                 goto out;
229         }
230         write_unlock(gl_lock_addr(gl->gl_hash));
231 out:
232         return rv;
233 }
234
235 /**
236  * search_bucket() - Find struct gfs2_glock by lock number
237  * @hash: the hash bucket to search
238  * @name: The lock name
239  *
240  * Returns: NULL, or the struct gfs2_glock with the requested number
241  */
242
243 static struct gfs2_glock *search_bucket(unsigned int hash,
244                                         const struct gfs2_sbd *sdp,
245                                         const struct lm_lockname *name)
246 {
247         struct gfs2_glock *gl;
248         struct hlist_node *h;
249
250         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
251                 if (!lm_name_equal(&gl->gl_name, name))
252                         continue;
253                 if (gl->gl_sbd != sdp)
254                         continue;
255
256                 atomic_inc(&gl->gl_ref);
257
258                 return gl;
259         }
260
261         return NULL;
262 }
263
264 /**
265  * gfs2_glock_find() - Find glock by lock number
266  * @sdp: The GFS2 superblock
267  * @name: The lock name
268  *
269  * Returns: NULL, or the struct gfs2_glock with the requested number
270  */
271
272 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
273                                           const struct lm_lockname *name)
274 {
275         unsigned int hash = gl_hash(sdp, name);
276         struct gfs2_glock *gl;
277
278         read_lock(gl_lock_addr(hash));
279         gl = search_bucket(hash, sdp, name);
280         read_unlock(gl_lock_addr(hash));
281
282         return gl;
283 }
284
285 static void glock_work_func(struct work_struct *work)
286 {
287         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
288
289         spin_lock(&gl->gl_spin);
290         if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
291                 set_bit(GLF_DEMOTE, &gl->gl_flags);
292         run_queue(gl);
293         spin_unlock(&gl->gl_spin);
294         gfs2_glock_put(gl);
295 }
296
297 /**
298  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
299  * @sdp: The GFS2 superblock
300  * @number: the lock number
301  * @glops: The glock_operations to use
302  * @create: If 0, don't create the glock if it doesn't exist
303  * @glp: the glock is returned here
304  *
305  * This does not lock a glock, just finds/creates structures for one.
306  *
307  * Returns: errno
308  */
309
310 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
311                    const struct gfs2_glock_operations *glops, int create,
312                    struct gfs2_glock **glp)
313 {
314         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
315         struct gfs2_glock *gl, *tmp;
316         unsigned int hash = gl_hash(sdp, &name);
317         int error;
318
319         read_lock(gl_lock_addr(hash));
320         gl = search_bucket(hash, sdp, &name);
321         read_unlock(gl_lock_addr(hash));
322
323         if (gl || !create) {
324                 *glp = gl;
325                 return 0;
326         }
327
328         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
329         if (!gl)
330                 return -ENOMEM;
331
332         gl->gl_flags = 0;
333         gl->gl_name = name;
334         atomic_set(&gl->gl_ref, 1);
335         gl->gl_state = LM_ST_UNLOCKED;
336         gl->gl_demote_state = LM_ST_EXCLUSIVE;
337         gl->gl_hash = hash;
338         gl->gl_owner_pid = 0;
339         gl->gl_ip = 0;
340         gl->gl_ops = glops;
341         gl->gl_req_gh = NULL;
342         gl->gl_req_bh = NULL;
343         gl->gl_vn = 0;
344         gl->gl_stamp = jiffies;
345         gl->gl_tchange = jiffies;
346         gl->gl_object = NULL;
347         gl->gl_sbd = sdp;
348         gl->gl_aspace = NULL;
349         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
350
351         /* If this glock protects actual on-disk data or metadata blocks,
352            create a VFS inode to manage the pages/buffers holding them. */
353         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
354                 gl->gl_aspace = gfs2_aspace_get(sdp);
355                 if (!gl->gl_aspace) {
356                         error = -ENOMEM;
357                         goto fail;
358                 }
359         }
360
361         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
362         if (error)
363                 goto fail_aspace;
364
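        /*
         * While no locks were held, another thread may have created and
         * hashed the same glock. If so, free the one we allocated and use
         * the existing one instead (search_bucket() took a reference on it).
         */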
365         write_lock(gl_lock_addr(hash));
366         tmp = search_bucket(hash, sdp, &name);
367         if (tmp) {
368                 write_unlock(gl_lock_addr(hash));
369                 glock_free(gl);
370                 gl = tmp;
371         } else {
372                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
373                 write_unlock(gl_lock_addr(hash));
374         }
375
376         *glp = gl;
377
378         return 0;
379
380 fail_aspace:
381         if (gl->gl_aspace)
382                 gfs2_aspace_put(gl->gl_aspace);
383 fail:
384         kmem_cache_free(gfs2_glock_cachep, gl);
385         return error;
386 }
387
388 /**
389  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
390  * @gl: the glock
391  * @state: the state we're requesting
392  * @flags: the modifier flags
393  * @gh: the holder structure
394  *
395  */
396
397 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
398                       struct gfs2_holder *gh)
399 {
400         INIT_LIST_HEAD(&gh->gh_list);
401         gh->gh_gl = gl;
402         gh->gh_ip = (unsigned long)__builtin_return_address(0);
403         gh->gh_owner_pid = current->pid;
404         gh->gh_state = state;
405         gh->gh_flags = flags;
406         gh->gh_error = 0;
407         gh->gh_iflags = 0;
408         gfs2_glock_hold(gl);
409 }
410
411 /**
412  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
413  * @state: the state we're requesting
414  * @flags: the modifier flags
415  * @gh: the holder structure
416  *
417  * Don't mess with the glock.
418  *
419  */
420
421 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
422 {
423         gh->gh_state = state;
424         gh->gh_flags = flags;
425         gh->gh_iflags = 0;
426         gh->gh_ip = (unsigned long)__builtin_return_address(0);
427 }
428
429 /**
430  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
431  * @gh: the holder structure
432  *
433  */
434
435 void gfs2_holder_uninit(struct gfs2_holder *gh)
436 {
437         gfs2_glock_put(gh->gh_gl);
438         gh->gh_gl = NULL;
439         gh->gh_ip = 0;
440 }
441
442 static void gfs2_holder_wake(struct gfs2_holder *gh)
443 {
444         clear_bit(HIF_WAIT, &gh->gh_iflags);
445         smp_mb__after_clear_bit();
446         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
447 }
448
449 static int just_schedule(void *word)
450 {
451         schedule();
452         return 0;
453 }
454
455 static void wait_on_holder(struct gfs2_holder *gh)
456 {
457         might_sleep();
458         wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
459 }
460
461 static void gfs2_demote_wake(struct gfs2_glock *gl)
462 {
463         BUG_ON(!spin_is_locked(&gl->gl_spin));
464         gl->gl_demote_state = LM_ST_EXCLUSIVE;
465         clear_bit(GLF_DEMOTE, &gl->gl_flags);
466         smp_mb__after_clear_bit();
467         wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
468 }
469
470 static void wait_on_demote(struct gfs2_glock *gl)
471 {
472         might_sleep();
473         wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
474 }
475
476 /**
477  * rq_mutex - process a mutex request in the queue
478  * @gh: the glock holder
479  *
480  * Returns: 1 if the queue is blocked
481  */
482
483 static int rq_mutex(struct gfs2_holder *gh)
484 {
485         struct gfs2_glock *gl = gh->gh_gl;
486
487         list_del_init(&gh->gh_list);
488         /*  gh->gh_error never examined.  */
489         set_bit(GLF_LOCK, &gl->gl_flags);
490         clear_bit(HIF_WAIT, &gh->gh_iflags);
491         smp_mb();
492         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
493
494         return 1;
495 }
496
497 /**
498  * rq_promote - process a promote request in the queue
499  * @gh: the glock holder
500  *
501  * Acquire a new inter-node lock, or change a lock state to a more restrictive one.
502  *
503  * Returns: 1 if the queue is blocked
504  */
505
506 static int rq_promote(struct gfs2_holder *gh)
507 {
508         struct gfs2_glock *gl = gh->gh_gl;
509
510         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
511                 if (list_empty(&gl->gl_holders)) {
512                         gl->gl_req_gh = gh;
513                         set_bit(GLF_LOCK, &gl->gl_flags);
514                         spin_unlock(&gl->gl_spin);
515                         gfs2_glock_xmote_th(gh->gh_gl, gh);
516                         spin_lock(&gl->gl_spin);
517                 }
518                 return 1;
519         }
520
521         if (list_empty(&gl->gl_holders)) {
522                 set_bit(HIF_FIRST, &gh->gh_iflags);
523                 set_bit(GLF_LOCK, &gl->gl_flags);
524         } else {
525                 struct gfs2_holder *next_gh;
526                 if (gh->gh_state == LM_ST_EXCLUSIVE)
527                         return 1;
528                 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
529                                      gh_list);
530                 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
531                          return 1;
532         }
533
534         list_move_tail(&gh->gh_list, &gl->gl_holders);
535         gh->gh_error = 0;
536         set_bit(HIF_HOLDER, &gh->gh_iflags);
537
538         gfs2_holder_wake(gh);
539
540         return 0;
541 }
542
543 /**
544  * rq_demote - process a demote request in the queue
545  * @gl: the glock
546  *
547  * Returns: 1 if the queue is blocked
548  */
549
550 static int rq_demote(struct gfs2_glock *gl)
551 {
552         if (!list_empty(&gl->gl_holders))
553                 return 1;
554
555         if (gl->gl_state == gl->gl_demote_state ||
556             gl->gl_state == LM_ST_UNLOCKED) {
557                 gfs2_demote_wake(gl);
558                 return 0;
559         }
560
561         set_bit(GLF_LOCK, &gl->gl_flags);
562         set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
563
564         if (gl->gl_demote_state == LM_ST_UNLOCKED ||
565             gl->gl_state != LM_ST_EXCLUSIVE) {
566                 spin_unlock(&gl->gl_spin);
567                 gfs2_glock_drop_th(gl);
568         } else {
569                 spin_unlock(&gl->gl_spin);
570                 gfs2_glock_xmote_th(gl, NULL);
571         }
572
573         spin_lock(&gl->gl_spin);
574         clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
575
576         return 0;
577 }
578
579 /**
580  * run_queue - process holder structures on a glock
581  * @gl: the glock
582  *
583  */
584 static void run_queue(struct gfs2_glock *gl)
585 {
586         struct gfs2_holder *gh;
587         int blocked = 1;
588
589         for (;;) {
590                 if (test_bit(GLF_LOCK, &gl->gl_flags))
591                         break;
592
593                 if (!list_empty(&gl->gl_waiters1)) {
594                         gh = list_entry(gl->gl_waiters1.next,
595                                         struct gfs2_holder, gh_list);
596                         blocked = rq_mutex(gh);
597                 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
598                         blocked = rq_demote(gl);
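                        /*
                         * handle_callback() sets gl_waiters2 when a
                         * conflicting demote request arrives while a demote
                         * is already in progress; follow up with a demote to
                         * LM_ST_UNLOCKED in that case.
                         */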
599                         if (gl->gl_waiters2 && !blocked) {
600                                 set_bit(GLF_DEMOTE, &gl->gl_flags);
601                                 gl->gl_demote_state = LM_ST_UNLOCKED;
602                         }
603                         gl->gl_waiters2 = 0;
604                 } else if (!list_empty(&gl->gl_waiters3)) {
605                         gh = list_entry(gl->gl_waiters3.next,
606                                         struct gfs2_holder, gh_list);
607                         blocked = rq_promote(gh);
608                 } else
609                         break;
610
611                 if (blocked)
612                         break;
613         }
614 }
615
616 /**
617  * gfs2_glmutex_lock - acquire a local lock on a glock
618  * @gl: the glock
619  *
620  * Gives caller exclusive access to manipulate a glock structure.
621  */
622
623 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
624 {
625         struct gfs2_holder gh;
626
627         gfs2_holder_init(gl, 0, 0, &gh);
628         if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
629                 BUG();
630
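        /*
         * The holder is only used as a wait token here: either we grab
         * GLF_LOCK below, or rq_mutex() clears HIF_WAIT and wakes us once
         * the glock mutex becomes available.
         */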
631         spin_lock(&gl->gl_spin);
632         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
633                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
634         } else {
635                 gl->gl_owner_pid = current->pid;
636                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
637                 clear_bit(HIF_WAIT, &gh.gh_iflags);
638                 smp_mb();
639                 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
640         }
641         spin_unlock(&gl->gl_spin);
642
643         wait_on_holder(&gh);
644         gfs2_holder_uninit(&gh);
645 }
646
647 /**
648  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
649  * @gl: the glock
650  *
651  * Returns: 1 if the glock is acquired
652  */
653
654 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
655 {
656         int acquired = 1;
657
658         spin_lock(&gl->gl_spin);
659         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
660                 acquired = 0;
661         } else {
662                 gl->gl_owner_pid = current->pid;
663                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
664         }
665         spin_unlock(&gl->gl_spin);
666
667         return acquired;
668 }
669
670 /**
671  * gfs2_glmutex_unlock - release a local lock on a glock
672  * @gl: the glock
673  *
674  */
675
676 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
677 {
678         spin_lock(&gl->gl_spin);
679         clear_bit(GLF_LOCK, &gl->gl_flags);
680         gl->gl_owner_pid = 0;
681         gl->gl_ip = 0;
682         run_queue(gl);
683         BUG_ON(!spin_is_locked(&gl->gl_spin));
684         spin_unlock(&gl->gl_spin);
685 }
686
687 /**
688  * handle_callback - process a demote request
689  * @gl: the glock
690  * @state: the state the caller wants us to change to
691  *
692  * There are only two requests that we are going to see in actual
693  * practise: LM_ST_SHARED and LM_ST_UNLOCKED
694  */
695
696 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
697                             int remote, unsigned long delay)
698 {
699         int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
700
701         spin_lock(&gl->gl_spin);
702         set_bit(bit, &gl->gl_flags);
703         if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
704                 gl->gl_demote_state = state;
705                 gl->gl_demote_time = jiffies;
706                 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
707                     gl->gl_object) {
708                         gfs2_glock_schedule_for_reclaim(gl);
709                         spin_unlock(&gl->gl_spin);
710                         return;
711                 }
712         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
713                         gl->gl_demote_state != state) {
714                 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
715                         gl->gl_waiters2 = 1;
716                 else 
717                         gl->gl_demote_state = LM_ST_UNLOCKED;
718         }
719         spin_unlock(&gl->gl_spin);
720 }
721
722 /**
723  * state_change - record that the glock is now in a different state
724  * @gl: the glock
725  * @new_state: the new state
726  *
727  */
728
729 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
730 {
731         int held1, held2;
732
733         held1 = (gl->gl_state != LM_ST_UNLOCKED);
734         held2 = (new_state != LM_ST_UNLOCKED);
735
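        /*
         * A glock holds an extra reference on itself while it is in any
         * state other than LM_ST_UNLOCKED; take or drop that reference
         * when crossing the locked/unlocked boundary.
         */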
736         if (held1 != held2) {
737                 if (held2)
738                         gfs2_glock_hold(gl);
739                 else
740                         gfs2_glock_put(gl);
741         }
742
743         gl->gl_state = new_state;
744         gl->gl_tchange = jiffies;
745 }
746
747 /**
748  * xmote_bh - Called after the lock module is done acquiring a lock
749  * @gl: The glock in question
750  * @ret: the int returned from the lock module
751  *
752  */
753
754 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
755 {
756         struct gfs2_sbd *sdp = gl->gl_sbd;
757         const struct gfs2_glock_operations *glops = gl->gl_ops;
758         struct gfs2_holder *gh = gl->gl_req_gh;
759         int prev_state = gl->gl_state;
760         int op_done = 1;
761
762         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
763         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
764         gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
765
766         state_change(gl, ret & LM_OUT_ST_MASK);
767
768         if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
769                 if (glops->go_inval)
770                         glops->go_inval(gl, DIO_METADATA);
771         } else if (gl->gl_state == LM_ST_DEFERRED) {
772                 /* We might not want to do this here.
773                    Look at moving to the inode glops. */
774                 if (glops->go_inval)
775                         glops->go_inval(gl, 0);
776         }
777
778         /*  Deal with each possible exit condition  */
779
780         if (!gh) {
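                /*
                 * No requesting holder means this state change was started
                 * by rq_demote(): either complete the demote, or drop the
                 * lock again if the demote target changed underneath us.
                 */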
781                 gl->gl_stamp = jiffies;
782                 if (ret & LM_OUT_CANCELED) {
783                         op_done = 0;
784                 } else {
785                         spin_lock(&gl->gl_spin);
786                         if (gl->gl_state != gl->gl_demote_state) {
787                                 gl->gl_req_bh = NULL;
788                                 spin_unlock(&gl->gl_spin);
789                                 gfs2_glock_drop_th(gl);
790                                 gfs2_glock_put(gl);
791                                 return;
792                         }
793                         gfs2_demote_wake(gl);
794                         spin_unlock(&gl->gl_spin);
795                 }
796         } else {
797                 spin_lock(&gl->gl_spin);
798                 list_del_init(&gh->gh_list);
799                 gh->gh_error = -EIO;
800                 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 
801                         goto out;
802                 gh->gh_error = GLR_CANCELED;
803                 if (ret & LM_OUT_CANCELED) 
804                         goto out;
805                 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
806                         list_add_tail(&gh->gh_list, &gl->gl_holders);
807                         gh->gh_error = 0;
808                         set_bit(HIF_HOLDER, &gh->gh_iflags);
809                         set_bit(HIF_FIRST, &gh->gh_iflags);
810                         op_done = 0;
811                         goto out;
812                 }
813                 gh->gh_error = GLR_TRYFAILED;
814                 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
815                         goto out;
816                 gh->gh_error = -EINVAL;
817                 if (gfs2_assert_withdraw(sdp, 0) == -1)
818                         fs_err(sdp, "ret = 0x%.8X\n", ret);
819 out:
820                 spin_unlock(&gl->gl_spin);
821         }
822
823         if (glops->go_xmote_bh)
824                 glops->go_xmote_bh(gl);
825
826         if (op_done) {
827                 spin_lock(&gl->gl_spin);
828                 gl->gl_req_gh = NULL;
829                 gl->gl_req_bh = NULL;
830                 clear_bit(GLF_LOCK, &gl->gl_flags);
831                 spin_unlock(&gl->gl_spin);
832         }
833
834         gfs2_glock_put(gl);
835
836         if (gh)
837                 gfs2_holder_wake(gh);
838 }
839
840 /**
841  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
842  * @gl: The glock in question
843  * @state: the requested state
844  * @flags: modifier flags to the lock call
845  *
846  */
847
848 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
849 {
850         struct gfs2_sbd *sdp = gl->gl_sbd;
851         int flags = gh ? gh->gh_flags : 0;
852         unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
853         const struct gfs2_glock_operations *glops = gl->gl_ops;
854         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
855                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
856                                  LM_FLAG_PRIORITY);
857         unsigned int lck_ret;
858
859         if (glops->go_xmote_th)
860                 glops->go_xmote_th(gl);
861
862         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
863         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
864         gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
865         gfs2_assert_warn(sdp, state != gl->gl_state);
866
867         gfs2_glock_hold(gl);
868         gl->gl_req_bh = xmote_bh;
869
870         lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
871
872         if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
873                 return;
874
875         if (lck_ret & LM_OUT_ASYNC)
876                 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
877         else
878                 xmote_bh(gl, lck_ret);
879 }
880
881 /**
882  * drop_bh - Called after a lock module unlock completes
883  * @gl: the glock
884  * @ret: the return status
885  *
886  * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
887  * Doesn't drop the reference on the glock the top half took out
888  *
889  */
890
891 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
892 {
893         struct gfs2_sbd *sdp = gl->gl_sbd;
894         const struct gfs2_glock_operations *glops = gl->gl_ops;
895         struct gfs2_holder *gh = gl->gl_req_gh;
896
897         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
898         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
899         gfs2_assert_warn(sdp, !ret);
900
901         state_change(gl, LM_ST_UNLOCKED);
902
903         if (glops->go_inval)
904                 glops->go_inval(gl, DIO_METADATA);
905
906         if (gh) {
907                 spin_lock(&gl->gl_spin);
908                 list_del_init(&gh->gh_list);
909                 gh->gh_error = 0;
910                 spin_unlock(&gl->gl_spin);
911         }
912
913         spin_lock(&gl->gl_spin);
914         gfs2_demote_wake(gl);
915         gl->gl_req_gh = NULL;
916         gl->gl_req_bh = NULL;
917         clear_bit(GLF_LOCK, &gl->gl_flags);
918         spin_unlock(&gl->gl_spin);
919
920         gfs2_glock_put(gl);
921
922         if (gh)
923                 gfs2_holder_wake(gh);
924 }
925
926 /**
927  * gfs2_glock_drop_th - call into the lock module to unlock a lock
928  * @gl: the glock
929  *
930  */
931
932 static void gfs2_glock_drop_th(struct gfs2_glock *gl)
933 {
934         struct gfs2_sbd *sdp = gl->gl_sbd;
935         const struct gfs2_glock_operations *glops = gl->gl_ops;
936         unsigned int ret;
937
938         if (glops->go_xmote_th)
939                 glops->go_xmote_th(gl);
940
941         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
942         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
943         gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
944
945         gfs2_glock_hold(gl);
946         gl->gl_req_bh = drop_bh;
947
948         ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
949
950         if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
951                 return;
952
953         if (!ret)
954                 drop_bh(gl, ret);
955         else
956                 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
957 }
958
959 /**
960  * do_cancels - cancel requests for locks stuck waiting on an expire flag
961  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
962  *
963  * Don't cancel GL_NOCANCEL requests.
964  */
965
966 static void do_cancels(struct gfs2_holder *gh)
967 {
968         struct gfs2_glock *gl = gh->gh_gl;
969
970         spin_lock(&gl->gl_spin);
971
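        /*
         * Spin until our LM_FLAG_PRIORITY request is either granted or
         * dequeued, cancelling whatever request is currently pending at
         * the lock module (unless that request asked for GL_NOCANCEL).
         */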
972         while (gl->gl_req_gh != gh &&
973                !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
974                !list_empty(&gh->gh_list)) {
975                 if (gl->gl_req_bh && !(gl->gl_req_gh &&
976                                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
977                         spin_unlock(&gl->gl_spin);
978                         gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
979                         msleep(100);
980                         spin_lock(&gl->gl_spin);
981                 } else {
982                         spin_unlock(&gl->gl_spin);
983                         msleep(100);
984                         spin_lock(&gl->gl_spin);
985                 }
986         }
987
988         spin_unlock(&gl->gl_spin);
989 }
990
991 /**
992  * glock_wait_internal - wait on a glock acquisition
993  * @gh: the glock holder
994  *
995  * Returns: 0 on success
996  */
997
998 static int glock_wait_internal(struct gfs2_holder *gh)
999 {
1000         struct gfs2_glock *gl = gh->gh_gl;
1001         struct gfs2_sbd *sdp = gl->gl_sbd;
1002         const struct gfs2_glock_operations *glops = gl->gl_ops;
1003
1004         if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1005                 return -EIO;
1006
1007         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1008                 spin_lock(&gl->gl_spin);
1009                 if (gl->gl_req_gh != gh &&
1010                     !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1011                     !list_empty(&gh->gh_list)) {
1012                         list_del_init(&gh->gh_list);
1013                         gh->gh_error = GLR_TRYFAILED;
1014                         run_queue(gl);
1015                         spin_unlock(&gl->gl_spin);
1016                         return gh->gh_error;
1017                 }
1018                 spin_unlock(&gl->gl_spin);
1019         }
1020
1021         if (gh->gh_flags & LM_FLAG_PRIORITY)
1022                 do_cancels(gh);
1023
1024         wait_on_holder(gh);
1025         if (gh->gh_error)
1026                 return gh->gh_error;
1027
1028         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1029         gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1030                                                    gh->gh_flags));
1031
1032         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1033                 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1034
1035                 if (glops->go_lock) {
1036                         gh->gh_error = glops->go_lock(gh);
1037                         if (gh->gh_error) {
1038                                 spin_lock(&gl->gl_spin);
1039                                 list_del_init(&gh->gh_list);
1040                                 spin_unlock(&gl->gl_spin);
1041                         }
1042                 }
1043
1044                 spin_lock(&gl->gl_spin);
1045                 gl->gl_req_gh = NULL;
1046                 gl->gl_req_bh = NULL;
1047                 clear_bit(GLF_LOCK, &gl->gl_flags);
1048                 run_queue(gl);
1049                 spin_unlock(&gl->gl_spin);
1050         }
1051
1052         return gh->gh_error;
1053 }
1054
1055 static inline struct gfs2_holder *
1056 find_holder_by_owner(struct list_head *head, pid_t pid)
1057 {
1058         struct gfs2_holder *gh;
1059
1060         list_for_each_entry(gh, head, gh_list) {
1061                 if (gh->gh_owner_pid == pid)
1062                         return gh;
1063         }
1064
1065         return NULL;
1066 }
1067
1068 static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1069 {
1070         va_list args;
1071
1072         va_start(args, fmt);
1073         if (gi) {
1074                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
1075                 seq_printf(gi->seq, "%s", gi->string);
1076         }
1077         else
1078                 vprintk(fmt, args);
1079         va_end(args);
1080 }
1081
1082 /**
1083  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1084  * @gh: the holder structure to add
1085  *
1086  */
1087
1088 static void add_to_queue(struct gfs2_holder *gh)
1089 {
1090         struct gfs2_glock *gl = gh->gh_gl;
1091         struct gfs2_holder *existing;
1092
1093         BUG_ON(!gh->gh_owner_pid);
1094         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1095                 BUG();
1096
1097         if (!(gh->gh_flags & GL_FLOCK)) {
1098                 existing = find_holder_by_owner(&gl->gl_holders, 
1099                                                 gh->gh_owner_pid);
1100                 if (existing) {
1101                         print_symbol(KERN_WARNING "original: %s\n", 
1102                                      existing->gh_ip);
1103                         printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
1104                         printk(KERN_INFO "lock type : %d lock state : %d\n",
1105                                existing->gh_gl->gl_name.ln_type, 
1106                                existing->gh_gl->gl_state);
1107                         print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1108                         printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
1109                         printk(KERN_INFO "lock type : %d lock state : %d\n",
1110                                gl->gl_name.ln_type, gl->gl_state);
1111                         BUG();
1112                 }
1113                 
1114                 existing = find_holder_by_owner(&gl->gl_waiters3, 
1115                                                 gh->gh_owner_pid);
1116                 if (existing) {
1117                         print_symbol(KERN_WARNING "original: %s\n", 
1118                                      existing->gh_ip);
1119                         print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1120                         BUG();
1121                 }
1122         }
1123
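        /*
         * LM_FLAG_PRIORITY holders go to the head of the wait queue;
         * everyone else waits in FIFO order.
         */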
1124         if (gh->gh_flags & LM_FLAG_PRIORITY)
1125                 list_add(&gh->gh_list, &gl->gl_waiters3);
1126         else
1127                 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1128 }
1129
1130 /**
1131  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1132  * @gh: the holder structure
1133  *
1134  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1135  *
1136  * Returns: 0, GLR_TRYFAILED, or errno on failure
1137  */
1138
1139 int gfs2_glock_nq(struct gfs2_holder *gh)
1140 {
1141         struct gfs2_glock *gl = gh->gh_gl;
1142         struct gfs2_sbd *sdp = gl->gl_sbd;
1143         int error = 0;
1144
1145 restart:
1146         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1147                 set_bit(HIF_ABORTED, &gh->gh_iflags);
1148                 return -EIO;
1149         }
1150
1151         spin_lock(&gl->gl_spin);
1152         add_to_queue(gh);
1153         run_queue(gl);
1154         spin_unlock(&gl->gl_spin);
1155
1156         if (!(gh->gh_flags & GL_ASYNC)) {
1157                 error = glock_wait_internal(gh);
1158                 if (error == GLR_CANCELED) {
1159                         msleep(100);
1160                         goto restart;
1161                 }
1162         }
1163
1164         return error;
1165 }
1166
1167 /**
1168  * gfs2_glock_poll - poll to see if an async request has been completed
1169  * @gh: the holder
1170  *
1171  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1172  */
1173
1174 int gfs2_glock_poll(struct gfs2_holder *gh)
1175 {
1176         struct gfs2_glock *gl = gh->gh_gl;
1177         int ready = 0;
1178
1179         spin_lock(&gl->gl_spin);
1180
1181         if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1182                 ready = 1;
1183         else if (list_empty(&gh->gh_list)) {
1184                 if (gh->gh_error == GLR_CANCELED) {
1185                         spin_unlock(&gl->gl_spin);
1186                         msleep(100);
1187                         if (gfs2_glock_nq(gh))
1188                                 return 1;
1189                         return 0;
1190                 } else
1191                         ready = 1;
1192         }
1193
1194         spin_unlock(&gl->gl_spin);
1195
1196         return ready;
1197 }
1198
1199 /**
1200  * gfs2_glock_wait - wait for a lock acquisition that was requested with GL_ASYNC
1201  * @gh: the holder structure
1202  *
1203  * Returns: 0, GLR_TRYFAILED, or errno on failure
1204  */
1205
1206 int gfs2_glock_wait(struct gfs2_holder *gh)
1207 {
1208         int error;
1209
1210         error = glock_wait_internal(gh);
1211         if (error == GLR_CANCELED) {
1212                 msleep(100);
1213                 gh->gh_flags &= ~GL_ASYNC;
1214                 error = gfs2_glock_nq(gh);
1215         }
1216
1217         return error;
1218 }
1219
1220 /**
1221  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1222  * @gh: the glock holder
1223  *
1224  */
1225
1226 void gfs2_glock_dq(struct gfs2_holder *gh)
1227 {
1228         struct gfs2_glock *gl = gh->gh_gl;
1229         const struct gfs2_glock_operations *glops = gl->gl_ops;
1230         unsigned delay = 0;
1231
1232         if (gh->gh_flags & GL_NOCACHE)
1233                 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1234
1235         gfs2_glmutex_lock(gl);
1236
1237         spin_lock(&gl->gl_spin);
1238         list_del_init(&gh->gh_list);
1239
1240         if (list_empty(&gl->gl_holders)) {
1241                 if (glops->go_unlock) {
1242                         spin_unlock(&gl->gl_spin);
1243                         glops->go_unlock(gh);
1244                         spin_lock(&gl->gl_spin);
1245                 }
1246                 gl->gl_stamp = jiffies;
1247         }
1248
1249         clear_bit(GLF_LOCK, &gl->gl_flags);
1250         spin_unlock(&gl->gl_spin);
1251
1252         gfs2_glock_hold(gl);
1253         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1254             !test_bit(GLF_DEMOTE, &gl->gl_flags))
1255                 delay = gl->gl_ops->go_min_hold_time;
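        /*
         * queue_delayed_work() returns 0 if the work was already queued,
         * in which case the reference taken above must be dropped here.
         */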
1256         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1257                 gfs2_glock_put(gl);
1258 }
1259
1260 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1261 {
1262         struct gfs2_glock *gl = gh->gh_gl;
1263         gfs2_glock_dq(gh);
1264         wait_on_demote(gl);
1265 }
1266
1267 /**
1268  * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1269  * @gh: the holder structure
1270  *
1271  */
1272
1273 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1274 {
1275         gfs2_glock_dq(gh);
1276         gfs2_holder_uninit(gh);
1277 }
1278
1279 /**
1280  * gfs2_glock_nq_num - acquire a glock based on lock number
1281  * @sdp: the filesystem
1282  * @number: the lock number
1283  * @glops: the glock operations for the type of glock
1284  * @state: the state to acquire the glock in
1285  * @flags: modifier flags for the acquisition
1286  * @gh: the struct gfs2_holder
1287  *
1288  * Returns: errno
1289  */
1290
1291 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1292                       const struct gfs2_glock_operations *glops,
1293                       unsigned int state, int flags, struct gfs2_holder *gh)
1294 {
1295         struct gfs2_glock *gl;
1296         int error;
1297
1298         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1299         if (!error) {
1300                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1301                 gfs2_glock_put(gl);
1302         }
1303
1304         return error;
1305 }
1306
1307 /**
1308  * glock_compare - Compare two struct gfs2_glock structures for sorting
1309  * @arg_a: the first structure
1310  * @arg_b: the second structure
1311  *
1312  */
1313
1314 static int glock_compare(const void *arg_a, const void *arg_b)
1315 {
1316         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1317         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1318         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1319         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1320
1321         if (a->ln_number > b->ln_number)
1322                 return 1;
1323         if (a->ln_number < b->ln_number)
1324                 return -1;
1325         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1326         return 0;
1327 }
1328
1329 /**
1330  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1331  * @num_gh: the number of structures
1332  * @ghs: an array of struct gfs2_holder structures
1333  *
1334  * Returns: 0 on success (all glocks acquired),
1335  *          errno on failure (no glocks acquired)
1336  */
1337
1338 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1339                      struct gfs2_holder **p)
1340 {
1341         unsigned int x;
1342         int error = 0;
1343
1344         for (x = 0; x < num_gh; x++)
1345                 p[x] = &ghs[x];
1346
1347         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1348
1349         for (x = 0; x < num_gh; x++) {
1350                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1351
1352                 error = gfs2_glock_nq(p[x]);
1353                 if (error) {
1354                         while (x--)
1355                                 gfs2_glock_dq(p[x]);
1356                         break;
1357                 }
1358         }
1359
1360         return error;
1361 }
1362
1363 /**
1364  * gfs2_glock_nq_m - acquire multiple glocks
1365  * @num_gh: the number of structures
1366  * @ghs: an array of struct gfs2_holder structures
1367  *
1368  *
1369  * Returns: 0 on success (all glocks acquired),
1370  *          errno on failure (no glocks acquired)
1371  */
1372
1373 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1374 {
1375         struct gfs2_holder *tmp[4];
1376         struct gfs2_holder **pph = tmp;
1377         int error = 0;
1378
1379         switch(num_gh) {
1380         case 0:
1381                 return 0;
1382         case 1:
1383                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1384                 return gfs2_glock_nq(ghs);
1385         default:
1386                 if (num_gh <= 4)
1387                         break;
1388                 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1389                 if (!pph)
1390                         return -ENOMEM;
1391         }
1392
1393         error = nq_m_sync(num_gh, ghs, pph);
1394
1395         if (pph != tmp)
1396                 kfree(pph);
1397
1398         return error;
1399 }
1400
1401 /**
1402  * gfs2_glock_dq_m - release multiple glocks
1403  * @num_gh: the number of structures
1404  * @ghs: an array of struct gfs2_holder structures
1405  *
1406  */
1407
1408 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1409 {
1410         unsigned int x;
1411
1412         for (x = 0; x < num_gh; x++)
1413                 gfs2_glock_dq(&ghs[x]);
1414 }
1415
1416 /**
1417  * gfs2_glock_dq_uninit_m - release multiple glocks
1418  * @num_gh: the number of structures
1419  * @ghs: an array of struct gfs2_holder structures
1420  *
1421  */
1422
1423 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1424 {
1425         unsigned int x;
1426
1427         for (x = 0; x < num_gh; x++)
1428                 gfs2_glock_dq_uninit(&ghs[x]);
1429 }
1430
1431 /**
1432  * gfs2_lvb_hold - attach an LVB to a glock
1433  * @gl: The glock in question
1434  *
1435  */
1436
1437 int gfs2_lvb_hold(struct gfs2_glock *gl)
1438 {
1439         int error;
1440
1441         gfs2_glmutex_lock(gl);
1442
1443         if (!atomic_read(&gl->gl_lvb_count)) {
1444                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1445                 if (error) {
1446                         gfs2_glmutex_unlock(gl);
1447                         return error;
1448                 }
1449                 gfs2_glock_hold(gl);
1450         }
1451         atomic_inc(&gl->gl_lvb_count);
1452
1453         gfs2_glmutex_unlock(gl);
1454
1455         return 0;
1456 }
1457
1458 /**
1459  * gfs2_lvb_unhold - detach an LVB from a glock
1460  * @gl: The glock in question
1461  *
1462  */
1463
1464 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1465 {
1466         gfs2_glock_hold(gl);
1467         gfs2_glmutex_lock(gl);
1468
1469         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1470         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1471                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1472                 gl->gl_lvb = NULL;
1473                 gfs2_glock_put(gl);
1474         }
1475
1476         gfs2_glmutex_unlock(gl);
1477         gfs2_glock_put(gl);
1478 }
1479
1480 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1481                         unsigned int state)
1482 {
1483         struct gfs2_glock *gl;
1484         unsigned long delay = 0;
1485         unsigned long holdtime;
1486         unsigned long now = jiffies;
1487
1488         gl = gfs2_glock_find(sdp, name);
1489         if (!gl)
1490                 return;
1491
1492         holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1493         if (time_before(now, holdtime))
1494                 delay = holdtime - now;
1495
1496         handle_callback(gl, state, 1, delay);
1497         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1498                 gfs2_glock_put(gl);
1499 }
1500
1501 /**
1502  * gfs2_glock_cb - Callback used by locking module
1503  * @sdp: Pointer to the superblock
1504  * @type: Type of callback
1505  * @data: Type dependent data pointer
1506  *
1507  * Called by the locking module when it wants to tell us something.
1508  * Either we need to drop a lock, one of our ASYNC requests completed, or
1509  * a journal from another client needs to be recovered.
1510  */
1511
1512 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1513 {
1514         struct gfs2_sbd *sdp = cb_data;
1515
1516         switch (type) {
1517         case LM_CB_NEED_E:
1518                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1519                 return;
1520
1521         case LM_CB_NEED_D:
1522                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1523                 return;
1524
1525         case LM_CB_NEED_S:
1526                 blocking_cb(sdp, data, LM_ST_SHARED);
1527                 return;
1528
1529         case LM_CB_ASYNC: {
1530                 struct lm_async_cb *async = data;
1531                 struct gfs2_glock *gl;
1532
1533                 down_read(&gfs2_umount_flush_sem);
1534                 gl = gfs2_glock_find(sdp, &async->lc_name);
1535                 if (gfs2_assert_warn(sdp, gl))
1536                         return;
1537                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1538                         gl->gl_req_bh(gl, async->lc_ret);
1539                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1540                         gfs2_glock_put(gl);
1541                 up_read(&gfs2_umount_flush_sem);
1542                 return;
1543         }
1544
1545         case LM_CB_NEED_RECOVERY:
1546                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1547                 if (sdp->sd_recoverd_process)
1548                         wake_up_process(sdp->sd_recoverd_process);
1549                 return;
1550
1551         case LM_CB_DROPLOCKS:
1552                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1553                 gfs2_quota_scan(sdp);
1554                 return;
1555
1556         default:
1557                 gfs2_assert_warn(sdp, 0);
1558                 return;
1559         }
1560 }
1561
1562 /**
1563  * demote_ok - Check to see if it's ok to unlock a glock
1564  * @gl: the glock
1565  *
1566  * Returns: 1 if it's ok
1567  */
1568
1569 static int demote_ok(struct gfs2_glock *gl)
1570 {
1571         const struct gfs2_glock_operations *glops = gl->gl_ops;
1572         int demote = 1;
1573
1574         if (test_bit(GLF_STICKY, &gl->gl_flags))
1575                 demote = 0;
1576         else if (glops->go_demote_ok)
1577                 demote = glops->go_demote_ok(gl);
1578
1579         return demote;
1580 }
1581
1582 /**
1583  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1584  * @gl: the glock
1585  *
1586  */
1587
1588 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1589 {
1590         struct gfs2_sbd *sdp = gl->gl_sbd;
1591
1592         spin_lock(&sdp->sd_reclaim_lock);
1593         if (list_empty(&gl->gl_reclaim)) {
1594                 gfs2_glock_hold(gl);
1595                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1596                 atomic_inc(&sdp->sd_reclaim_count);
1597         }
1598         spin_unlock(&sdp->sd_reclaim_lock);
1599
1600         wake_up(&sdp->sd_reclaim_wq);
1601 }
1602
1603 /**
1604  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1605  * @sdp: the filesystem
1606  *
1607  * Called from the gfs2_glockd() glock reclaim daemon, or when promoting a
1608  * different glock and we notice that there are a lot of glocks in the
1609  * reclaim list.
1610  *
1611  */
1612
1613 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1614 {
1615         struct gfs2_glock *gl;
1616
1617         spin_lock(&sdp->sd_reclaim_lock);
1618         if (list_empty(&sdp->sd_reclaim_list)) {
1619                 spin_unlock(&sdp->sd_reclaim_lock);
1620                 return;
1621         }
1622         gl = list_entry(sdp->sd_reclaim_list.next,
1623                         struct gfs2_glock, gl_reclaim);
1624         list_del_init(&gl->gl_reclaim);
1625         spin_unlock(&sdp->sd_reclaim_lock);
1626
1627         atomic_dec(&sdp->sd_reclaim_count);
1628         atomic_inc(&sdp->sd_reclaimed);
1629
1630         if (gfs2_glmutex_trylock(gl)) {
1631                 if (list_empty(&gl->gl_holders) &&
1632                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1633                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1634                 gfs2_glmutex_unlock(gl);
1635         }
1636
1637         gfs2_glock_put(gl);
1638 }
1639
1640 /**
1641  * examine_bucket - Call a function for each glock in a hash bucket
1642  * @examiner: the function
1643  * @sdp: the filesystem
1644  * @bucket: the bucket
1645  *
1646  * Returns: 1 if the bucket has entries
1647  */
1648
1649 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1650                           unsigned int hash)
1651 {
1652         struct gfs2_glock *gl, *prev = NULL;
1653         int has_entries = 0;
1654         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1655
1656         read_lock(gl_lock_addr(hash));
1657         /* Can't use hlist_for_each_entry - don't want prefetch here */
1658         if (hlist_empty(head))
1659                 goto out;
1660         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1661         while(1) {
1662                 if (!sdp || gl->gl_sbd == sdp) {
1663                         gfs2_glock_hold(gl);
1664                         read_unlock(gl_lock_addr(hash));
1665                         if (prev)
1666                                 gfs2_glock_put(prev);
1667                         prev = gl;
1668                         examiner(gl);
1669                         has_entries = 1;
1670                         read_lock(gl_lock_addr(hash));
1671                 }
1672                 if (gl->gl_list.next == NULL)
1673                         break;
1674                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1675         }
1676 out:
1677         read_unlock(gl_lock_addr(hash));
1678         if (prev)
1679                 gfs2_glock_put(prev);
1680         cond_resched();
1681         return has_entries;
1682 }
1683
1684 /**
1685  * scan_glock - look at a glock and see if we can reclaim it
1686  * @gl: the glock to look at
1687  *
1688  */
1689
1690 static void scan_glock(struct gfs2_glock *gl)
1691 {
1692         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1693                 return;
1694
1695         if (gfs2_glmutex_trylock(gl)) {
1696                 if (list_empty(&gl->gl_holders) &&
1697                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1698                         goto out_schedule;
1699                 gfs2_glmutex_unlock(gl);
1700         }
1701         return;
1702
1703 out_schedule:
1704         gfs2_glmutex_unlock(gl);
1705         gfs2_glock_schedule_for_reclaim(gl);
1706 }
1707
1708 /**
1709  * clear_glock - look at a glock and see if we can free it from glock cache
1710  * @gl: the glock to look at
1711  *
1712  */
1713
1714 static void clear_glock(struct gfs2_glock *gl)
1715 {
1716         struct gfs2_sbd *sdp = gl->gl_sbd;
1717         int released;
1718
1719         spin_lock(&sdp->sd_reclaim_lock);
1720         if (!list_empty(&gl->gl_reclaim)) {
1721                 list_del_init(&gl->gl_reclaim);
1722                 atomic_dec(&sdp->sd_reclaim_count);
1723                 spin_unlock(&sdp->sd_reclaim_lock);
1724                 released = gfs2_glock_put(gl);
1725                 gfs2_assert(sdp, !released);
1726         } else {
1727                 spin_unlock(&sdp->sd_reclaim_lock);
1728         }
1729
1730         if (gfs2_glmutex_trylock(gl)) {
1731                 if (list_empty(&gl->gl_holders) &&
1732                     gl->gl_state != LM_ST_UNLOCKED)
1733                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1734                 gfs2_glmutex_unlock(gl);
1735         }
1736 }
1737
1738 /**
1739  * gfs2_gl_hash_clear - Empty out the glock hash table
1740  * @sdp: the filesystem
1741  * @wait: wait until it's all gone
1742  *
1743  * Called when unmounting the filesystem, or when the inter-node lock
1744  * manager requests DROPLOCKS because it is running out of capacity.
1745  */
1746
1747 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1748 {
1749         unsigned long t;
1750         unsigned int x;
1751         int cont;
1752
1753         t = jiffies;
1754
1755         for (;;) {
1756                 cont = 0;
1757                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1758                         if (examine_bucket(clear_glock, sdp, x))
1759                                 cont = 1;
1760                 }
1761
1762                 if (!wait || !cont)
1763                         break;
1764
1765                 if (time_after_eq(jiffies,
1766                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1767                         fs_warn(sdp, "Unmount seems to be stalled. "
1768                                      "Dumping lock state...\n");
1769                         gfs2_dump_lockstate(sdp);
1770                         t = jiffies;
1771                 }
1772
1773                 down_write(&gfs2_umount_flush_sem);
1774                 invalidate_inodes(sdp->sd_vfs);
1775                 up_write(&gfs2_umount_flush_sem);
1776                 msleep(10);
1777         }
1778 }
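
/*
 * The two callers named in the comment above use @wait differently: the
 * unmount path blocks until every glock is gone, while the DROPLOCKS
 * callback makes a single non-blocking pass.  The helper below is purely
 * illustrative of those two invocations; the real call sites are the
 * unmount code and the lock-module callback, not this sketch.
 */
static void example_hash_clear_callers(struct gfs2_sbd *sdp)
{
        /* Unmount: wait until the hash table is completely empty */
        gfs2_gl_hash_clear(sdp, 1);

        /* LM_CB_DROPLOCKS: shed what can be shed, but do not block */
        gfs2_gl_hash_clear(sdp, 0);
}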
1779
1780 /*
1781  *  Diagnostic routines to help debug distributed deadlock
1782  */
1783
1784 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1785                               unsigned long address)
1786 {
1787         char buffer[KSYM_SYMBOL_LEN];
1788
1789         sprint_symbol(buffer, address);
1790         print_dbg(gi, fmt, buffer);
1791 }
1792
1793 /**
1794  * dump_holder - print information about a glock holder
1795  * @str: a string naming the type of holder
1796  * @gh: the glock holder
1797  *
1798  * Returns: 0 on success, -ENOBUFS when we run out of space
1799  */
1800
1801 static int dump_holder(struct glock_iter *gi, char *str,
1802                        struct gfs2_holder *gh)
1803 {
1804         unsigned int x;
1805         struct task_struct *gh_owner;
1806
1807         print_dbg(gi, "  %s\n", str);
1808         if (gh->gh_owner_pid) {
1809                 print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
1810                 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1811                 if (gh_owner)
1812                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1813                 else
1814                         print_dbg(gi, "(ended)\n");
1815         } else
1816                 print_dbg(gi, "    owner = -1\n");
1817         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1818         print_dbg(gi, "    gh_flags =");
1819         for (x = 0; x < 32; x++)
1820                 if (gh->gh_flags & (1 << x))
1821                         print_dbg(gi, " %u", x);
1822         print_dbg(gi, " \n");
1823         print_dbg(gi, "    error = %d\n", gh->gh_error);
1824         print_dbg(gi, "    gh_iflags =");
1825         for (x = 0; x < 32; x++)
1826                 if (test_bit(x, &gh->gh_iflags))
1827                         print_dbg(gi, " %u", x);
1828         print_dbg(gi, " \n");
1829         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1830
1831         return 0;
1832 }
1833
1834 /**
1835  * dump_inode - print information about an inode
1836  * @ip: the inode
1837  *
1838  * Returns: 0 on success, -ENOBUFS when we run out of space
1839  */
1840
1841 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1842 {
1843         unsigned int x;
1844
1845         print_dbg(gi, "  Inode:\n");
1846         print_dbg(gi, "    num = %llu/%llu\n",
1847                   (unsigned long long)ip->i_no_formal_ino,
1848                   (unsigned long long)ip->i_no_addr);
1849         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1850         print_dbg(gi, "    i_flags =");
1851         for (x = 0; x < 32; x++)
1852                 if (test_bit(x, &ip->i_flags))
1853                         print_dbg(gi, " %u", x);
1854         print_dbg(gi, " \n");
1855         return 0;
1856 }
1857
1858 /**
1859  * dump_glock - print information about a glock
1860  * @gi: the glock iterator (or NULL to print to the console)
1861  * @gl: the glock
1862  *
1863  * Returns: 0 on success, -ENOBUFS when we run out of space
1864  */
1865
1866 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1867 {
1868         struct gfs2_holder *gh;
1869         unsigned int x;
1870         int error = -ENOBUFS;
1871         struct task_struct *gl_owner;
1872
1873         spin_lock(&gl->gl_spin);
1874
1875         print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1876                    (unsigned long long)gl->gl_name.ln_number);
1877         print_dbg(gi, "  gl_flags =");
1878         for (x = 0; x < 32; x++) {
1879                 if (test_bit(x, &gl->gl_flags))
1880                         print_dbg(gi, " %u", x);
1881         }
1882         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1883                 print_dbg(gi, " (unlocked)");
1884         print_dbg(gi, " \n");
1885         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1886         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1887         if (gl->gl_owner_pid) {
1888                 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1889                 if (gl_owner)
1890                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1891                                   gl->gl_owner_pid, gl_owner->comm);
1892                 else
1893                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1894                                   gl->gl_owner_pid);
1895         } else
1896                 print_dbg(gi, "  gl_owner = -1\n");
1897         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1898         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1899         print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1900         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1901         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1902         print_dbg(gi, "  reclaim = %s\n",
1903                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1904         if (gl->gl_aspace)
1905                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1906                            gl->gl_aspace->i_mapping->nrpages);
1907         else
1908                 print_dbg(gi, "  aspace = no\n");
1909         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1910         if (gl->gl_req_gh) {
1911                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1912                 if (error)
1913                         goto out;
1914         }
1915         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1916                 error = dump_holder(gi, "Holder", gh);
1917                 if (error)
1918                         goto out;
1919         }
1920         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1921                 error = dump_holder(gi, "Waiter1", gh);
1922                 if (error)
1923                         goto out;
1924         }
1925         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1926                 error = dump_holder(gi, "Waiter3", gh);
1927                 if (error)
1928                         goto out;
1929         }
1930         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1931                 print_dbg(gi, "  Demotion req to state %u (%llu us ago)\n",
1932                           gl->gl_demote_state, (unsigned long long)
1933                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1934         }
1935         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1936                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1937                         list_empty(&gl->gl_holders)) {
1938                         error = dump_inode(gi, gl->gl_object);
1939                         if (error)
1940                                 goto out;
1941                 } else {
1942                         error = -ENOBUFS;
1943                         print_dbg(gi, "  Inode: busy\n");
1944                 }
1945         }
1946
1947         error = 0;
1948
1949 out:
1950         spin_unlock(&gl->gl_spin);
1951         return error;
1952 }
1953
1954 /**
1955  * gfs2_dump_lockstate - print out the current lockstate
1956  * @sdp: the filesystem
1957  *
1958  * Dumps the lockstate of every glock belonging to @sdp to the console.
1959  */
1962
1963 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1964 {
1965         struct gfs2_glock *gl;
1966         struct hlist_node *h;
1967         unsigned int x;
1968         int error = 0;
1969
1970         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1971
1972                 read_lock(gl_lock_addr(x));
1973
1974                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1975                         if (gl->gl_sbd != sdp)
1976                                 continue;
1977
1978                         error = dump_glock(NULL, gl);
1979                         if (error)
1980                                 break;
1981                 }
1982
1983                 read_unlock(gl_lock_addr(x));
1984
1985                 if (error)
1986                         break;
1987         }
1988
1990         return error;
1991 }
1992
1993 /**
1994  * gfs2_scand - Look for cached glocks and inodes to toss from memory
1995  * @sdp: Pointer to GFS2 superblock
1996  *
1997  * A single instance of this daemon runs, finding candidates to add to
1998  * sd_reclaim_list.  See gfs2_glockd(), which does the actual reclaiming.
1999  */
2000
2001 static int gfs2_scand(void *data)
2002 {
2003         unsigned x;
2004         unsigned delay;
2005
2006         while (!kthread_should_stop()) {
2007                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2008                         examine_bucket(scan_glock, NULL, x);
2009                 if (freezing(current))
2010                         refrigerator();
2011                 delay = scand_secs;
2012                 if (delay < 1)
2013                         delay = 1;
2014                 schedule_timeout_interruptible(delay * HZ);
2015         }
2016
2017         return 0;
2018 }
2019
2022 int __init gfs2_glock_init(void)
2023 {
2024         unsigned i;
2025         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2026                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2027         }
2028 #ifdef GL_HASH_LOCK_SZ
2029         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
2030                 rwlock_init(&gl_hash_locks[i]);
2031         }
2032 #endif
2033
2034         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2035         if (IS_ERR(scand_process))
2036                 return PTR_ERR(scand_process);
2037
2038         glock_workqueue = create_workqueue("glock_workqueue");
2039         if (!glock_workqueue) {
2040                 kthread_stop(scand_process);
2041                 return -ENOMEM;
2042         }
2043
2044         return 0;
2045 }
2046
2047 void gfs2_glock_exit(void)
2048 {
2049         destroy_workqueue(glock_workqueue);
2050         kthread_stop(scand_process);
2051 }
2052
2053 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2054 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
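
/*
 * Because scand_secs is exported with S_IWUSR above, it can be changed at
 * runtime through sysfs as well as set at module load time.  A minimal
 * standalone userspace sketch (not kernel code) follows; the path assumes
 * the module is loaded as "gfs2" and sysfs is mounted at /sys, which is the
 * usual but not guaranteed setup.
 */
#include <stdio.h>

int main(void)
{
        /* Ask scand to rescan every 10 seconds instead of the default 5 */
        FILE *f = fopen("/sys/module/gfs2/parameters/scand_secs", "w");

        if (!f) {
                perror("scand_secs");
                return 1;
        }
        fprintf(f, "10\n");
        fclose(f);
        return 0;
}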
2055
2056 static int gfs2_glock_iter_next(struct glock_iter *gi)
2057 {
2058         struct gfs2_glock *gl;
2059
2060 restart:
2061         read_lock(gl_lock_addr(gi->hash));
2062         gl = gi->gl;
2063         if (gl) {
2064                 gi->gl = hlist_entry(gl->gl_list.next,
2065                                      struct gfs2_glock, gl_list);
2066                 if (gi->gl)
2067                         gfs2_glock_hold(gi->gl);
2068         }
2069         read_unlock(gl_lock_addr(gi->hash));
2070         if (gl)
2071                 gfs2_glock_put(gl);
2072         if (gl && gi->gl == NULL)
2073                 gi->hash++;
2074         while(gi->gl == NULL) {
2075                 if (gi->hash >= GFS2_GL_HASH_SIZE)
2076                         return 1;
2077                 read_lock(gl_lock_addr(gi->hash));
2078                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2079                                      struct gfs2_glock, gl_list);
2080                 if (gi->gl)
2081                         gfs2_glock_hold(gi->gl);
2082                 read_unlock(gl_lock_addr(gi->hash));
2083                 gi->hash++;
2084         }
2085
2086         if (gi->sdp != gi->gl->gl_sbd)
2087                 goto restart;
2088
2089         return 0;
2090 }
2091
2092 static void gfs2_glock_iter_free(struct glock_iter *gi)
2093 {
2094         if (gi->gl)
2095                 gfs2_glock_put(gi->gl);
2096         kfree(gi);
2097 }
2098
2099 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2100 {
2101         struct glock_iter *gi;
2102
2103         gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2104         if (!gi)
2105                 return NULL;
2106
2107         gi->sdp = sdp;
2108         gi->hash = 0;
2109         gi->seq = NULL;
2110         gi->gl = NULL;
2111         memset(gi->string, 0, sizeof(gi->string));
2112
2113         if (gfs2_glock_iter_next(gi)) {
2114                 gfs2_glock_iter_free(gi);
2115                 return NULL;
2116         }
2117
2118         return gi;
2119 }
2120
2121 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2122 {
2123         struct glock_iter *gi;
2124         loff_t n = *pos;
2125
2126         gi = gfs2_glock_iter_init(file->private);
2127         if (!gi)
2128                 return NULL;
2129
2130         while(n--) {
2131                 if (gfs2_glock_iter_next(gi)) {
2132                         gfs2_glock_iter_free(gi);
2133                         return NULL;
2134                 }
2135         }
2136
2137         return gi;
2138 }
2139
2140 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2141                                  loff_t *pos)
2142 {
2143         struct glock_iter *gi = iter_ptr;
2144
2145         (*pos)++;
2146
2147         if (gfs2_glock_iter_next(gi)) {
2148                 gfs2_glock_iter_free(gi);
2149                 return NULL;
2150         }
2151
2152         return gi;
2153 }
2154
2155 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2156 {
2157         struct glock_iter *gi = iter_ptr;
2158         if (gi)
2159                 gfs2_glock_iter_free(gi);
2160 }
2161
2162 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2163 {
2164         struct glock_iter *gi = iter_ptr;
2165
2166         gi->seq = file;
2167         dump_glock(gi, gi->gl);
2168
2169         return 0;
2170 }
2171
2172 static const struct seq_operations gfs2_glock_seq_ops = {
2173         .start = gfs2_glock_seq_start,
2174         .next  = gfs2_glock_seq_next,
2175         .stop  = gfs2_glock_seq_stop,
2176         .show  = gfs2_glock_seq_show,
2177 };
2178
2179 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2180 {
2181         struct seq_file *seq;
2182         int ret;
2183
2184         ret = seq_open(file, &gfs2_glock_seq_ops);
2185         if (ret)
2186                 return ret;
2187
2188         seq = file->private_data;
2189         seq->private = inode->i_private;
2190
2191         return 0;
2192 }
2193
2194 static const struct file_operations gfs2_debug_fops = {
2195         .owner   = THIS_MODULE,
2196         .open    = gfs2_debugfs_open,
2197         .read    = seq_read,
2198         .llseek  = seq_lseek,
2199         .release = seq_release
2200 };
2201
2202 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2203 {
2204         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2205         if (!sdp->debugfs_dir)
2206                 return -ENOMEM;
2207         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2208                                                          S_IFREG | S_IRUGO,
2209                                                          sdp->debugfs_dir, sdp,
2210                                                          &gfs2_debug_fops);
2211         if (!sdp->debugfs_dentry_glocks)
2212                 return -ENOMEM;
2213
2214         return 0;
2215 }
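
/*
 * Once a filesystem has registered here, its glock dump can be read from
 * userspace through debugfs.  A standalone userspace sketch (not kernel
 * code) is shown below; it assumes debugfs is mounted at /sys/kernel/debug
 * and uses "mycluster:gfs01" as a stand-in for the filesystem's lock table
 * name, so adjust both to match the actual system.
 */
#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/kernel/debug/gfs2/mycluster:gfs01/glocks", "r");

        if (!f) {
                perror("glocks");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}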
2216
2217 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2218 {
2219         if (sdp && sdp->debugfs_dir) {
2220                 if (sdp->debugfs_dentry_glocks) {
2221                         debugfs_remove(sdp->debugfs_dentry_glocks);
2222                         sdp->debugfs_dentry_glocks = NULL;
2223                 }
2224                 debugfs_remove(sdp->debugfs_dir);
2225                 sdp->debugfs_dir = NULL;
2226         }
2227 }
2228
2229 int gfs2_register_debugfs(void)
2230 {
2231         gfs2_root = debugfs_create_dir("gfs2", NULL);
2232         return gfs2_root ? 0 : -ENOMEM;
2233 }
2234
2235 void gfs2_unregister_debugfs(void)
2236 {
2237         debugfs_remove(gfs2_root);
2238         gfs2_root = NULL;
2239 }