[GFS2] Eliminate gl_req_bh
fs/gfs2/glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <linux/rwsem.h>
25 #include <asm/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
28 #include <linux/kthread.h>
29 #include <linux/freezer.h>
30 #include <linux/workqueue.h>
31 #include <linux/jiffies.h>
32
33 #include "gfs2.h"
34 #include "incore.h"
35 #include "glock.h"
36 #include "glops.h"
37 #include "inode.h"
38 #include "lm.h"
39 #include "lops.h"
40 #include "meta_io.h"
41 #include "quota.h"
42 #include "super.h"
43 #include "util.h"
44
45 struct gfs2_gl_hash_bucket {
46         struct hlist_head hb_list;
47 };
48
49 struct glock_iter {
50         int hash;                     /* hash bucket index         */
51         struct gfs2_sbd *sdp;         /* incore superblock         */
52         struct gfs2_glock *gl;        /* current glock struct      */
53         struct seq_file *seq;         /* sequence file for debugfs */
54         char string[512];             /* scratch space             */
55 };
56
57 typedef void (*glock_examiner) (struct gfs2_glock * gl);
58
59 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
60 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
61 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
62 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
63 static void run_queue(struct gfs2_glock *gl);
64
65 static DECLARE_RWSEM(gfs2_umount_flush_sem);
66 static struct dentry *gfs2_root;
67 static struct task_struct *scand_process;
68 static unsigned int scand_secs = 5;
69 static struct workqueue_struct *glock_workqueue;
70
71 #define GFS2_GL_HASH_SHIFT      15
72 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
73 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
74
75 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
77
78 /*
79  * Despite what you might think, the numbers below are not arbitrary :-)
80  * They are taken from the ipv4 routing hash code, which is well tested
81  * and thus should be nearly optimal. Later on we might tweak the numbers
82  * but for now this should be fine.
83  *
84  * The reason for putting the locks in a separate array from the list heads
85  * is that we can have fewer locks than list heads and save memory. We use
86  * the same hash function for both, but with a different hash mask.
87  */
88 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
89         defined(CONFIG_PROVE_LOCKING)
90
91 #ifdef CONFIG_LOCKDEP
92 # define GL_HASH_LOCK_SZ        256
93 #else
94 # if NR_CPUS >= 32
95 #  define GL_HASH_LOCK_SZ       4096
96 # elif NR_CPUS >= 16
97 #  define GL_HASH_LOCK_SZ       2048
98 # elif NR_CPUS >= 8
99 #  define GL_HASH_LOCK_SZ       1024
100 # elif NR_CPUS >= 4
101 #  define GL_HASH_LOCK_SZ       512
102 # else
103 #  define GL_HASH_LOCK_SZ       256
104 # endif
105 #endif
106
107 /* We never want more locks than chains */
108 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
109 # undef GL_HASH_LOCK_SZ
110 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
111 #endif
112
113 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
114
115 static inline rwlock_t *gl_lock_addr(unsigned int x)
116 {
117         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
118 }
119 #else /* not SMP, so no spinlocks required */
120 static inline rwlock_t *gl_lock_addr(unsigned int x)
121 {
122         return NULL;
123 }
124 #endif
125
126 /**
127  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
128  * @actual: the current state of the lock
129  * @requested: the lock state that was requested by the caller
130  * @flags: the modifier flags passed in by the caller
131  *
132  * Returns: 1 if the locks are compatible, 0 otherwise
133  */
134
135 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
136                                    int flags)
137 {
138         if (actual == requested)
139                 return 1;
140
141         if (flags & GL_EXACT)
142                 return 0;
143
144         if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
145                 return 1;
146
147         if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
148                 return 1;
149
150         return 0;
151 }
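
/*
 * Example (illustrative only, not from the original source): a glock
 * already held in LM_ST_EXCLUSIVE satisfies a new LM_ST_SHARED request
 * unless the caller passed GL_EXACT; with LM_FLAG_ANY, any locked state
 * is acceptable.
 */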
152
153 /**
154  * gl_hash() - Turn glock number into hash bucket number
155  * @sdp: The GFS2 superblock
156  * @name: The lock name
157  * Returns: The number of the corresponding hash bucket
158  */
159
160 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
161                             const struct lm_lockname *name)
162 {
163         unsigned int h;
164
165         h = jhash(&name->ln_number, sizeof(u64), 0);
166         h = jhash(&name->ln_type, sizeof(unsigned int), h);
167         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
168         h &= GFS2_GL_HASH_MASK;
169
170         return h;
171 }
172
173 /**
174  * glock_free() - Perform a few checks and then release struct gfs2_glock
175  * @gl: The glock to release
176  *
177  * Also calls lock module to release its internal structure for this glock.
178  *
179  */
180
181 static void glock_free(struct gfs2_glock *gl)
182 {
183         struct gfs2_sbd *sdp = gl->gl_sbd;
184         struct inode *aspace = gl->gl_aspace;
185
186         gfs2_lm_put_lock(sdp, gl->gl_lock);
187
188         if (aspace)
189                 gfs2_aspace_put(aspace);
190
191         kmem_cache_free(gfs2_glock_cachep, gl);
192 }
193
194 /**
195  * gfs2_glock_hold() - increment reference count on glock
196  * @gl: The glock to hold
197  *
198  */
199
200 static void gfs2_glock_hold(struct gfs2_glock *gl)
201 {
202         atomic_inc(&gl->gl_ref);
203 }
204
205 /**
206  * gfs2_glock_put() - Decrement reference count on glock
207  * @gl: The glock to put
208  *
209  */
210
211 int gfs2_glock_put(struct gfs2_glock *gl)
212 {
213         int rv = 0;
214         struct gfs2_sbd *sdp = gl->gl_sbd;
215
216         write_lock(gl_lock_addr(gl->gl_hash));
217         if (atomic_dec_and_test(&gl->gl_ref)) {
218                 hlist_del(&gl->gl_list);
219                 write_unlock(gl_lock_addr(gl->gl_hash));
220                 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
221                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
222                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
223                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
224                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
225                 glock_free(gl);
226                 rv = 1;
227                 goto out;
228         }
229         write_unlock(gl_lock_addr(gl->gl_hash));
230 out:
231         return rv;
232 }
233
234 /**
235  * search_bucket() - Find struct gfs2_glock by lock number
236  * @hash: the hash bucket to search
237  * @name: The lock name
238  *
239  * Returns: NULL, or the struct gfs2_glock with the requested number
240  */
241
242 static struct gfs2_glock *search_bucket(unsigned int hash,
243                                         const struct gfs2_sbd *sdp,
244                                         const struct lm_lockname *name)
245 {
246         struct gfs2_glock *gl;
247         struct hlist_node *h;
248
249         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
250                 if (!lm_name_equal(&gl->gl_name, name))
251                         continue;
252                 if (gl->gl_sbd != sdp)
253                         continue;
254
255                 atomic_inc(&gl->gl_ref);
256
257                 return gl;
258         }
259
260         return NULL;
261 }
262
263 /**
264  * gfs2_glock_find() - Find glock by lock number
265  * @sdp: The GFS2 superblock
266  * @name: The lock name
267  *
268  * Returns: NULL, or the struct gfs2_glock with the requested number
269  */
270
271 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
272                                           const struct lm_lockname *name)
273 {
274         unsigned int hash = gl_hash(sdp, name);
275         struct gfs2_glock *gl;
276
277         read_lock(gl_lock_addr(hash));
278         gl = search_bucket(hash, sdp, name);
279         read_unlock(gl_lock_addr(hash));
280
281         return gl;
282 }
283
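/**
 * glock_work_func - deferred demote handler for a glock
 * @work: the gl_work delayed work embedded in the glock
 *
 * Runs from glock_workqueue: turns a pending demote (GLF_PENDING_DEMOTE)
 * into a real one, runs the queue, and drops the reference taken when
 * the work was queued.
 */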
284 static void glock_work_func(struct work_struct *work)
285 {
286         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
287
288         spin_lock(&gl->gl_spin);
289         if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
290                 set_bit(GLF_DEMOTE, &gl->gl_flags);
291         run_queue(gl);
292         spin_unlock(&gl->gl_spin);
293         gfs2_glock_put(gl);
294 }
295
296 /**
297  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
298  * @sdp: The GFS2 superblock
299  * @number: the lock number
300  * @glops: The glock_operations to use
301  * @create: If 0, don't create the glock if it doesn't exist
302  * @glp: the glock is returned here
303  *
304  * This does not lock a glock, just finds/creates structures for one.
305  *
306  * Returns: errno
307  */
308
309 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
310                    const struct gfs2_glock_operations *glops, int create,
311                    struct gfs2_glock **glp)
312 {
313         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
314         struct gfs2_glock *gl, *tmp;
315         unsigned int hash = gl_hash(sdp, &name);
316         int error;
317
318         read_lock(gl_lock_addr(hash));
319         gl = search_bucket(hash, sdp, &name);
320         read_unlock(gl_lock_addr(hash));
321
322         if (gl || !create) {
323                 *glp = gl;
324                 return 0;
325         }
326
327         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
328         if (!gl)
329                 return -ENOMEM;
330
331         gl->gl_flags = 0;
332         gl->gl_name = name;
333         atomic_set(&gl->gl_ref, 1);
334         gl->gl_state = LM_ST_UNLOCKED;
335         gl->gl_demote_state = LM_ST_EXCLUSIVE;
336         gl->gl_hash = hash;
337         gl->gl_owner_pid = NULL;
338         gl->gl_ip = 0;
339         gl->gl_ops = glops;
340         gl->gl_req_gh = NULL;
341         gl->gl_vn = 0;
342         gl->gl_stamp = jiffies;
343         gl->gl_tchange = jiffies;
344         gl->gl_object = NULL;
345         gl->gl_sbd = sdp;
346         gl->gl_aspace = NULL;
347         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
348
349         /* If this glock protects actual on-disk data or metadata blocks,
350            create a VFS inode to manage the pages/buffers holding them. */
351         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
352                 gl->gl_aspace = gfs2_aspace_get(sdp);
353                 if (!gl->gl_aspace) {
354                         error = -ENOMEM;
355                         goto fail;
356                 }
357         }
358
359         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
360         if (error)
361                 goto fail_aspace;
362
363         write_lock(gl_lock_addr(hash));
364         tmp = search_bucket(hash, sdp, &name);
365         if (tmp) {
366                 write_unlock(gl_lock_addr(hash));
367                 glock_free(gl);
368                 gl = tmp;
369         } else {
370                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
371                 write_unlock(gl_lock_addr(hash));
372         }
373
374         *glp = gl;
375
376         return 0;
377
378 fail_aspace:
379         if (gl->gl_aspace)
380                 gfs2_aspace_put(gl->gl_aspace);
381 fail:
382         kmem_cache_free(gfs2_glock_cachep, gl);
383         return error;
384 }
385
386 /**
387  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
388  * @gl: the glock
389  * @state: the state we're requesting
390  * @flags: the modifier flags
391  * @gh: the holder structure
392  *
393  */
394
395 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
396                       struct gfs2_holder *gh)
397 {
398         INIT_LIST_HEAD(&gh->gh_list);
399         gh->gh_gl = gl;
400         gh->gh_ip = (unsigned long)__builtin_return_address(0);
401         gh->gh_owner_pid = get_pid(task_pid(current));
402         gh->gh_state = state;
403         gh->gh_flags = flags;
404         gh->gh_error = 0;
405         gh->gh_iflags = 0;
406         gfs2_glock_hold(gl);
407 }
408
409 /**
410  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
411  * @state: the state we're requesting
412  * @flags: the modifier flags
413  * @gh: the holder structure
414  *
415  * Don't mess with the glock.
416  *
417  */
418
419 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
420 {
421         gh->gh_state = state;
422         gh->gh_flags = flags;
423         gh->gh_iflags = 0;
424         gh->gh_ip = (unsigned long)__builtin_return_address(0);
425 }
426
427 /**
428  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
429  * @gh: the holder structure
430  *
431  */
432
433 void gfs2_holder_uninit(struct gfs2_holder *gh)
434 {
435         put_pid(gh->gh_owner_pid);
436         gfs2_glock_put(gh->gh_gl);
437         gh->gh_gl = NULL;
438         gh->gh_ip = 0;
439 }
440
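/**
 * gfs2_holder_wake - wake up anyone waiting on this holder
 * @gh: the holder
 *
 * Clears HIF_WAIT and wakes the bit waiter (see wait_on_holder()).
 */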
441 static void gfs2_holder_wake(struct gfs2_holder *gh)
442 {
443         clear_bit(HIF_WAIT, &gh->gh_iflags);
444         smp_mb__after_clear_bit();
445         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
446 }
447
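/*
 * Bit-wait action for wait_on_bit(): there is nothing to recheck here,
 * so simply give up the CPU until the bit is cleared and we are woken.
 */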
448 static int just_schedule(void *word)
449 {
450         schedule();
451         return 0;
452 }
453
454 static void wait_on_holder(struct gfs2_holder *gh)
455 {
456         might_sleep();
457         wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
458 }
459
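/**
 * gfs2_demote_wake - note that a demote request has completed
 * @gl: the glock
 *
 * Resets gl_demote_state to LM_ST_EXCLUSIVE (meaning "no demote
 * pending"), clears GLF_DEMOTE and wakes any process sleeping in
 * wait_on_demote().
 */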
460 static void gfs2_demote_wake(struct gfs2_glock *gl)
461 {
462         gl->gl_demote_state = LM_ST_EXCLUSIVE;
463         clear_bit(GLF_DEMOTE, &gl->gl_flags);
464         smp_mb__after_clear_bit();
465         wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
466 }
467
468 static void wait_on_demote(struct gfs2_glock *gl)
469 {
470         might_sleep();
471         wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
472 }
473
474 /**
475  * rq_mutex - process a mutex request in the queue
476  * @gh: the glock holder
477  *
478  * Returns: 1 if the queue is blocked
479  */
480
481 static int rq_mutex(struct gfs2_holder *gh)
482 {
483         struct gfs2_glock *gl = gh->gh_gl;
484
485         list_del_init(&gh->gh_list);
486         /*  gh->gh_error never examined.  */
487         set_bit(GLF_LOCK, &gl->gl_flags);
488         clear_bit(HIF_WAIT, &gh->gh_iflags);
489         smp_mb();
490         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
491
492         return 1;
493 }
494
495 /**
496  * rq_promote - process a promote request in the queue
497  * @gh: the glock holder
498  *
499  * Acquire a new inter-node lock, or change an existing lock to a more restrictive state.
500  *
501  * Returns: 1 if the queue is blocked
502  */
503
504 static int rq_promote(struct gfs2_holder *gh)
505 {
506         struct gfs2_glock *gl = gh->gh_gl;
507
508         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
509                 if (list_empty(&gl->gl_holders)) {
510                         gl->gl_req_gh = gh;
511                         set_bit(GLF_LOCK, &gl->gl_flags);
512                         spin_unlock(&gl->gl_spin);
513                         gfs2_glock_xmote_th(gh->gh_gl, gh);
514                         spin_lock(&gl->gl_spin);
515                 }
516                 return 1;
517         }
518
519         if (list_empty(&gl->gl_holders)) {
520                 set_bit(HIF_FIRST, &gh->gh_iflags);
521                 set_bit(GLF_LOCK, &gl->gl_flags);
522         } else {
523                 struct gfs2_holder *next_gh;
524                 if (gh->gh_state == LM_ST_EXCLUSIVE)
525                         return 1;
526                 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
527                                      gh_list);
528                 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
529                         return 1;
530         }
531
532         list_move_tail(&gh->gh_list, &gl->gl_holders);
533         gh->gh_error = 0;
534         set_bit(HIF_HOLDER, &gh->gh_iflags);
535
536         gfs2_holder_wake(gh);
537
538         return 0;
539 }
540
541 /**
542  * rq_demote - process a demote request in the queue
543  * @gl: the glock
544  *
545  * Returns: 1 if the queue is blocked
546  */
547
548 static int rq_demote(struct gfs2_glock *gl)
549 {
550         if (!list_empty(&gl->gl_holders))
551                 return 1;
552
553         if (gl->gl_state == gl->gl_demote_state ||
554             gl->gl_state == LM_ST_UNLOCKED) {
555                 gfs2_demote_wake(gl);
556                 return 0;
557         }
558
559         set_bit(GLF_LOCK, &gl->gl_flags);
560         set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
561
562         if (gl->gl_demote_state == LM_ST_UNLOCKED ||
563             gl->gl_state != LM_ST_EXCLUSIVE) {
564                 spin_unlock(&gl->gl_spin);
565                 gfs2_glock_drop_th(gl);
566         } else {
567                 spin_unlock(&gl->gl_spin);
568                 gfs2_glock_xmote_th(gl, NULL);
569         }
570
571         spin_lock(&gl->gl_spin);
572         clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
573
574         return 0;
575 }
576
577 /**
578  * run_queue - process holder structures on a glock
579  * @gl: the glock
580  *
581  */
582 static void run_queue(struct gfs2_glock *gl)
583 {
584         struct gfs2_holder *gh;
585         int blocked = 1;
586
587         for (;;) {
588                 if (test_bit(GLF_LOCK, &gl->gl_flags))
589                         break;
590
591                 if (!list_empty(&gl->gl_waiters1)) {
592                         gh = list_entry(gl->gl_waiters1.next,
593                                         struct gfs2_holder, gh_list);
594                         blocked = rq_mutex(gh);
595                 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
596                         blocked = rq_demote(gl);
597                         if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
598                                      !blocked) {
599                                 set_bit(GLF_DEMOTE, &gl->gl_flags);
600                                 gl->gl_demote_state = LM_ST_UNLOCKED;
601                         }
602                         clear_bit(GLF_WAITERS2, &gl->gl_flags);
603                 } else if (!list_empty(&gl->gl_waiters3)) {
604                         gh = list_entry(gl->gl_waiters3.next,
605                                         struct gfs2_holder, gh_list);
606                         blocked = rq_promote(gh);
607                 } else
608                         break;
609
610                 if (blocked)
611                         break;
612         }
613 }
614
615 /**
616  * gfs2_glmutex_lock - acquire a local lock on a glock
617  * @gl: the glock
618  *
619  * Gives caller exclusive access to manipulate a glock structure.
620  */
621
622 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
623 {
624         spin_lock(&gl->gl_spin);
625         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
626                 struct gfs2_holder gh;
627
628                 gfs2_holder_init(gl, 0, 0, &gh);
629                 set_bit(HIF_WAIT, &gh.gh_iflags);
630                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
631                 spin_unlock(&gl->gl_spin);
632                 wait_on_holder(&gh);
633                 gfs2_holder_uninit(&gh);
634         } else {
635                 gl->gl_owner_pid = get_pid(task_pid(current));
636                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
637                 spin_unlock(&gl->gl_spin);
638         }
639 }
640
641 /**
642  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
643  * @gl: the glock
644  *
645  * Returns: 1 if the glock is acquired
646  */
647
648 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
649 {
650         int acquired = 1;
651
652         spin_lock(&gl->gl_spin);
653         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
654                 acquired = 0;
655         } else {
656                 gl->gl_owner_pid = get_pid(task_pid(current));
657                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
658         }
659         spin_unlock(&gl->gl_spin);
660
661         return acquired;
662 }
663
664 /**
665  * gfs2_glmutex_unlock - release a local lock on a glock
666  * @gl: the glock
667  *
668  */
669
670 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
671 {
672         struct pid *pid;
673
674         spin_lock(&gl->gl_spin);
675         clear_bit(GLF_LOCK, &gl->gl_flags);
676         pid = gl->gl_owner_pid;
677         gl->gl_owner_pid = NULL;
678         gl->gl_ip = 0;
679         run_queue(gl);
680         spin_unlock(&gl->gl_spin);
681
682         put_pid(pid);
683 }
684
685 /**
686  * handle_callback - process a demote request
687  * @gl: the glock
688  * @state: the state the caller wants us to change to
689  *
690  * There are only two requests that we are going to see in actual
691  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
692  */
693
694 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
695                             int remote, unsigned long delay)
696 {
697         int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
698
699         spin_lock(&gl->gl_spin);
700         set_bit(bit, &gl->gl_flags);
701         if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
702                 gl->gl_demote_state = state;
703                 gl->gl_demote_time = jiffies;
704                 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
705                     gl->gl_object) {
706                         gfs2_glock_schedule_for_reclaim(gl);
707                         spin_unlock(&gl->gl_spin);
708                         return;
709                 }
710         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
711                         gl->gl_demote_state != state) {
712                 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
713                         set_bit(GLF_WAITERS2, &gl->gl_flags);
714                 else
715                         gl->gl_demote_state = LM_ST_UNLOCKED;
716         }
717         spin_unlock(&gl->gl_spin);
718 }
719
720 /**
721  * state_change - record that the glock is now in a different state
722  * @gl: the glock
722  * @new_state: the new state
724  *
725  */
726
727 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
728 {
729         int held1, held2;
730
731         held1 = (gl->gl_state != LM_ST_UNLOCKED);
732         held2 = (new_state != LM_ST_UNLOCKED);
733
734         if (held1 != held2) {
735                 if (held2)
736                         gfs2_glock_hold(gl);
737                 else
738                         gfs2_glock_put(gl);
739         }
740
741         gl->gl_state = new_state;
742         gl->gl_tchange = jiffies;
743 }
744
745 /**
746  * drop_bh - Called after a lock module unlock completes
747  * @gl: the glock
748  * @ret: the return status
749  *
750  * Wakes up the process waiting on the struct gfs2_holder (if any)
751  * Drops the reference on the glock that the top half took out
752  *
753  */
754
755 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
756 {
757         struct gfs2_sbd *sdp = gl->gl_sbd;
758         const struct gfs2_glock_operations *glops = gl->gl_ops;
759         struct gfs2_holder *gh = gl->gl_req_gh;
760
761         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
762         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
763         gfs2_assert_warn(sdp, !ret);
764
765         state_change(gl, LM_ST_UNLOCKED);
766
767         if (glops->go_inval)
768                 glops->go_inval(gl, DIO_METADATA);
769
770         if (gh) {
771                 spin_lock(&gl->gl_spin);
772                 list_del_init(&gh->gh_list);
773                 gh->gh_error = 0;
774                 spin_unlock(&gl->gl_spin);
775         }
776
777         spin_lock(&gl->gl_spin);
778         gfs2_demote_wake(gl);
779         gl->gl_req_gh = NULL;
780         clear_bit(GLF_LOCK, &gl->gl_flags);
781         spin_unlock(&gl->gl_spin);
782
783         gfs2_glock_put(gl);
784
785         if (gh)
786                 gfs2_holder_wake(gh);
787 }
788
789 /**
790  * xmote_bh - Called after the lock module is done acquiring a lock
791  * @gl: The glock in question
792  * @ret: the int returned from the lock module
793  *
794  */
795
796 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
797 {
798         struct gfs2_sbd *sdp = gl->gl_sbd;
799         const struct gfs2_glock_operations *glops = gl->gl_ops;
800         struct gfs2_holder *gh = gl->gl_req_gh;
801         int prev_state = gl->gl_state;
802         int op_done = 1;
803
804         if ((ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
805                 drop_bh(gl, ret);
806                 return;
807         }
808
809         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
810         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
811         gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
812
813         state_change(gl, ret & LM_OUT_ST_MASK);
814
815         if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
816                 if (glops->go_inval)
817                         glops->go_inval(gl, DIO_METADATA);
818         } else if (gl->gl_state == LM_ST_DEFERRED) {
819                 /* We might not want to do this here.
820                    Look at moving to the inode glops. */
821                 if (glops->go_inval)
822                         glops->go_inval(gl, 0);
823         }
824
825         /*  Deal with each possible exit condition  */
826
827         if (!gh) {
828                 gl->gl_stamp = jiffies;
829                 if (ret & LM_OUT_CANCELED) {
830                         op_done = 0;
831                 } else {
832                         spin_lock(&gl->gl_spin);
833                         if (gl->gl_state != gl->gl_demote_state) {
834                                 spin_unlock(&gl->gl_spin);
835                                 gfs2_glock_drop_th(gl);
836                                 gfs2_glock_put(gl);
837                                 return;
838                         }
839                         gfs2_demote_wake(gl);
840                         spin_unlock(&gl->gl_spin);
841                 }
842         } else {
843                 spin_lock(&gl->gl_spin);
844                 list_del_init(&gh->gh_list);
845                 gh->gh_error = -EIO;
846                 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 
847                         goto out;
848                 gh->gh_error = GLR_CANCELED;
849                 if (ret & LM_OUT_CANCELED) 
850                         goto out;
851                 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
852                         list_add_tail(&gh->gh_list, &gl->gl_holders);
853                         gh->gh_error = 0;
854                         set_bit(HIF_HOLDER, &gh->gh_iflags);
855                         set_bit(HIF_FIRST, &gh->gh_iflags);
856                         op_done = 0;
857                         goto out;
858                 }
859                 gh->gh_error = GLR_TRYFAILED;
860                 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
861                         goto out;
862                 gh->gh_error = -EINVAL;
863                 if (gfs2_assert_withdraw(sdp, 0) == -1)
864                         fs_err(sdp, "ret = 0x%.8X\n", ret);
865 out:
866                 spin_unlock(&gl->gl_spin);
867         }
868
869         if (glops->go_xmote_bh)
870                 glops->go_xmote_bh(gl);
871
872         if (op_done) {
873                 spin_lock(&gl->gl_spin);
874                 gl->gl_req_gh = NULL;
875                 clear_bit(GLF_LOCK, &gl->gl_flags);
876                 spin_unlock(&gl->gl_spin);
877         }
878
879         gfs2_glock_put(gl);
880
881         if (gh)
882                 gfs2_holder_wake(gh);
883 }
884
885 /**
886  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
887  * @gl: The glock in question
888  * @gh: the glock holder prompting the state change, or NULL when
889  *      the glock is being demoted to gl->gl_demote_state
890  *
891  */
892
893 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
894 {
895         struct gfs2_sbd *sdp = gl->gl_sbd;
896         int flags = gh ? gh->gh_flags : 0;
897         unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
898         const struct gfs2_glock_operations *glops = gl->gl_ops;
899         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
900                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
901                                  LM_FLAG_PRIORITY);
902         unsigned int lck_ret;
903
904         if (glops->go_xmote_th)
905                 glops->go_xmote_th(gl);
906
907         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
908         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
909         gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
910         gfs2_assert_warn(sdp, state != gl->gl_state);
911
912         gfs2_glock_hold(gl);
913
914         lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
915
916         if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
917                 return;
918
919         if (lck_ret & LM_OUT_ASYNC)
920                 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
921         else
922                 xmote_bh(gl, lck_ret);
923 }
924
925 /**
926  * gfs2_glock_drop_th - call into the lock module to unlock a lock
927  * @gl: the glock
928  *
929  */
930
931 static void gfs2_glock_drop_th(struct gfs2_glock *gl)
932 {
933         struct gfs2_sbd *sdp = gl->gl_sbd;
934         const struct gfs2_glock_operations *glops = gl->gl_ops;
935         unsigned int ret;
936
937         if (glops->go_xmote_th)
938                 glops->go_xmote_th(gl);
939
940         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
941         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
942         gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
943
944         gfs2_glock_hold(gl);
945
946         ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
947
948         if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
949                 return;
950
951         if (!ret)
952                 drop_bh(gl, ret);
953         else
954                 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
955 }
956
957 /**
958  * do_cancels - cancel requests for locks stuck waiting on an expire flag
959  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
960  *
961  * Don't cancel GL_NOCANCEL requests.
962  */
963
964 static void do_cancels(struct gfs2_holder *gh)
965 {
966         struct gfs2_glock *gl = gh->gh_gl;
967
968         spin_lock(&gl->gl_spin);
969
970         while (gl->gl_req_gh != gh &&
971                !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
972                !list_empty(&gh->gh_list)) {
973                 if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
974                         spin_unlock(&gl->gl_spin);
975                         gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
976                         msleep(100);
977                         spin_lock(&gl->gl_spin);
978                 } else {
979                         spin_unlock(&gl->gl_spin);
980                         msleep(100);
981                         spin_lock(&gl->gl_spin);
982                 }
983         }
984
985         spin_unlock(&gl->gl_spin);
986 }
987
988 /**
989  * glock_wait_internal - wait on a glock acquisition
990  * @gh: the glock holder
991  *
992  * Returns: 0 on success
993  */
994
995 static int glock_wait_internal(struct gfs2_holder *gh)
996 {
997         struct gfs2_glock *gl = gh->gh_gl;
998         struct gfs2_sbd *sdp = gl->gl_sbd;
999         const struct gfs2_glock_operations *glops = gl->gl_ops;
1000
1001         if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1002                 return -EIO;
1003
1004         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1005                 spin_lock(&gl->gl_spin);
1006                 if (gl->gl_req_gh != gh &&
1007                     !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1008                     !list_empty(&gh->gh_list)) {
1009                         list_del_init(&gh->gh_list);
1010                         gh->gh_error = GLR_TRYFAILED;
1011                         run_queue(gl);
1012                         spin_unlock(&gl->gl_spin);
1013                         return gh->gh_error;
1014                 }
1015                 spin_unlock(&gl->gl_spin);
1016         }
1017
1018         if (gh->gh_flags & LM_FLAG_PRIORITY)
1019                 do_cancels(gh);
1020
1021         wait_on_holder(gh);
1022         if (gh->gh_error)
1023                 return gh->gh_error;
1024
1025         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1026         gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1027                                                    gh->gh_flags));
1028
1029         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1030                 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1031
1032                 if (glops->go_lock) {
1033                         gh->gh_error = glops->go_lock(gh);
1034                         if (gh->gh_error) {
1035                                 spin_lock(&gl->gl_spin);
1036                                 list_del_init(&gh->gh_list);
1037                                 spin_unlock(&gl->gl_spin);
1038                         }
1039                 }
1040
1041                 spin_lock(&gl->gl_spin);
1042                 gl->gl_req_gh = NULL;
1043                 clear_bit(GLF_LOCK, &gl->gl_flags);
1044                 run_queue(gl);
1045                 spin_unlock(&gl->gl_spin);
1046         }
1047
1048         return gh->gh_error;
1049 }
1050
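/**
 * find_holder_by_owner - look for a holder queued by a given process
 * @head: the holder list to search
 * @pid: the struct pid of the owner
 *
 * Returns: the first matching holder, or NULL
 */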
1051 static inline struct gfs2_holder *
1052 find_holder_by_owner(struct list_head *head, struct pid *pid)
1053 {
1054         struct gfs2_holder *gh;
1055
1056         list_for_each_entry(gh, head, gh_list) {
1057                 if (gh->gh_owner_pid == pid)
1058                         return gh;
1059         }
1060
1061         return NULL;
1062 }
1063
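/**
 * print_dbg - print to the debugfs seq_file if there is one, else printk
 * @gi: the glock iterator carrying the seq_file, or NULL
 * @fmt: printf-style format string
 */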
1064 static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1065 {
1066         va_list args;
1067
1068         va_start(args, fmt);
1069         if (gi) {
1070                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
1071                 seq_printf(gi->seq, "%s", gi->string);
1072         }
1073         else
1074                 vprintk(fmt, args);
1075         va_end(args);
1076 }
1077
1078 /**
1079  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1080  * @gh: the holder structure to add
1081  *
1082  */
1083
1084 static void add_to_queue(struct gfs2_holder *gh)
1085 {
1086         struct gfs2_glock *gl = gh->gh_gl;
1087         struct gfs2_holder *existing;
1088
1089         BUG_ON(gh->gh_owner_pid == NULL);
1090         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1091                 BUG();
1092
1093         if (!(gh->gh_flags & GL_FLOCK)) {
1094                 existing = find_holder_by_owner(&gl->gl_holders, 
1095                                                 gh->gh_owner_pid);
1096                 if (existing) {
1097                         print_symbol(KERN_WARNING "original: %s\n", 
1098                                      existing->gh_ip);
1099                         printk(KERN_INFO "pid : %d\n",
1100                                         pid_nr(existing->gh_owner_pid));
1101                         printk(KERN_INFO "lock type : %d lock state : %d\n",
1102                                existing->gh_gl->gl_name.ln_type, 
1103                                existing->gh_gl->gl_state);
1104                         print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1105                         printk(KERN_INFO "pid : %d\n",
1106                                         pid_nr(gh->gh_owner_pid));
1107                         printk(KERN_INFO "lock type : %d lock state : %d\n",
1108                                gl->gl_name.ln_type, gl->gl_state);
1109                         BUG();
1110                 }
1111
1112                 existing = find_holder_by_owner(&gl->gl_waiters3, 
1113                                                 gh->gh_owner_pid);
1114                 if (existing) {
1115                         print_symbol(KERN_WARNING "original: %s\n", 
1116                                      existing->gh_ip);
1117                         print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1118                         BUG();
1119                 }
1120         }
1121
1122         if (gh->gh_flags & LM_FLAG_PRIORITY)
1123                 list_add(&gh->gh_list, &gl->gl_waiters3);
1124         else
1125                 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1126 }
1127
1128 /**
1129  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1130  * @gh: the holder structure
1131  *
1132  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1133  *
1134  * Returns: 0, GLR_TRYFAILED, or errno on failure
1135  */
1136
1137 int gfs2_glock_nq(struct gfs2_holder *gh)
1138 {
1139         struct gfs2_glock *gl = gh->gh_gl;
1140         struct gfs2_sbd *sdp = gl->gl_sbd;
1141         int error = 0;
1142
1143 restart:
1144         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1145                 set_bit(HIF_ABORTED, &gh->gh_iflags);
1146                 return -EIO;
1147         }
1148
1149         spin_lock(&gl->gl_spin);
1150         add_to_queue(gh);
1151         run_queue(gl);
1152         spin_unlock(&gl->gl_spin);
1153
1154         if (!(gh->gh_flags & GL_ASYNC)) {
1155                 error = glock_wait_internal(gh);
1156                 if (error == GLR_CANCELED) {
1157                         msleep(100);
1158                         goto restart;
1159                 }
1160         }
1161
1162         return error;
1163 }
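
/*
 * Typical calling pattern (an illustrative sketch, not code from this
 * file; "ip" stands for a struct gfs2_inode the caller already has):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		...  access data protected by the glock  ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */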
1164
1165 /**
1166  * gfs2_glock_poll - poll to see if an async request has been completed
1167  * @gh: the holder
1168  *
1169  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1170  */
1171
1172 int gfs2_glock_poll(struct gfs2_holder *gh)
1173 {
1174         struct gfs2_glock *gl = gh->gh_gl;
1175         int ready = 0;
1176
1177         spin_lock(&gl->gl_spin);
1178
1179         if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1180                 ready = 1;
1181         else if (list_empty(&gh->gh_list)) {
1182                 if (gh->gh_error == GLR_CANCELED) {
1183                         spin_unlock(&gl->gl_spin);
1184                         msleep(100);
1185                         if (gfs2_glock_nq(gh))
1186                                 return 1;
1187                         return 0;
1188                 } else
1189                         ready = 1;
1190         }
1191
1192         spin_unlock(&gl->gl_spin);
1193
1194         return ready;
1195 }
1196
1197 /**
1198  * gfs2_glock_wait - wait for a lock acquisition that was started with GL_ASYNC
1199  * @gh: the holder structure
1200  *
1201  * Returns: 0, GLR_TRYFAILED, or errno on failure
1202  */
1203
1204 int gfs2_glock_wait(struct gfs2_holder *gh)
1205 {
1206         int error;
1207
1208         error = glock_wait_internal(gh);
1209         if (error == GLR_CANCELED) {
1210                 msleep(100);
1211                 gh->gh_flags &= ~GL_ASYNC;
1212                 error = gfs2_glock_nq(gh);
1213         }
1214
1215         return error;
1216 }
1217
1218 /**
1219  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1220  * @gh: the glock holder
1221  *
1222  */
1223
1224 void gfs2_glock_dq(struct gfs2_holder *gh)
1225 {
1226         struct gfs2_glock *gl = gh->gh_gl;
1227         const struct gfs2_glock_operations *glops = gl->gl_ops;
1228         unsigned delay = 0;
1229
1230         if (gh->gh_flags & GL_NOCACHE)
1231                 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1232
1233         gfs2_glmutex_lock(gl);
1234
1235         spin_lock(&gl->gl_spin);
1236         list_del_init(&gh->gh_list);
1237
1238         if (list_empty(&gl->gl_holders)) {
1239                 if (glops->go_unlock) {
1240                         spin_unlock(&gl->gl_spin);
1241                         glops->go_unlock(gh);
1242                         spin_lock(&gl->gl_spin);
1243                 }
1244                 gl->gl_stamp = jiffies;
1245         }
1246
1247         clear_bit(GLF_LOCK, &gl->gl_flags);
1248         spin_unlock(&gl->gl_spin);
1249
1250         gfs2_glock_hold(gl);
1251         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1252             !test_bit(GLF_DEMOTE, &gl->gl_flags))
1253                 delay = gl->gl_ops->go_min_hold_time;
1254         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1255                 gfs2_glock_put(gl);
1256 }
1257
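/**
 * gfs2_glock_dq_wait - dequeue a holder and wait for any demote to finish
 * @gh: the glock holder
 *
 * As gfs2_glock_dq(), but does not return until GLF_DEMOTE has been
 * cleared on the glock.
 */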
1258 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1259 {
1260         struct gfs2_glock *gl = gh->gh_gl;
1261         gfs2_glock_dq(gh);
1262         wait_on_demote(gl);
1263 }
1264
1265 /**
1266  * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1267  * @gh: the holder structure
1268  *
1269  */
1270
1271 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1272 {
1273         gfs2_glock_dq(gh);
1274         gfs2_holder_uninit(gh);
1275 }
1276
1277 /**
1278  * gfs2_glock_nq_num - acquire a glock based on lock number
1279  * @sdp: the filesystem
1280  * @number: the lock number
1281  * @glops: the glock operations for the type of glock
1282  * @state: the state to acquire the glock in
1283  * @flags: modifier flags for the acquisition
1284  * @gh: the struct gfs2_holder
1285  *
1286  * Returns: errno
1287  */
1288
1289 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1290                       const struct gfs2_glock_operations *glops,
1291                       unsigned int state, int flags, struct gfs2_holder *gh)
1292 {
1293         struct gfs2_glock *gl;
1294         int error;
1295
1296         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1297         if (!error) {
1298                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1299                 gfs2_glock_put(gl);
1300         }
1301
1302         return error;
1303 }
1304
1305 /**
1306  * glock_compare - Compare two struct gfs2_glock structures for sorting
1307  * @arg_a: the first structure
1308  * @arg_b: the second structure
1309  *
1310  */
1311
1312 static int glock_compare(const void *arg_a, const void *arg_b)
1313 {
1314         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1315         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1316         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1317         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1318
1319         if (a->ln_number > b->ln_number)
1320                 return 1;
1321         if (a->ln_number < b->ln_number)
1322                 return -1;
1323         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1324         return 0;
1325 }
1326
1327 /**
1328  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1329  * @num_gh: the number of structures
1330  * @ghs: an array of struct gfs2_holder structures
1331  *
1332  * Returns: 0 on success (all glocks acquired),
1333  *          errno on failure (no glocks acquired)
1334  */
1335
1336 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1337                      struct gfs2_holder **p)
1338 {
1339         unsigned int x;
1340         int error = 0;
1341
1342         for (x = 0; x < num_gh; x++)
1343                 p[x] = &ghs[x];
1344
1345         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1346
1347         for (x = 0; x < num_gh; x++) {
1348                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1349
1350                 error = gfs2_glock_nq(p[x]);
1351                 if (error) {
1352                         while (x--)
1353                                 gfs2_glock_dq(p[x]);
1354                         break;
1355                 }
1356         }
1357
1358         return error;
1359 }
1360
1361 /**
1362  * gfs2_glock_nq_m - acquire multiple glocks
1363  * @num_gh: the number of structures
1364  * @ghs: an array of struct gfs2_holder structures
1365  *
1367  * Returns: 0 on success (all glocks acquired),
1368  *          errno on failure (no glocks acquired)
1369  */
1370
1371 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1372 {
1373         struct gfs2_holder *tmp[4];
1374         struct gfs2_holder **pph = tmp;
1375         int error = 0;
1376
1377         switch(num_gh) {
1378         case 0:
1379                 return 0;
1380         case 1:
1381                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1382                 return gfs2_glock_nq(ghs);
1383         default:
1384                 if (num_gh <= 4)
1385                         break;
1386                 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1387                 if (!pph)
1388                         return -ENOMEM;
1389         }
1390
1391         error = nq_m_sync(num_gh, ghs, pph);
1392
1393         if (pph != tmp)
1394                 kfree(pph);
1395
1396         return error;
1397 }
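
/*
 * Example (illustrative only; "ip1"/"ip2" are hypothetical inodes):
 * taking two inode glocks at once, letting nq_m_sync() sort the
 * requests into a deadlock-free order by lock number:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(ip1->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(ip2->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		...  both glocks held  ...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */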
1398
1399 /**
1400  * gfs2_glock_dq_m - release multiple glocks
1401  * @num_gh: the number of structures
1402  * @ghs: an array of struct gfs2_holder structures
1403  *
1404  */
1405
1406 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1407 {
1408         unsigned int x;
1409
1410         for (x = 0; x < num_gh; x++)
1411                 gfs2_glock_dq(&ghs[x]);
1412 }
1413
1414 /**
1415  * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
1416  * @num_gh: the number of structures
1417  * @ghs: an array of struct gfs2_holder structures
1418  *
1419  */
1420
1421 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1422 {
1423         unsigned int x;
1424
1425         for (x = 0; x < num_gh; x++)
1426                 gfs2_glock_dq_uninit(&ghs[x]);
1427 }
1428
1429 /**
1430  * gfs2_lvb_hold - attach an LVB to a glock
1431  * @gl: The glock in question
1432  *
1433  */
1434
1435 int gfs2_lvb_hold(struct gfs2_glock *gl)
1436 {
1437         int error;
1438
1439         gfs2_glmutex_lock(gl);
1440
1441         if (!atomic_read(&gl->gl_lvb_count)) {
1442                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1443                 if (error) {
1444                         gfs2_glmutex_unlock(gl);
1445                         return error;
1446                 }
1447                 gfs2_glock_hold(gl);
1448         }
1449         atomic_inc(&gl->gl_lvb_count);
1450
1451         gfs2_glmutex_unlock(gl);
1452
1453         return 0;
1454 }
1455
1456 /**
1457  * gfs2_lvb_unhold - detach an LVB from a glock
1458  * @gl: The glock in question
1459  *
1460  */
1461
1462 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1463 {
1464         gfs2_glock_hold(gl);
1465         gfs2_glmutex_lock(gl);
1466
1467         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1468         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1469                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1470                 gl->gl_lvb = NULL;
1471                 gfs2_glock_put(gl);
1472         }
1473
1474         gfs2_glmutex_unlock(gl);
1475         gfs2_glock_put(gl);
1476 }
1477
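/**
 * blocking_cb - handle a blocking callback from the lock module
 * @sdp: the filesystem
 * @name: the name of the glock another node wants us to demote
 * @state: the most restrictive state we may keep
 *
 * Queues a (possibly delayed) demote so that the glock is held for at
 * least its minimum hold time before being given up.
 */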
1478 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1479                         unsigned int state)
1480 {
1481         struct gfs2_glock *gl;
1482         unsigned long delay = 0;
1483         unsigned long holdtime;
1484         unsigned long now = jiffies;
1485
1486         gl = gfs2_glock_find(sdp, name);
1487         if (!gl)
1488                 return;
1489
1490         holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1491         if (time_before(now, holdtime))
1492                 delay = holdtime - now;
1493
1494         handle_callback(gl, state, 1, delay);
1495         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1496                 gfs2_glock_put(gl);
1497 }
1498
1499 /**
1500  * gfs2_glock_cb - Callback used by locking module
1501  * @cb_data: Pointer to the superblock
1502  * @type: Type of callback
1503  * @data: Type dependent data pointer
1504  *
1505  * Called by the locking module when it wants to tell us something.
1506  * Either we need to drop a lock, one of our ASYNC requests completed, or
1507  * a journal from another client needs to be recovered.
1508  */
1509
1510 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1511 {
1512         struct gfs2_sbd *sdp = cb_data;
1513
1514         switch (type) {
1515         case LM_CB_NEED_E:
1516                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1517                 return;
1518
1519         case LM_CB_NEED_D:
1520                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1521                 return;
1522
1523         case LM_CB_NEED_S:
1524                 blocking_cb(sdp, data, LM_ST_SHARED);
1525                 return;
1526
1527         case LM_CB_ASYNC: {
1528                 struct lm_async_cb *async = data;
1529                 struct gfs2_glock *gl;
1530
1531                 down_read(&gfs2_umount_flush_sem);
1532                 gl = gfs2_glock_find(sdp, &async->lc_name);
1533                 if (gfs2_assert_warn(sdp, gl)) {
                             up_read(&gfs2_umount_flush_sem);
                             return;
                     }
1535                 xmote_bh(gl, async->lc_ret);
1536                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1537                         gfs2_glock_put(gl);
1538                 up_read(&gfs2_umount_flush_sem);
1539                 return;
1540         }
1541
1542         case LM_CB_NEED_RECOVERY:
1543                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1544                 if (sdp->sd_recoverd_process)
1545                         wake_up_process(sdp->sd_recoverd_process);
1546                 return;
1547
1548         case LM_CB_DROPLOCKS:
1549                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1550                 gfs2_quota_scan(sdp);
1551                 return;
1552
1553         default:
1554                 gfs2_assert_warn(sdp, 0);
1555                 return;
1556         }
1557 }
1558
1559 /**
1560  * demote_ok - Check to see if it's ok to unlock a glock
1561  * @gl: the glock
1562  *
1563  * Returns: 1 if it's ok
1564  */
1565
1566 static int demote_ok(struct gfs2_glock *gl)
1567 {
1568         const struct gfs2_glock_operations *glops = gl->gl_ops;
1569         int demote = 1;
1570
1571         if (test_bit(GLF_STICKY, &gl->gl_flags))
1572                 demote = 0;
1573         else if (glops->go_demote_ok)
1574                 demote = glops->go_demote_ok(gl);
1575
1576         return demote;
1577 }
1578
1579 /**
1580  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1581  * @gl: the glock
1582  *
1583  */
1584
1585 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1586 {
1587         struct gfs2_sbd *sdp = gl->gl_sbd;
1588
1589         spin_lock(&sdp->sd_reclaim_lock);
1590         if (list_empty(&gl->gl_reclaim)) {
1591                 gfs2_glock_hold(gl);
1592                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1593                 atomic_inc(&sdp->sd_reclaim_count);
1594                 spin_unlock(&sdp->sd_reclaim_lock);
1595                 wake_up(&sdp->sd_reclaim_wq);
1596         } else
1597                 spin_unlock(&sdp->sd_reclaim_lock);
1598 }
1599
1600 /**
1601  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1602  * @sdp: the filesystem
1603  *
1604  * Called from the gfs2_glockd() glock reclaim daemon, or when promoting a
1605  * different glock and we notice that there are a lot of glocks in the
1606  * reclaim list.
1607  *
1608  */
1609
1610 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1611 {
1612         struct gfs2_glock *gl;
1613
1614         spin_lock(&sdp->sd_reclaim_lock);
1615         if (list_empty(&sdp->sd_reclaim_list)) {
1616                 spin_unlock(&sdp->sd_reclaim_lock);
1617                 return;
1618         }
1619         gl = list_entry(sdp->sd_reclaim_list.next,
1620                         struct gfs2_glock, gl_reclaim);
1621         list_del_init(&gl->gl_reclaim);
1622         spin_unlock(&sdp->sd_reclaim_lock);
1623
1624         atomic_dec(&sdp->sd_reclaim_count);
1625         atomic_inc(&sdp->sd_reclaimed);
1626
1627         if (gfs2_glmutex_trylock(gl)) {
1628                 if (list_empty(&gl->gl_holders) &&
1629                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1630                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1631                 gfs2_glmutex_unlock(gl);
1632         }
1633
1634         gfs2_glock_put(gl);
1635 }
1636
1637 /**
1638  * examine_bucket - Call a function for each glock in a hash bucket
1639  * @examiner: the function
1640  * @sdp: the filesystem
1641  * @bucket: the bucket
1642  *
1643  * Returns: 1 if the bucket has entries
1644  */
1645
1646 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1647                           unsigned int hash)
1648 {
1649         struct gfs2_glock *gl, *prev = NULL;
1650         int has_entries = 0;
1651         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1652
1653         read_lock(gl_lock_addr(hash));
1654         /* Can't use hlist_for_each_entry - don't want prefetch here */
1655         if (hlist_empty(head))
1656                 goto out;
1657         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1658         while(1) {
1659                 if (!sdp || gl->gl_sbd == sdp) {
1660                         gfs2_glock_hold(gl);
1661                         read_unlock(gl_lock_addr(hash));
1662                         if (prev)
1663                                 gfs2_glock_put(prev);
1664                         prev = gl;
1665                         examiner(gl);
1666                         has_entries = 1;
1667                         read_lock(gl_lock_addr(hash));
1668                 }
1669                 if (gl->gl_list.next == NULL)
1670                         break;
1671                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1672         }
1673 out:
1674         read_unlock(gl_lock_addr(hash));
1675         if (prev)
1676                 gfs2_glock_put(prev);
1677         cond_resched();
1678         return has_entries;
1679 }
1680
1681 /**
1682  * scan_glock - look at a glock and see if we can reclaim it
1683  * @gl: the glock to look at
1684  *
1685  */
1686
1687 static void scan_glock(struct gfs2_glock *gl)
1688 {
1689         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1690                 return;
1691
1692         if (gfs2_glmutex_trylock(gl)) {
1693                 if (list_empty(&gl->gl_holders) &&
1694                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1695                         goto out_schedule;
1696                 gfs2_glmutex_unlock(gl);
1697         }
1698         return;
1699
1700 out_schedule:
1701         gfs2_glmutex_unlock(gl);
1702         gfs2_glock_schedule_for_reclaim(gl);
1703 }
1704
1705 /**
1706  * clear_glock - look at a glock and see if we can free it from glock cache
1707  * @gl: the glock to look at
1708  *
1709  */
1710
1711 static void clear_glock(struct gfs2_glock *gl)
1712 {
1713         struct gfs2_sbd *sdp = gl->gl_sbd;
1714         int released;
1715
1716         spin_lock(&sdp->sd_reclaim_lock);
1717         if (!list_empty(&gl->gl_reclaim)) {
1718                 list_del_init(&gl->gl_reclaim);
1719                 atomic_dec(&sdp->sd_reclaim_count);
1720                 spin_unlock(&sdp->sd_reclaim_lock);
1721                 released = gfs2_glock_put(gl);
1722                 gfs2_assert(sdp, !released);
1723         } else {
1724                 spin_unlock(&sdp->sd_reclaim_lock);
1725         }
1726
1727         if (gfs2_glmutex_trylock(gl)) {
1728                 if (list_empty(&gl->gl_holders) &&
1729                     gl->gl_state != LM_ST_UNLOCKED)
1730                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1731                 gfs2_glmutex_unlock(gl);
1732         }
1733 }
1734
1735 /**
1736  * gfs2_gl_hash_clear - Empty out the glock hash table
1737  * @sdp: the filesystem
1738  * @wait: wait until it's all gone
1739  *
1740  * Called when unmounting the filesystem, or when the inter-node lock
1741  * manager requests DROPLOCKS because it is running out of capacity.
1742  */
1743
1744 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1745 {
1746         unsigned long t;
1747         unsigned int x;
1748         int cont;
1749
1750         t = jiffies;
1751
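        /*
         * clear_glock() only queues demotion requests, so keep sweeping
         * the hash table (flushing inodes between passes) until no bucket
         * reports entries, and warn if the loop appears to have stalled.
         */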
1752         for (;;) {
1753                 cont = 0;
1754                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1755                         if (examine_bucket(clear_glock, sdp, x))
1756                                 cont = 1;
1757                 }
1758
1759                 if (!wait || !cont)
1760                         break;
1761
1762                 if (time_after_eq(jiffies,
1763                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1764                         fs_warn(sdp, "Unmount seems to be stalled. "
1765                                      "Dumping lock state...\n");
1766                         gfs2_dump_lockstate(sdp);
1767                         t = jiffies;
1768                 }
1769
1770                 down_write(&gfs2_umount_flush_sem);
1771                 invalidate_inodes(sdp->sd_vfs);
1772                 up_write(&gfs2_umount_flush_sem);
1773                 msleep(10);
1774         }
1775 }
1776
1777 /*
1778  *  Diagnostic routines to help debug distributed deadlock
1779  */
1780
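/* Resolve @address into a human-readable symbol name and print it */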
1781 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1782                               unsigned long address)
1783 {
1784         char buffer[KSYM_SYMBOL_LEN];
1785
1786         sprint_symbol(buffer, address);
1787         print_dbg(gi, fmt, buffer);
1788 }
1789
1790 /**
1791  * dump_holder - print information about a glock holder
1792  * @gi: the glock iterator (if NULL, output goes to the console)
1793  * @str: a string naming the type of holder
1794  * @gh: the glock holder
1795  * Returns: 0 on success, -ENOBUFS when we run out of space
1796  */
1797
1798 static int dump_holder(struct glock_iter *gi, char *str,
1799                        struct gfs2_holder *gh)
1800 {
1801         unsigned int x;
1802         struct task_struct *gh_owner;
1803
1804         print_dbg(gi, "  %s\n", str);
1805         if (gh->gh_owner_pid) {
1806                 print_dbg(gi, "    owner = %ld ",
1807                                 (long)pid_nr(gh->gh_owner_pid));
1808                 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1809                 if (gh_owner)
1810                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1811                 else
1812                         print_dbg(gi, "(ended)\n");
1813         } else
1814                 print_dbg(gi, "    owner = -1\n");
1815         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1816         print_dbg(gi, "    gh_flags =");
1817         for (x = 0; x < 32; x++)
1818                 if (gh->gh_flags & (1 << x))
1819                         print_dbg(gi, " %u", x);
1820         print_dbg(gi, " \n");
1821         print_dbg(gi, "    error = %d\n", gh->gh_error);
1822         print_dbg(gi, "    gh_iflags =");
1823         for (x = 0; x < 32; x++)
1824                 if (test_bit(x, &gh->gh_iflags))
1825                         print_dbg(gi, " %u", x);
1826         print_dbg(gi, " \n");
1827         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1828
1829         return 0;
1830 }
1831
1832 /**
1833  * dump_inode - print information about an inode
1834  * @ip: the inode
1835  *
1836  * Returns: 0 on success, -ENOBUFS when we run out of space
1837  */
1838
1839 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1840 {
1841         unsigned int x;
1842
1843         print_dbg(gi, "  Inode:\n");
1844         print_dbg(gi, "    num = %llu/%llu\n",
1845                   (unsigned long long)ip->i_no_formal_ino,
1846                   (unsigned long long)ip->i_no_addr);
1847         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1848         print_dbg(gi, "    i_flags =");
1849         for (x = 0; x < 32; x++)
1850                 if (test_bit(x, &ip->i_flags))
1851                         print_dbg(gi, " %u", x);
1852         print_dbg(gi, " \n");
1853         return 0;
1854 }
1855
1856 /**
1857  * dump_glock - print information about a glock
1858  * @gi: the glock iterator (if NULL, output goes to the console)
1859  * @gl: the glock
1860  *
1861  * Returns: 0 on success, -ENOBUFS when we run out of space
1862  */
1863
1864 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1865 {
1866         struct gfs2_holder *gh;
1867         unsigned int x;
1868         int error = -ENOBUFS;
1869         struct task_struct *gl_owner;
1870
1871         spin_lock(&gl->gl_spin);
1872
1873         print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1874                    (unsigned long long)gl->gl_name.ln_number);
1875         print_dbg(gi, "  gl_flags =");
1876         for (x = 0; x < 32; x++) {
1877                 if (test_bit(x, &gl->gl_flags))
1878                         print_dbg(gi, " %u", x);
1879         }
1880         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1881                 print_dbg(gi, " (unlocked)");
1882         print_dbg(gi, " \n");
1883         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1884         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1885         if (gl->gl_owner_pid) {
1886                 gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
1887                 if (gl_owner)
1888                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1889                                   pid_nr(gl->gl_owner_pid), gl_owner->comm);
1890                 else
1891                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1892                                   pid_nr(gl->gl_owner_pid));
1893         } else
1894                 print_dbg(gi, "  gl_owner = -1\n");
1895         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1896         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1897         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1898         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1899         print_dbg(gi, "  reclaim = %s\n",
1900                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1901         if (gl->gl_aspace)
1902                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1903                            gl->gl_aspace->i_mapping->nrpages);
1904         else
1905                 print_dbg(gi, "  aspace = no\n");
1906         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1907         if (gl->gl_req_gh) {
1908                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1909                 if (error)
1910                         goto out;
1911         }
1912         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1913                 error = dump_holder(gi, "Holder", gh);
1914                 if (error)
1915                         goto out;
1916         }
1917         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1918                 error = dump_holder(gi, "Waiter1", gh);
1919                 if (error)
1920                         goto out;
1921         }
1922         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1923                 error = dump_holder(gi, "Waiter3", gh);
1924                 if (error)
1925                         goto out;
1926         }
1927         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1928                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1929                           gl->gl_demote_state, (unsigned long long)
1930                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1931         }
1932         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1933                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1934                         list_empty(&gl->gl_holders)) {
1935                         error = dump_inode(gi, gl->gl_object);
1936                         if (error)
1937                                 goto out;
1938                 } else {
1939                         error = -ENOBUFS;
1940                         print_dbg(gi, "  Inode: busy\n");
1941                 }
1942         }
1943
1944         error = 0;
1945
1946 out:
1947         spin_unlock(&gl->gl_spin);
1948         return error;
1949 }
1950
1951 /**
1952  * gfs2_dump_lockstate - print out the current lockstate
1953  * @sdp: the filesystem
1954  *
1955  * Dumps the state of every glock belonging to @sdp to the console
1956  * by calling dump_glock() with a NULL iterator.
1957  *
1958  */
1959
1960 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1961 {
1962         struct gfs2_glock *gl;
1963         struct hlist_node *h;
1964         unsigned int x;
1965         int error = 0;
1966
1967         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1968
1969                 read_lock(gl_lock_addr(x));
1970
1971                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1972                         if (gl->gl_sbd != sdp)
1973                                 continue;
1974
1975                         error = dump_glock(NULL, gl);
1976                         if (error)
1977                                 break;
1978                 }
1979
1980                 read_unlock(gl_lock_addr(x));
1981
1982                 if (error)
1983                         break;
1984         }
1985
1987         return error;
1988 }
1989
1990 /**
1991  * gfs2_scand - Look for cached glocks and inodes to toss from memory
1992  * @data: thread data (unused; scand examines every glock in the system)
1993  *
1994  * A single instance of this daemon runs, finding candidates to add to
1995  * each superblock's sd_reclaim_list.  See gfs2_glockd().
1996  */
1997
1998 static int gfs2_scand(void *data)
1999 {
2000         unsigned x;
2001         unsigned delay;
2002
2003         while (!kthread_should_stop()) {
2004                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2005                         examine_bucket(scan_glock, NULL, x);
2006                 if (freezing(current))
2007                         refrigerator();
2008                 delay = scand_secs;
2009                 if (delay < 1)
2010                         delay = 1;
2011                 schedule_timeout_interruptible(delay * HZ);
2012         }
2013
2014         return 0;
2015 }
2016
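/**
 * gfs2_glock_init - initialise the glock module state
 *
 * Sets up the glock hash table (and its chain locks, when present),
 * then starts the scand thread and the glock workqueue.
 *
 * Returns: 0 on success, or a negative errno
 */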
2019 int __init gfs2_glock_init(void)
2020 {
2021         unsigned i;
2022         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2023                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2024         }
2025 #ifdef GL_HASH_LOCK_SZ
2026         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
2027                 rwlock_init(&gl_hash_locks[i]);
2028         }
2029 #endif
2030
2031         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2032         if (IS_ERR(scand_process))
2033                 return PTR_ERR(scand_process);
2034
2035         glock_workqueue = create_workqueue("glock_workqueue");
2036         if (!glock_workqueue) { /* create_workqueue() returns NULL on failure */
2037                 kthread_stop(scand_process);
2038                 return -ENOMEM;
2039         }
2040
2041         return 0;
2042 }
2043
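/**
 * gfs2_glock_exit - tear down the glock module state
 *
 * Destroys the glock workqueue (flushing any pending work) and stops
 * the scand thread.
 */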
2044 void gfs2_glock_exit(void)
2045 {
2046         destroy_workqueue(glock_workqueue);
2047         kthread_stop(scand_process);
2048 }
2049
2050 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2051 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
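/*
 * For example (illustrative only): the scan interval can be set at load
 * time with "modprobe gfs2 scand_secs=10", or changed at runtime through
 * /sys/module/gfs2/parameters/scand_secs, since the parameter is
 * declared with S_IRUGO|S_IWUSR above.
 */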
2052
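/**
 * gfs2_glock_iter_next - advance the iterator to the next glock
 * @gi: the glock iterator
 *
 * Steps along the current hash chain, then on to subsequent buckets,
 * holding a reference on each glock visited and dropping the reference
 * on the previous one.  Glocks belonging to other superblocks are
 * skipped via the restart path.
 *
 * Returns: 0 on success, 1 when the end of the hash table is reached
 */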
2053 static int gfs2_glock_iter_next(struct glock_iter *gi)
2054 {
2055         struct gfs2_glock *gl;
2056
2057 restart:
2058         read_lock(gl_lock_addr(gi->hash));
2059         gl = gi->gl;
2060         if (gl) {
2061                 gi->gl = hlist_entry(gl->gl_list.next,
2062                                      struct gfs2_glock, gl_list);
2063                 if (gi->gl)
2064                         gfs2_glock_hold(gi->gl);
2065         }
2066         read_unlock(gl_lock_addr(gi->hash));
2067         if (gl)
2068                 gfs2_glock_put(gl);
2069         if (gl && gi->gl == NULL)
2070                 gi->hash++;
2071         while (gi->gl == NULL) {
2072                 if (gi->hash >= GFS2_GL_HASH_SIZE)
2073                         return 1;
2074                 read_lock(gl_lock_addr(gi->hash));
2075                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2076                                      struct gfs2_glock, gl_list);
2077                 if (gi->gl)
2078                         gfs2_glock_hold(gi->gl);
2079                 read_unlock(gl_lock_addr(gi->hash));
2080                 gi->hash++;
2081         }
2082
2083         if (gi->sdp != gi->gl->gl_sbd)
2084                 goto restart;
2085
2086         return 0;
2087 }
2088
2089 static void gfs2_glock_iter_free(struct glock_iter *gi)
2090 {
2091         if (gi->gl)
2092                 gfs2_glock_put(gi->gl);
2093         kfree(gi);
2094 }
2095
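/**
 * gfs2_glock_iter_init - allocate and prime a glock iterator
 * @sdp: the filesystem whose glocks are to be iterated
 *
 * Returns: an iterator positioned at the first glock belonging to @sdp,
 * or NULL on allocation failure or if @sdp has no glocks
 */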
2096 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2097 {
2098         struct glock_iter *gi;
2099
2100         gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2101         if (!gi)
2102                 return NULL;
2103
2104         gi->sdp = sdp;
2105         gi->hash = 0;
2106         gi->seq = NULL;
2107         gi->gl = NULL;
2108         memset(gi->string, 0, sizeof(gi->string));
2109
2110         if (gfs2_glock_iter_next(gi)) {
2111                 gfs2_glock_iter_free(gi);
2112                 return NULL;
2113         }
2114
2115         return gi;
2116 }
2117
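/*
 * seq_file callbacks for the debugfs "glocks" file: ->start allocates a
 * fresh iterator and advances it *pos times, ->next steps to the next
 * glock, ->show dumps one glock via dump_glock(), and ->stop frees the
 * iterator.  Note that restarting from scratch in ->start makes reading
 * the whole file O(n^2) in the number of glocks.
 */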
2118 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2119 {
2120         struct glock_iter *gi;
2121         loff_t n = *pos;
2122
2123         gi = gfs2_glock_iter_init(file->private);
2124         if (!gi)
2125                 return NULL;
2126
2127         while (n--) {
2128                 if (gfs2_glock_iter_next(gi)) {
2129                         gfs2_glock_iter_free(gi);
2130                         return NULL;
2131                 }
2132         }
2133
2134         return gi;
2135 }
2136
2137 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2138                                  loff_t *pos)
2139 {
2140         struct glock_iter *gi = iter_ptr;
2141
2142         (*pos)++;
2143
2144         if (gfs2_glock_iter_next(gi)) {
2145                 gfs2_glock_iter_free(gi);
2146                 return NULL;
2147         }
2148
2149         return gi;
2150 }
2151
2152 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2153 {
2154         struct glock_iter *gi = iter_ptr;
2155         if (gi)
2156                 gfs2_glock_iter_free(gi);
2157 }
2158
2159 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2160 {
2161         struct glock_iter *gi = iter_ptr;
2162
2163         gi->seq = file;
2164         dump_glock(gi, gi->gl);
2165
2166         return 0;
2167 }
2168
2169 static const struct seq_operations gfs2_glock_seq_ops = {
2170         .start = gfs2_glock_seq_start,
2171         .next  = gfs2_glock_seq_next,
2172         .stop  = gfs2_glock_seq_stop,
2173         .show  = gfs2_glock_seq_show,
2174 };
2175
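/*
 * Open hook for the debugfs file: wire the per-superblock pointer stored
 * in i_private up to the seq_file's private field.
 */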
2176 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2177 {
2178         struct seq_file *seq;
2179         int ret;
2180
2181         ret = seq_open(file, &gfs2_glock_seq_ops);
2182         if (ret)
2183                 return ret;
2184
2185         seq = file->private_data;
2186         seq->private = inode->i_private;
2187
2188         return 0;
2189 }
2190
2191 static const struct file_operations gfs2_debug_fops = {
2192         .owner   = THIS_MODULE,
2193         .open    = gfs2_debugfs_open,
2194         .read    = seq_read,
2195         .llseek  = seq_lseek,
2196         .release = seq_release
2197 };
2198
2199 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2200 {
2201         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2202         if (!sdp->debugfs_dir)
2203                 return -ENOMEM;
2204         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2205                                                          S_IFREG | S_IRUGO,
2206                                                          sdp->debugfs_dir, sdp,
2207                                                          &gfs2_debug_fops);
2208         if (!sdp->debugfs_dentry_glocks)
2209                 return -ENOMEM;
2210
2211         return 0;
2212 }
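
/*
 * Assuming debugfs is mounted at /sys/kernel/debug, the file created
 * above appears as /sys/kernel/debug/gfs2/<table_name>/glocks; reading
 * it walks every glock of the filesystem through the seq_file interface.
 */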
2213
2214 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2215 {
2216         if (sdp && sdp->debugfs_dir) {
2217                 if (sdp->debugfs_dentry_glocks) {
2218                         debugfs_remove(sdp->debugfs_dentry_glocks);
2219                         sdp->debugfs_dentry_glocks = NULL;
2220                 }
2221                 debugfs_remove(sdp->debugfs_dir);
2222                 sdp->debugfs_dir = NULL;
2223         }
2224 }
2225
2226 int gfs2_register_debugfs(void)
2227 {
2228         gfs2_root = debugfs_create_dir("gfs2", NULL);
2229         return gfs2_root ? 0 : -ENOMEM;
2230 }
2231
2232 void gfs2_unregister_debugfs(void)
2233 {
2234         debugfs_remove(gfs2_root);
2235         gfs2_root = NULL;
2236 }