[GFS2] Fix bz 224480 and clean up glock demotion code
[safe/jmp/linux-2.6] fs/gfs2/glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <linux/rwsem.h>
25 #include <asm/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
28
29 #include "gfs2.h"
30 #include "incore.h"
31 #include "glock.h"
32 #include "glops.h"
33 #include "inode.h"
34 #include "lm.h"
35 #include "lops.h"
36 #include "meta_io.h"
37 #include "quota.h"
38 #include "super.h"
39 #include "util.h"
40
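/* A hash chain is just a list head; the locks protecting the chains live in a
 * separate, smaller array (see the comment above gl_lock_addr() below). */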
41 struct gfs2_gl_hash_bucket {
42         struct hlist_head hb_list;
43 };
44
45 struct glock_iter {
46         int hash;                     /* hash bucket index         */
47         struct gfs2_sbd *sdp;         /* incore superblock         */
48         struct gfs2_glock *gl;        /* current glock struct      */
49         struct hlist_head *hb_list;   /* current hash bucket ptr   */
50         struct seq_file *seq;         /* sequence file for debugfs */
51         char string[512];             /* scratch space             */
52 };
53
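/* An examiner is called by examine_bucket() for every glock in a bucket that
 * belongs to the given superblock; scan_glock() and clear_glock() are the two
 * examiners used below. */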
54 typedef void (*glock_examiner) (struct gfs2_glock * gl);
55
56 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
57 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
58 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
59 static DECLARE_RWSEM(gfs2_umount_flush_sem);
60 static struct dentry *gfs2_root;
61
62 #define GFS2_GL_HASH_SHIFT      15
63 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
64 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
65
66 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
67
68 /*
69  * Despite what you might think, the numbers below are not arbitrary :-)
70  * They are taken from the ipv4 routing hash code, which is well tested
71  * and thus should be nearly optimal. Later on we might tweak the numbers
72  * but for now this should be fine.
73  *
74  * The reason for putting the locks in a separate array from the list heads
75  * is that we can have fewer locks than list heads and save memory. We use
76  * the same hash function for both, but with a different hash mask.
77  */
78 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
79         defined(CONFIG_PROVE_LOCKING)
80
81 #ifdef CONFIG_LOCKDEP
82 # define GL_HASH_LOCK_SZ        256
83 #else
84 # if NR_CPUS >= 32
85 #  define GL_HASH_LOCK_SZ       4096
86 # elif NR_CPUS >= 16
87 #  define GL_HASH_LOCK_SZ       2048
88 # elif NR_CPUS >= 8
89 #  define GL_HASH_LOCK_SZ       1024
90 # elif NR_CPUS >= 4
91 #  define GL_HASH_LOCK_SZ       512
92 # else
93 #  define GL_HASH_LOCK_SZ       256
94 # endif
95 #endif
96
97 /* We never want more locks than chains */
98 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
99 # undef GL_HASH_LOCK_SZ
100 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
101 #endif
102
103 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
104
105 static inline rwlock_t *gl_lock_addr(unsigned int x)
106 {
107         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
108 }
109 #else /* not SMP, so no spinlocks required */
110 static inline rwlock_t *gl_lock_addr(unsigned int x)
111 {
112         return NULL;
113 }
114 #endif
115
116 /**
117  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
118  * @actual: the current state of the lock
119  * @requested: the lock state that was requested by the caller
120  * @flags: the modifier flags passed in by the caller
121  *
122  * Returns: 1 if the locks are compatible, 0 otherwise
123  */
124
125 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
126                                    int flags)
127 {
128         if (actual == requested)
129                 return 1;
130
131         if (flags & GL_EXACT)
132                 return 0;
133
134         if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
135                 return 1;
136
137         if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
138                 return 1;
139
140         return 0;
141 }
142
143 /**
144  * gl_hash() - Turn glock number into hash bucket number
145  * @sdp: The GFS2 superblock
 * @name: The lock name
146  *
147  * Returns: The number of the corresponding hash bucket
148  */
149
150 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
151                             const struct lm_lockname *name)
152 {
153         unsigned int h;
154
155         h = jhash(&name->ln_number, sizeof(u64), 0);
156         h = jhash(&name->ln_type, sizeof(unsigned int), h);
157         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
158         h &= GFS2_GL_HASH_MASK;
159
160         return h;
161 }
162
163 /**
164  * glock_free() - Perform a few checks and then release struct gfs2_glock
165  * @gl: The glock to release
166  *
167  * Also calls lock module to release its internal structure for this glock.
168  *
169  */
170
171 static void glock_free(struct gfs2_glock *gl)
172 {
173         struct gfs2_sbd *sdp = gl->gl_sbd;
174         struct inode *aspace = gl->gl_aspace;
175
176         gfs2_lm_put_lock(sdp, gl->gl_lock);
177
178         if (aspace)
179                 gfs2_aspace_put(aspace);
180
181         kmem_cache_free(gfs2_glock_cachep, gl);
182 }
183
184 /**
185  * gfs2_glock_hold() - increment reference count on glock
186  * @gl: The glock to hold
187  *
188  */
189
190 void gfs2_glock_hold(struct gfs2_glock *gl)
191 {
192         atomic_inc(&gl->gl_ref);
193 }
194
195 /**
196  * gfs2_glock_put() - Decrement reference count on glock
197  * @gl: The glock to put
198  *
199  */
200
201 int gfs2_glock_put(struct gfs2_glock *gl)
202 {
203         int rv = 0;
204         struct gfs2_sbd *sdp = gl->gl_sbd;
205
206         write_lock(gl_lock_addr(gl->gl_hash));
207         if (atomic_dec_and_test(&gl->gl_ref)) {
208                 hlist_del(&gl->gl_list);
209                 write_unlock(gl_lock_addr(gl->gl_hash));
210                 BUG_ON(spin_is_locked(&gl->gl_spin));
211                 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
212                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
213                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
214                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
215                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
216                 glock_free(gl);
217                 rv = 1;
218                 goto out;
219         }
220         write_unlock(gl_lock_addr(gl->gl_hash));
221 out:
222         return rv;
223 }
224
225 /**
226  * search_bucket() - Find struct gfs2_glock by lock number
227  * @hash: the index of the hash bucket to search
228  * @sdp: the filesystem
 * @name: The lock name
229  *
230  * Returns: NULL, or the struct gfs2_glock with the requested number
231  */
232
233 static struct gfs2_glock *search_bucket(unsigned int hash,
234                                         const struct gfs2_sbd *sdp,
235                                         const struct lm_lockname *name)
236 {
237         struct gfs2_glock *gl;
238         struct hlist_node *h;
239
240         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
241                 if (!lm_name_equal(&gl->gl_name, name))
242                         continue;
243                 if (gl->gl_sbd != sdp)
244                         continue;
245
246                 atomic_inc(&gl->gl_ref);
247
248                 return gl;
249         }
250
251         return NULL;
252 }
253
254 /**
255  * gfs2_glock_find() - Find glock by lock number
256  * @sdp: The GFS2 superblock
257  * @name: The lock name
258  *
259  * Returns: NULL, or the struct gfs2_glock with the requested number
260  */
261
262 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
263                                           const struct lm_lockname *name)
264 {
265         unsigned int hash = gl_hash(sdp, name);
266         struct gfs2_glock *gl;
267
268         read_lock(gl_lock_addr(hash));
269         gl = search_bucket(hash, sdp, name);
270         read_unlock(gl_lock_addr(hash));
271
272         return gl;
273 }
274
275 /**
276  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
277  * @sdp: The GFS2 superblock
278  * @number: the lock number
279  * @glops: The glock_operations to use
280  * @create: If 0, don't create the glock if it doesn't exist
281  * @glp: the glock is returned here
282  *
283  * This does not lock a glock, just finds/creates structures for one.
284  *
285  * Returns: errno
286  */
287
288 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
289                    const struct gfs2_glock_operations *glops, int create,
290                    struct gfs2_glock **glp)
291 {
292         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
293         struct gfs2_glock *gl, *tmp;
294         unsigned int hash = gl_hash(sdp, &name);
295         int error;
296
297         read_lock(gl_lock_addr(hash));
298         gl = search_bucket(hash, sdp, &name);
299         read_unlock(gl_lock_addr(hash));
300
301         if (gl || !create) {
302                 *glp = gl;
303                 return 0;
304         }
305
306         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
307         if (!gl)
308                 return -ENOMEM;
309
310         gl->gl_flags = 0;
311         gl->gl_name = name;
312         atomic_set(&gl->gl_ref, 1);
313         gl->gl_state = LM_ST_UNLOCKED;
314         gl->gl_hash = hash;
315         gl->gl_owner = NULL;
316         gl->gl_ip = 0;
317         gl->gl_ops = glops;
318         gl->gl_req_gh = NULL;
319         gl->gl_req_bh = NULL;
320         gl->gl_vn = 0;
321         gl->gl_stamp = jiffies;
322         gl->gl_object = NULL;
323         gl->gl_sbd = sdp;
324         gl->gl_aspace = NULL;
325         lops_init_le(&gl->gl_le, &gfs2_glock_lops);
326
327         /* If this glock protects actual on-disk data or metadata blocks,
328            create a VFS inode to manage the pages/buffers holding them. */
329         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
330                 gl->gl_aspace = gfs2_aspace_get(sdp);
331                 if (!gl->gl_aspace) {
332                         error = -ENOMEM;
333                         goto fail;
334                 }
335         }
336
337         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
338         if (error)
339                 goto fail_aspace;
340
341         write_lock(gl_lock_addr(hash));
342         tmp = search_bucket(hash, sdp, &name);
343         if (tmp) {
344                 write_unlock(gl_lock_addr(hash));
345                 glock_free(gl);
346                 gl = tmp;
347         } else {
348                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
349                 write_unlock(gl_lock_addr(hash));
350         }
351
352         *glp = gl;
353
354         return 0;
355
356 fail_aspace:
357         if (gl->gl_aspace)
358                 gfs2_aspace_put(gl->gl_aspace);
359 fail:
360         kmem_cache_free(gfs2_glock_cachep, gl);
361         return error;
362 }
363
364 /**
365  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
366  * @gl: the glock
367  * @state: the state we're requesting
368  * @flags: the modifier flags
369  * @gh: the holder structure
370  *
371  */
372
373 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
374                       struct gfs2_holder *gh)
375 {
376         INIT_LIST_HEAD(&gh->gh_list);
377         gh->gh_gl = gl;
378         gh->gh_ip = (unsigned long)__builtin_return_address(0);
379         gh->gh_owner = current;
380         gh->gh_state = state;
381         gh->gh_flags = flags;
382         gh->gh_error = 0;
383         gh->gh_iflags = 0;
384         gfs2_glock_hold(gl);
385 }
386
387 /**
388  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
389  * @state: the state we're requesting
390  * @flags: the modifier flags
391  * @gh: the holder structure
392  *
393  * Don't mess with the glock.
394  *
395  */
396
397 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
398 {
399         gh->gh_state = state;
400         gh->gh_flags = flags;
401         gh->gh_iflags = 0;
402         gh->gh_ip = (unsigned long)__builtin_return_address(0);
403 }
404
405 /**
406  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
407  * @gh: the holder structure
408  *
409  */
410
411 void gfs2_holder_uninit(struct gfs2_holder *gh)
412 {
413         gfs2_glock_put(gh->gh_gl);
414         gh->gh_gl = NULL;
415         gh->gh_ip = 0;
416 }
417
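/*
 * gfs2_holder_wake - clear HIF_WAIT and wake anyone sleeping in wait_on_holder()
 * @gh: the holder being signalled
 *
 * The memory barrier orders the bit clear before the wakeup so that a waiter
 * re-checking the bit cannot miss it and go back to sleep.
 */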
418 static void gfs2_holder_wake(struct gfs2_holder *gh)
419 {
420         clear_bit(HIF_WAIT, &gh->gh_iflags);
421         smp_mb();
422         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
423 }
424
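/* wait_on_bit() action routine: simply sleep until gfs2_holder_wake() runs */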
425 static int holder_wait(void *word)
426 {
427         schedule();
428         return 0;
429 }
430
431 static void wait_on_holder(struct gfs2_holder *gh)
432 {
433         might_sleep();
434         wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
435 }
436
437 /**
438  * rq_mutex - process a mutex request in the queue
439  * @gh: the glock holder
440  *
441  * Returns: 1 if the queue is blocked
442  */
443
444 static int rq_mutex(struct gfs2_holder *gh)
445 {
446         struct gfs2_glock *gl = gh->gh_gl;
447
448         list_del_init(&gh->gh_list);
449         /*  gh->gh_error never examined.  */
450         set_bit(GLF_LOCK, &gl->gl_flags);
451         clear_bit(HIF_WAIT, &gh->gh_iflags);
452         smp_mb();
453         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
454
455         return 1;
456 }
457
458 /**
459  * rq_promote - process a promote request in the queue
460  * @gh: the glock holder
461  *
462  * Acquire a new inter-node lock, or change a lock state to more restrictive.
463  *
464  * Returns: 1 if the queue is blocked
465  */
466
467 static int rq_promote(struct gfs2_holder *gh)
468 {
469         struct gfs2_glock *gl = gh->gh_gl;
470         struct gfs2_sbd *sdp = gl->gl_sbd;
471
472         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
473                 if (list_empty(&gl->gl_holders)) {
474                         gl->gl_req_gh = gh;
475                         set_bit(GLF_LOCK, &gl->gl_flags);
476                         spin_unlock(&gl->gl_spin);
477
478                         if (atomic_read(&sdp->sd_reclaim_count) >
479                             gfs2_tune_get(sdp, gt_reclaim_limit) &&
480                             !(gh->gh_flags & LM_FLAG_PRIORITY)) {
481                                 gfs2_reclaim_glock(sdp);
482                                 gfs2_reclaim_glock(sdp);
483                         }
484
485                         gfs2_glock_xmote_th(gh->gh_gl, gh);
486                         spin_lock(&gl->gl_spin);
487                 }
488                 return 1;
489         }
490
491         if (list_empty(&gl->gl_holders)) {
492                 set_bit(HIF_FIRST, &gh->gh_iflags);
493                 set_bit(GLF_LOCK, &gl->gl_flags);
494         } else {
495                 struct gfs2_holder *next_gh;
496                 if (gh->gh_state == LM_ST_EXCLUSIVE)
497                         return 1;
498                 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
499                                      gh_list);
500                 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
501                         return 1;
502         }
503
504         list_move_tail(&gh->gh_list, &gl->gl_holders);
505         gh->gh_error = 0;
506         set_bit(HIF_HOLDER, &gh->gh_iflags);
507
508         gfs2_holder_wake(gh);
509
510         return 0;
511 }
512
513 /**
514  * rq_demote - process a demote request in the queue
515  * @gl: the glock
516  *
517  * Returns: 1 if the queue is blocked
518  */
519
520 static int rq_demote(struct gfs2_glock *gl)
521 {
522         if (!list_empty(&gl->gl_holders))
523                 return 1;
524
525         if (gl->gl_state == gl->gl_demote_state ||
526             gl->gl_state == LM_ST_UNLOCKED) {
527                 clear_bit(GLF_DEMOTE, &gl->gl_flags);
528                 return 0;
529         }
530         set_bit(GLF_LOCK, &gl->gl_flags);
531         spin_unlock(&gl->gl_spin);
532         if (gl->gl_demote_state == LM_ST_UNLOCKED ||
533             gl->gl_state != LM_ST_EXCLUSIVE)
534                 gfs2_glock_drop_th(gl);
535         else
536                 gfs2_glock_xmote_th(gl, NULL);
537         spin_lock(&gl->gl_spin);
538
539         return 0;
540 }
541
542 /**
543  * run_queue - process holder structures on a glock
544  * @gl: the glock
545  *
546  */
547 static void run_queue(struct gfs2_glock *gl)
548 {
549         struct gfs2_holder *gh;
550         int blocked = 1;
551
552         for (;;) {
553                 if (test_bit(GLF_LOCK, &gl->gl_flags))
554                         break;
555
556                 if (!list_empty(&gl->gl_waiters1)) {
557                         gh = list_entry(gl->gl_waiters1.next,
558                                         struct gfs2_holder, gh_list);
559
560                         if (test_bit(HIF_MUTEX, &gh->gh_iflags))
561                                 blocked = rq_mutex(gh);
562                         else
563                                 gfs2_assert_warn(gl->gl_sbd, 0);
564
565                 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
566                         blocked = rq_demote(gl);
567                 } else if (!list_empty(&gl->gl_waiters3)) {
568                         gh = list_entry(gl->gl_waiters3.next,
569                                         struct gfs2_holder, gh_list);
570
571                         if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
572                                 blocked = rq_promote(gh);
573                         else
574                                 gfs2_assert_warn(gl->gl_sbd, 0);
575
576                 } else
577                         break;
578
579                 if (blocked)
580                         break;
581         }
582 }
583
584 /**
585  * gfs2_glmutex_lock - acquire a local lock on a glock
586  * @gl: the glock
587  *
588  * Gives caller exclusive access to manipulate a glock structure.
589  */
590
591 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
592 {
593         struct gfs2_holder gh;
594
595         gfs2_holder_init(gl, 0, 0, &gh);
596         set_bit(HIF_MUTEX, &gh.gh_iflags);
597         if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
598                 BUG();
599
600         spin_lock(&gl->gl_spin);
601         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
602                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
603         } else {
604                 gl->gl_owner = current;
605                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
606                 clear_bit(HIF_WAIT, &gh.gh_iflags);
607                 smp_mb();
608                 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
609         }
610         spin_unlock(&gl->gl_spin);
611
612         wait_on_holder(&gh);
613         gfs2_holder_uninit(&gh);
614 }
615
616 /**
617  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
618  * @gl: the glock
619  *
620  * Returns: 1 if the glock is acquired
621  */
622
623 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
624 {
625         int acquired = 1;
626
627         spin_lock(&gl->gl_spin);
628         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
629                 acquired = 0;
630         } else {
631                 gl->gl_owner = current;
632                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
633         }
634         spin_unlock(&gl->gl_spin);
635
636         return acquired;
637 }
638
639 /**
640  * gfs2_glmutex_unlock - release a local lock on a glock
641  * @gl: the glock
642  *
643  */
644
645 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
646 {
647         spin_lock(&gl->gl_spin);
648         clear_bit(GLF_LOCK, &gl->gl_flags);
649         gl->gl_owner = NULL;
650         gl->gl_ip = 0;
651         run_queue(gl);
652         BUG_ON(!spin_is_locked(&gl->gl_spin));
653         spin_unlock(&gl->gl_spin);
654 }
655
656 /**
657  * handle_callback - process a demote request
658  * @gl: the glock
659  * @state: the state the caller wants us to change to
660  *
661  * There are only two requests that we are going to see in actual
662  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
663  */
664
665 static void handle_callback(struct gfs2_glock *gl, unsigned int state)
666 {
667         spin_lock(&gl->gl_spin);
668         if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
669                 gl->gl_demote_state = state;
670                 gl->gl_demote_time = jiffies;
671         } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
672                 gl->gl_demote_state = state;
673         }
674         spin_unlock(&gl->gl_spin);
675 }
676
677 /**
678  * state_change - record that the glock is now in a different state
679  * @gl: the glock
680  * @new_state: the new state
681  *
682  */
683
684 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
685 {
686         int held1, held2;
687
688         held1 = (gl->gl_state != LM_ST_UNLOCKED);
689         held2 = (new_state != LM_ST_UNLOCKED);
690
691         if (held1 != held2) {
692                 if (held2)
693                         gfs2_glock_hold(gl);
694                 else
695                         gfs2_glock_put(gl);
696         }
697
698         gl->gl_state = new_state;
699 }
700
701 /**
702  * xmote_bh - Called after the lock module is done acquiring a lock
703  * @gl: The glock in question
704  * @ret: the int returned from the lock module
705  *
706  */
707
708 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
709 {
710         struct gfs2_sbd *sdp = gl->gl_sbd;
711         const struct gfs2_glock_operations *glops = gl->gl_ops;
712         struct gfs2_holder *gh = gl->gl_req_gh;
713         int prev_state = gl->gl_state;
714         int op_done = 1;
715
716         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
717         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
718         gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
719
720         state_change(gl, ret & LM_OUT_ST_MASK);
721
722         if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
723                 if (glops->go_inval)
724                         glops->go_inval(gl, DIO_METADATA);
725         } else if (gl->gl_state == LM_ST_DEFERRED) {
726                 /* We might not want to do this here.
727                    Look at moving to the inode glops. */
728                 if (glops->go_inval)
729                         glops->go_inval(gl, 0);
730         }
731
732         /*  Deal with each possible exit condition  */
733
734         if (!gh) {
735                 gl->gl_stamp = jiffies;
736                 if (ret & LM_OUT_CANCELED)
737                         op_done = 0;
738                 else
739                         clear_bit(GLF_DEMOTE, &gl->gl_flags);
740         } else {
741                 spin_lock(&gl->gl_spin);
742                 list_del_init(&gh->gh_list);
743                 gh->gh_error = -EIO;
744                 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 
745                         goto out;
746                 gh->gh_error = GLR_CANCELED;
747                 if (ret & LM_OUT_CANCELED) 
748                         goto out;
749                 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
750                         list_add_tail(&gh->gh_list, &gl->gl_holders);
751                         gh->gh_error = 0;
752                         set_bit(HIF_HOLDER, &gh->gh_iflags);
753                         set_bit(HIF_FIRST, &gh->gh_iflags);
754                         op_done = 0;
755                         goto out;
756                 }
757                 gh->gh_error = GLR_TRYFAILED;
758                 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
759                         goto out;
760                 gh->gh_error = -EINVAL;
761                 if (gfs2_assert_withdraw(sdp, 0) == -1)
762                         fs_err(sdp, "ret = 0x%.8X\n", ret);
763 out:
764                 spin_unlock(&gl->gl_spin);
765         }
766
767         if (glops->go_xmote_bh)
768                 glops->go_xmote_bh(gl);
769
770         if (op_done) {
771                 spin_lock(&gl->gl_spin);
772                 gl->gl_req_gh = NULL;
773                 gl->gl_req_bh = NULL;
774                 clear_bit(GLF_LOCK, &gl->gl_flags);
775                 run_queue(gl);
776                 spin_unlock(&gl->gl_spin);
777         }
778
779         gfs2_glock_put(gl);
780
781         if (gh)
782                 gfs2_holder_wake(gh);
783 }
784
785 /**
786  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
787  * @gl: The glock in question
788  * @gh: the holder supplying the requested state and flags (NULL for a demote)
790  *
791  */
792
793 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
794 {
795         struct gfs2_sbd *sdp = gl->gl_sbd;
796         int flags = gh ? gh->gh_flags : 0;
797         unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
798         const struct gfs2_glock_operations *glops = gl->gl_ops;
799         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
800                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
801                                  LM_FLAG_PRIORITY);
802         unsigned int lck_ret;
803
804         if (glops->go_xmote_th)
805                 glops->go_xmote_th(gl);
806
807         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
808         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
809         gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
810         gfs2_assert_warn(sdp, state != gl->gl_state);
811
812         gfs2_glock_hold(gl);
813         gl->gl_req_bh = xmote_bh;
814
815         lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
816
817         if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
818                 return;
819
820         if (lck_ret & LM_OUT_ASYNC)
821                 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
822         else
823                 xmote_bh(gl, lck_ret);
824 }
825
826 /**
827  * drop_bh - Called after a lock module unlock completes
828  * @gl: the glock
829  * @ret: the return status
830  *
831  * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
832  * Doesn't drop the reference on the glock the top half took out
833  *
834  */
835
836 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
837 {
838         struct gfs2_sbd *sdp = gl->gl_sbd;
839         const struct gfs2_glock_operations *glops = gl->gl_ops;
840         struct gfs2_holder *gh = gl->gl_req_gh;
841
842         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
843         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
844         gfs2_assert_warn(sdp, !ret);
845
846         state_change(gl, LM_ST_UNLOCKED);
847         clear_bit(GLF_DEMOTE, &gl->gl_flags);
848
849         if (glops->go_inval)
850                 glops->go_inval(gl, DIO_METADATA);
851
852         if (gh) {
853                 spin_lock(&gl->gl_spin);
854                 list_del_init(&gh->gh_list);
855                 gh->gh_error = 0;
856                 spin_unlock(&gl->gl_spin);
857         }
858
859         spin_lock(&gl->gl_spin);
860         gl->gl_req_gh = NULL;
861         gl->gl_req_bh = NULL;
862         clear_bit(GLF_LOCK, &gl->gl_flags);
863         run_queue(gl);
864         spin_unlock(&gl->gl_spin);
865
866         gfs2_glock_put(gl);
867
868         if (gh)
869                 gfs2_holder_wake(gh);
870 }
871
872 /**
873  * gfs2_glock_drop_th - call into the lock module to unlock a lock
874  * @gl: the glock
875  *
876  */
877
878 static void gfs2_glock_drop_th(struct gfs2_glock *gl)
879 {
880         struct gfs2_sbd *sdp = gl->gl_sbd;
881         const struct gfs2_glock_operations *glops = gl->gl_ops;
882         unsigned int ret;
883
884         if (glops->go_drop_th)
885                 glops->go_drop_th(gl);
886
887         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
888         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
889         gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
890
891         gfs2_glock_hold(gl);
892         gl->gl_req_bh = drop_bh;
893
894         ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
895
896         if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
897                 return;
898
899         if (!ret)
900                 drop_bh(gl, ret);
901         else
902                 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
903 }
904
905 /**
906  * do_cancels - cancel requests for locks stuck waiting on an expire flag
907  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
908  *
909  * Don't cancel GL_NOCANCEL requests.
910  */
911
912 static void do_cancels(struct gfs2_holder *gh)
913 {
914         struct gfs2_glock *gl = gh->gh_gl;
915
916         spin_lock(&gl->gl_spin);
917
918         while (gl->gl_req_gh != gh &&
919                !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
920                !list_empty(&gh->gh_list)) {
921                 if (gl->gl_req_bh && !(gl->gl_req_gh &&
922                                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
923                         spin_unlock(&gl->gl_spin);
924                         gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
925                         msleep(100);
926                         spin_lock(&gl->gl_spin);
927                 } else {
928                         spin_unlock(&gl->gl_spin);
929                         msleep(100);
930                         spin_lock(&gl->gl_spin);
931                 }
932         }
933
934         spin_unlock(&gl->gl_spin);
935 }
936
937 /**
938  * glock_wait_internal - wait on a glock acquisition
939  * @gh: the glock holder
940  *
941  * Returns: 0 on success
942  */
943
944 static int glock_wait_internal(struct gfs2_holder *gh)
945 {
946         struct gfs2_glock *gl = gh->gh_gl;
947         struct gfs2_sbd *sdp = gl->gl_sbd;
948         const struct gfs2_glock_operations *glops = gl->gl_ops;
949
950         if (test_bit(HIF_ABORTED, &gh->gh_iflags))
951                 return -EIO;
952
953         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
954                 spin_lock(&gl->gl_spin);
955                 if (gl->gl_req_gh != gh &&
956                     !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
957                     !list_empty(&gh->gh_list)) {
958                         list_del_init(&gh->gh_list);
959                         gh->gh_error = GLR_TRYFAILED;
960                         run_queue(gl);
961                         spin_unlock(&gl->gl_spin);
962                         return gh->gh_error;
963                 }
964                 spin_unlock(&gl->gl_spin);
965         }
966
967         if (gh->gh_flags & LM_FLAG_PRIORITY)
968                 do_cancels(gh);
969
970         wait_on_holder(gh);
971         if (gh->gh_error)
972                 return gh->gh_error;
973
974         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
975         gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
976                                                    gh->gh_flags));
977
978         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
979                 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
980
981                 if (glops->go_lock) {
982                         gh->gh_error = glops->go_lock(gh);
983                         if (gh->gh_error) {
984                                 spin_lock(&gl->gl_spin);
985                                 list_del_init(&gh->gh_list);
986                                 spin_unlock(&gl->gl_spin);
987                         }
988                 }
989
990                 spin_lock(&gl->gl_spin);
991                 gl->gl_req_gh = NULL;
992                 gl->gl_req_bh = NULL;
993                 clear_bit(GLF_LOCK, &gl->gl_flags);
994                 run_queue(gl);
995                 spin_unlock(&gl->gl_spin);
996         }
997
998         return gh->gh_error;
999 }
1000
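/*
 * find_holder_by_owner - find a holder queued by a particular task
 * @head: the holder list to search
 * @owner: the task whose holder we are looking for
 *
 * Used by add_to_queue() to catch recursive locking attempts by the same
 * process.  The caller must hold gl->gl_spin.
 */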
1001 static inline struct gfs2_holder *
1002 find_holder_by_owner(struct list_head *head, struct task_struct *owner)
1003 {
1004         struct gfs2_holder *gh;
1005
1006         list_for_each_entry(gh, head, gh_list) {
1007                 if (gh->gh_owner == owner)
1008                         return gh;
1009         }
1010
1011         return NULL;
1012 }
1013
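/*
 * print_dbg - emit debug output to a debugfs seq_file or, failing that, the console
 * @gi: the glock iterator carrying the seq_file, or NULL to use printk
 * @fmt: printf-style format string
 */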
1014 static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1015 {
1016         va_list args;
1017
1018         va_start(args, fmt);
1019         if (gi) {
1020                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
1021                 seq_printf(gi->seq, "%s", gi->string);
1022         }
1023         else
1024                 vprintk(fmt, args);
1025         va_end(args);
1026 }
1027
1028 /**
1029  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1030  * @gh: the holder structure to add
1031  *
1032  */
1033
1034 static void add_to_queue(struct gfs2_holder *gh)
1035 {
1036         struct gfs2_glock *gl = gh->gh_gl;
1037         struct gfs2_holder *existing;
1038
1039         BUG_ON(!gh->gh_owner);
1040         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1041                 BUG();
1042
1043         existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1044         if (existing) {
1045                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1046                 printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
1047                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1048                                 existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
1049                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1050                 printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
1051                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1052                                 gl->gl_name.ln_type, gl->gl_state);
1053                 BUG();
1054         }
1055
1056         existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
1057         if (existing) {
1058                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1059                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1060                 BUG();
1061         }
1062
1063         if (gh->gh_flags & LM_FLAG_PRIORITY)
1064                 list_add(&gh->gh_list, &gl->gl_waiters3);
1065         else
1066                 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1067 }
1068
1069 /**
1070  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1071  * @gh: the holder structure
1072  *
1073  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1074  *
1075  * Returns: 0, GLR_TRYFAILED, or errno on failure
1076  */
1077
1078 int gfs2_glock_nq(struct gfs2_holder *gh)
1079 {
1080         struct gfs2_glock *gl = gh->gh_gl;
1081         struct gfs2_sbd *sdp = gl->gl_sbd;
1082         int error = 0;
1083
1084 restart:
1085         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1086                 set_bit(HIF_ABORTED, &gh->gh_iflags);
1087                 return -EIO;
1088         }
1089
1090         set_bit(HIF_PROMOTE, &gh->gh_iflags);
1091
1092         spin_lock(&gl->gl_spin);
1093         add_to_queue(gh);
1094         run_queue(gl);
1095         spin_unlock(&gl->gl_spin);
1096
1097         if (!(gh->gh_flags & GL_ASYNC)) {
1098                 error = glock_wait_internal(gh);
1099                 if (error == GLR_CANCELED) {
1100                         msleep(100);
1101                         goto restart;
1102                 }
1103         }
1104
1105         return error;
1106 }
1107
1108 /**
1109  * gfs2_glock_poll - poll to see if an async request has been completed
1110  * @gh: the holder
1111  *
1112  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1113  */
1114
1115 int gfs2_glock_poll(struct gfs2_holder *gh)
1116 {
1117         struct gfs2_glock *gl = gh->gh_gl;
1118         int ready = 0;
1119
1120         spin_lock(&gl->gl_spin);
1121
1122         if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1123                 ready = 1;
1124         else if (list_empty(&gh->gh_list)) {
1125                 if (gh->gh_error == GLR_CANCELED) {
1126                         spin_unlock(&gl->gl_spin);
1127                         msleep(100);
1128                         if (gfs2_glock_nq(gh))
1129                                 return 1;
1130                         return 0;
1131                 } else
1132                         ready = 1;
1133         }
1134
1135         spin_unlock(&gl->gl_spin);
1136
1137         return ready;
1138 }
1139
1140 /**
1141  * gfs2_glock_wait - wait for a lock acquisition that was requested with GL_ASYNC
1142  * @gh: the holder structure
1143  *
1144  * Returns: 0, GLR_TRYFAILED, or errno on failure
1145  */
1146
1147 int gfs2_glock_wait(struct gfs2_holder *gh)
1148 {
1149         int error;
1150
1151         error = glock_wait_internal(gh);
1152         if (error == GLR_CANCELED) {
1153                 msleep(100);
1154                 gh->gh_flags &= ~GL_ASYNC;
1155                 error = gfs2_glock_nq(gh);
1156         }
1157
1158         return error;
1159 }
1160
1161 /**
1162  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1163  * @gh: the glock holder
1164  *
1165  */
1166
1167 void gfs2_glock_dq(struct gfs2_holder *gh)
1168 {
1169         struct gfs2_glock *gl = gh->gh_gl;
1170         const struct gfs2_glock_operations *glops = gl->gl_ops;
1171
1172         if (gh->gh_flags & GL_NOCACHE)
1173                 handle_callback(gl, LM_ST_UNLOCKED);
1174
1175         gfs2_glmutex_lock(gl);
1176
1177         spin_lock(&gl->gl_spin);
1178         list_del_init(&gh->gh_list);
1179
1180         if (list_empty(&gl->gl_holders)) {
1181                 spin_unlock(&gl->gl_spin);
1182
1183                 if (glops->go_unlock)
1184                         glops->go_unlock(gh);
1185
1186                 spin_lock(&gl->gl_spin);
1187                 gl->gl_stamp = jiffies;
1188         }
1189
1190         clear_bit(GLF_LOCK, &gl->gl_flags);
1191         run_queue(gl);
1192         spin_unlock(&gl->gl_spin);
1193 }
1194
1195 /**
1196  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1197  * @gh: the holder structure
1198  *
1199  */
1200
1201 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1202 {
1203         gfs2_glock_dq(gh);
1204         gfs2_holder_uninit(gh);
1205 }
1206
1207 /**
1208  * gfs2_glock_nq_num - acquire a glock based on lock number
1209  * @sdp: the filesystem
1210  * @number: the lock number
1211  * @glops: the glock operations for the type of glock
1212  * @state: the state to acquire the glock in
1213  * @flags: modifier flags for the acquisition
1214  * @gh: the struct gfs2_holder
1215  *
1216  * Returns: errno
1217  */
1218
1219 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1220                       const struct gfs2_glock_operations *glops,
1221                       unsigned int state, int flags, struct gfs2_holder *gh)
1222 {
1223         struct gfs2_glock *gl;
1224         int error;
1225
1226         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1227         if (!error) {
1228                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1229                 gfs2_glock_put(gl);
1230         }
1231
1232         return error;
1233 }
1234
1235 /**
1236  * glock_compare - Compare two struct gfs2_glock structures for sorting
1237  * @arg_a: the first structure
1238  * @arg_b: the second structure
1239  *
1240  */
1241
1242 static int glock_compare(const void *arg_a, const void *arg_b)
1243 {
1244         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1245         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1246         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1247         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1248
1249         if (a->ln_number > b->ln_number)
1250                 return 1;
1251         if (a->ln_number < b->ln_number)
1252                 return -1;
1253         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1254         return 0;
1255 }
1256
1257 /**
1258  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1259  * @num_gh: the number of structures
1260  * @ghs: an array of struct gfs2_holder structures
1261  *
1262  * Returns: 0 on success (all glocks acquired),
1263  *          errno on failure (no glocks acquired)
1264  */
1265
1266 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1267                      struct gfs2_holder **p)
1268 {
1269         unsigned int x;
1270         int error = 0;
1271
1272         for (x = 0; x < num_gh; x++)
1273                 p[x] = &ghs[x];
1274
1275         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1276
1277         for (x = 0; x < num_gh; x++) {
1278                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1279
1280                 error = gfs2_glock_nq(p[x]);
1281                 if (error) {
1282                         while (x--)
1283                                 gfs2_glock_dq(p[x]);
1284                         break;
1285                 }
1286         }
1287
1288         return error;
1289 }
1290
1291 /**
1292  * gfs2_glock_nq_m - acquire multiple glocks
1293  * @num_gh: the number of structures
1294  * @ghs: an array of struct gfs2_holder structures
1295  *
1296  * Figure out how big an impact this function has.  Either:
1297  * 1) Replace this code with code that calls gfs2_glock_prefetch()
1298  * 2) Forget async stuff and just call nq_m_sync()
1299  * 3) Leave it like it is
1300  *
1301  * Returns: 0 on success (all glocks acquired),
1302  *          errno on failure (no glocks acquired)
1303  */
1304
1305 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1306 {
1307         int *e;
1308         unsigned int x;
1309         int borked = 0, serious = 0;
1310         int error = 0;
1311
1312         if (!num_gh)
1313                 return 0;
1314
1315         if (num_gh == 1) {
1316                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1317                 return gfs2_glock_nq(ghs);
1318         }
1319
1320         e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1321         if (!e)
1322                 return -ENOMEM;
1323
1324         for (x = 0; x < num_gh; x++) {
1325                 ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1326                 error = gfs2_glock_nq(&ghs[x]);
1327                 if (error) {
1328                         borked = 1;
1329                         serious = error;
1330                         num_gh = x;
1331                         break;
1332                 }
1333         }
1334
1335         for (x = 0; x < num_gh; x++) {
1336                 error = e[x] = glock_wait_internal(&ghs[x]);
1337                 if (error) {
1338                         borked = 1;
1339                         if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1340                                 serious = error;
1341                 }
1342         }
1343
1344         if (!borked) {
1345                 kfree(e);
1346                 return 0;
1347         }
1348
1349         for (x = 0; x < num_gh; x++)
1350                 if (!e[x])
1351                         gfs2_glock_dq(&ghs[x]);
1352
1353         if (serious)
1354                 error = serious;
1355         else {
1356                 for (x = 0; x < num_gh; x++)
1357                         gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1358                                           &ghs[x]);
1359                 error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1360         }
1361
1362         kfree(e);
1363
1364         return error;
1365 }
1366
1367 /**
1368  * gfs2_glock_dq_m - release multiple glocks
1369  * @num_gh: the number of structures
1370  * @ghs: an array of struct gfs2_holder structures
1371  *
1372  */
1373
1374 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1375 {
1376         unsigned int x;
1377
1378         for (x = 0; x < num_gh; x++)
1379                 gfs2_glock_dq(&ghs[x]);
1380 }
1381
1382 /**
1383  * gfs2_glock_dq_uninit_m - release multiple glocks
1384  * @num_gh: the number of structures
1385  * @ghs: an array of struct gfs2_holder structures
1386  *
1387  */
1388
1389 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1390 {
1391         unsigned int x;
1392
1393         for (x = 0; x < num_gh; x++)
1394                 gfs2_glock_dq_uninit(&ghs[x]);
1395 }
1396
1397 /**
1398  * gfs2_lvb_hold - attach an LVB to a glock
1399  * @gl: The glock in question
1400  *
1401  */
1402
1403 int gfs2_lvb_hold(struct gfs2_glock *gl)
1404 {
1405         int error;
1406
1407         gfs2_glmutex_lock(gl);
1408
1409         if (!atomic_read(&gl->gl_lvb_count)) {
1410                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1411                 if (error) {
1412                         gfs2_glmutex_unlock(gl);
1413                         return error;
1414                 }
1415                 gfs2_glock_hold(gl);
1416         }
1417         atomic_inc(&gl->gl_lvb_count);
1418
1419         gfs2_glmutex_unlock(gl);
1420
1421         return 0;
1422 }
1423
1424 /**
1425  * gfs2_lvb_unhold - detach an LVB from a glock
1426  * @gl: The glock in question
1427  *
1428  */
1429
1430 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1431 {
1432         gfs2_glock_hold(gl);
1433         gfs2_glmutex_lock(gl);
1434
1435         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1436         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1437                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1438                 gl->gl_lvb = NULL;
1439                 gfs2_glock_put(gl);
1440         }
1441
1442         gfs2_glmutex_unlock(gl);
1443         gfs2_glock_put(gl);
1444 }
1445
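/*
 * blocking_cb - handle a demote request from the lock module
 * @sdp: the filesystem
 * @name: the name of the glock another node needs us to release
 * @state: the state we are being asked to demote to
 *
 * Looks up the glock locally, records the demote request on it and runs the
 * queue so the demote proceeds once all local holders are gone.
 */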
1446 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1447                         unsigned int state)
1448 {
1449         struct gfs2_glock *gl;
1450
1451         gl = gfs2_glock_find(sdp, name);
1452         if (!gl)
1453                 return;
1454
1455         handle_callback(gl, state);
1456
1457         spin_lock(&gl->gl_spin);
1458         run_queue(gl);
1459         spin_unlock(&gl->gl_spin);
1460
1461         gfs2_glock_put(gl);
1462 }
1463
1464 /**
1465  * gfs2_glock_cb - Callback used by locking module
1466  * @sdp: Pointer to the superblock
1467  * @type: Type of callback
1468  * @data: Type dependent data pointer
1469  *
1470  * Called by the locking module when it wants to tell us something.
1471  * Either we need to drop a lock, one of our ASYNC requests completed, or
1472  * a journal from another client needs to be recovered.
1473  */
1474
1475 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1476 {
1477         struct gfs2_sbd *sdp = cb_data;
1478
1479         switch (type) {
1480         case LM_CB_NEED_E:
1481                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1482                 return;
1483
1484         case LM_CB_NEED_D:
1485                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1486                 return;
1487
1488         case LM_CB_NEED_S:
1489                 blocking_cb(sdp, data, LM_ST_SHARED);
1490                 return;
1491
1492         case LM_CB_ASYNC: {
1493                 struct lm_async_cb *async = data;
1494                 struct gfs2_glock *gl;
1495
1496                 down_read(&gfs2_umount_flush_sem);
1497                 gl = gfs2_glock_find(sdp, &async->lc_name);
1498                 if (gfs2_assert_warn(sdp, gl))
1499                         return;
1500                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1501                         gl->gl_req_bh(gl, async->lc_ret);
1502                 gfs2_glock_put(gl);
1503                 up_read(&gfs2_umount_flush_sem);
1504                 return;
1505         }
1506
1507         case LM_CB_NEED_RECOVERY:
1508                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1509                 if (sdp->sd_recoverd_process)
1510                         wake_up_process(sdp->sd_recoverd_process);
1511                 return;
1512
1513         case LM_CB_DROPLOCKS:
1514                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1515                 gfs2_quota_scan(sdp);
1516                 return;
1517
1518         default:
1519                 gfs2_assert_warn(sdp, 0);
1520                 return;
1521         }
1522 }
1523
1524 /**
1525  * demote_ok - Check to see if it's ok to unlock a glock
1526  * @gl: the glock
1527  *
1528  * Returns: 1 if it's ok
1529  */
1530
1531 static int demote_ok(struct gfs2_glock *gl)
1532 {
1533         const struct gfs2_glock_operations *glops = gl->gl_ops;
1534         int demote = 1;
1535
1536         if (test_bit(GLF_STICKY, &gl->gl_flags))
1537                 demote = 0;
1538         else if (glops->go_demote_ok)
1539                 demote = glops->go_demote_ok(gl);
1540
1541         return demote;
1542 }
1543
1544 /**
1545  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1546  * @gl: the glock
1547  *
1548  */
1549
1550 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1551 {
1552         struct gfs2_sbd *sdp = gl->gl_sbd;
1553
1554         spin_lock(&sdp->sd_reclaim_lock);
1555         if (list_empty(&gl->gl_reclaim)) {
1556                 gfs2_glock_hold(gl);
1557                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1558                 atomic_inc(&sdp->sd_reclaim_count);
1559         }
1560         spin_unlock(&sdp->sd_reclaim_lock);
1561
1562         wake_up(&sdp->sd_reclaim_wq);
1563 }
1564
1565 /**
1566  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1567  * @sdp: the filesystem
1568  *
1569  * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1570  * different glock and we notice that there are a lot of glocks in the
1571  * reclaim list.
1572  *
1573  */
1574
1575 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1576 {
1577         struct gfs2_glock *gl;
1578
1579         spin_lock(&sdp->sd_reclaim_lock);
1580         if (list_empty(&sdp->sd_reclaim_list)) {
1581                 spin_unlock(&sdp->sd_reclaim_lock);
1582                 return;
1583         }
1584         gl = list_entry(sdp->sd_reclaim_list.next,
1585                         struct gfs2_glock, gl_reclaim);
1586         list_del_init(&gl->gl_reclaim);
1587         spin_unlock(&sdp->sd_reclaim_lock);
1588
1589         atomic_dec(&sdp->sd_reclaim_count);
1590         atomic_inc(&sdp->sd_reclaimed);
1591
1592         if (gfs2_glmutex_trylock(gl)) {
1593                 if (list_empty(&gl->gl_holders) &&
1594                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1595                         handle_callback(gl, LM_ST_UNLOCKED);
1596                 gfs2_glmutex_unlock(gl);
1597         }
1598
1599         gfs2_glock_put(gl);
1600 }
1601
1602 /**
1603  * examine_bucket - Call a function for each glock in a hash bucket
1604  * @examiner: the function
1605  * @sdp: the filesystem
1606  * @bucket: the bucket
1607  *
1608  * Returns: 1 if the bucket has entries
1609  */
1610
1611 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1612                           unsigned int hash)
1613 {
1614         struct gfs2_glock *gl, *prev = NULL;
1615         int has_entries = 0;
1616         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1617
1618         read_lock(gl_lock_addr(hash));
1619         /* Can't use hlist_for_each_entry - don't want prefetch here */
1620         if (hlist_empty(head))
1621                 goto out;
1622         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1623         while(1) {
1624                 if (gl->gl_sbd == sdp) {
1625                         gfs2_glock_hold(gl);
1626                         read_unlock(gl_lock_addr(hash));
1627                         if (prev)
1628                                 gfs2_glock_put(prev);
1629                         prev = gl;
1630                         examiner(gl);
1631                         has_entries = 1;
1632                         read_lock(gl_lock_addr(hash));
1633                 }
1634                 if (gl->gl_list.next == NULL)
1635                         break;
1636                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1637         }
1638 out:
1639         read_unlock(gl_lock_addr(hash));
1640         if (prev)
1641                 gfs2_glock_put(prev);
1642         return has_entries;
1643 }
1644
1645 /**
1646  * scan_glock - look at a glock and see if we can reclaim it
1647  * @gl: the glock to look at
1648  *
1649  */
1650
1651 static void scan_glock(struct gfs2_glock *gl)
1652 {
1653         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1654                 return;
1655
1656         if (gfs2_glmutex_trylock(gl)) {
1657                 if (list_empty(&gl->gl_holders) &&
1658                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1659                         goto out_schedule;
1660                 gfs2_glmutex_unlock(gl);
1661         }
1662         return;
1663
1664 out_schedule:
1665         gfs2_glmutex_unlock(gl);
1666         gfs2_glock_schedule_for_reclaim(gl);
1667 }
1668
1669 /**
1670  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1671  * @sdp: the filesystem
1672  *
1673  */
1674
1675 void gfs2_scand_internal(struct gfs2_sbd *sdp)
1676 {
1677         unsigned int x;
1678
1679         for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1680                 examine_bucket(scan_glock, sdp, x);
1681 }
1682
1683 /**
1684  * clear_glock - look at a glock and see if we can free it from glock cache
1685  * @gl: the glock to look at
1686  *
1687  */
1688
1689 static void clear_glock(struct gfs2_glock *gl)
1690 {
1691         struct gfs2_sbd *sdp = gl->gl_sbd;
1692         int released;
1693
1694         spin_lock(&sdp->sd_reclaim_lock);
1695         if (!list_empty(&gl->gl_reclaim)) {
1696                 list_del_init(&gl->gl_reclaim);
1697                 atomic_dec(&sdp->sd_reclaim_count);
1698                 spin_unlock(&sdp->sd_reclaim_lock);
1699                 released = gfs2_glock_put(gl);
1700                 gfs2_assert(sdp, !released);
1701         } else {
1702                 spin_unlock(&sdp->sd_reclaim_lock);
1703         }
1704
1705         if (gfs2_glmutex_trylock(gl)) {
1706                 if (list_empty(&gl->gl_holders) &&
1707                     gl->gl_state != LM_ST_UNLOCKED)
1708                         handle_callback(gl, LM_ST_UNLOCKED);
1709                 gfs2_glmutex_unlock(gl);
1710         }
1711 }
1712
1713 /**
1714  * gfs2_gl_hash_clear - Empty out the glock hash table
1715  * @sdp: the filesystem
1716  * @wait: wait until it's all gone
1717  *
1718  * Called when unmounting the filesystem, or when inter-node lock manager
1719  * requests DROPLOCKS because it is running out of capacity.
1720  */
1721
1722 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1723 {
1724         unsigned long t;
1725         unsigned int x;
1726         int cont;
1727
1728         t = jiffies;
1729
1730         for (;;) {
1731                 cont = 0;
1732                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1733                         if (examine_bucket(clear_glock, sdp, x))
1734                                 cont = 1;
1735                 }
1736
1737                 if (!wait || !cont)
1738                         break;
1739
1740                 if (time_after_eq(jiffies,
1741                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1742                         fs_warn(sdp, "Unmount seems to be stalled. "
1743                                      "Dumping lock state...\n");
1744                         gfs2_dump_lockstate(sdp);
1745                         t = jiffies;
1746                 }
1747
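                /*
                 * Push now-unused inodes out of the inode cache so that
                 * their glocks can finally be released.
                 */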
1748                 down_write(&gfs2_umount_flush_sem);
1749                 invalidate_inodes(sdp->sd_vfs);
1750                 up_write(&gfs2_umount_flush_sem);
1751                 msleep(10);
1752         }
1753 }
1754
1755 /*
1756  *  Diagnostic routines to help debug distributed deadlock
1757  */
1758
1759 /**
1760  * dump_holder - print information about a glock holder
1761  * @gi: the glock iterator (NULL means dump to the console)
1762  * @str: a string naming the type of holder
1763  * @gh: the glock holder
1764  * Returns: 0 on success, -ENOBUFS when we run out of space
1765  */
1766
1767 static int dump_holder(struct glock_iter *gi, char *str,
1768                        struct gfs2_holder *gh)
1769 {
1770         unsigned int x;
1771
1772         print_dbg(gi, "  %s\n", str);
1773         print_dbg(gi, "    owner = %ld\n",
1774                    (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
1775         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1776         print_dbg(gi, "    gh_flags =");
1777         for (x = 0; x < 32; x++)
1778                 if (gh->gh_flags & (1 << x))
1779                         print_dbg(gi, " %u", x);
1780         print_dbg(gi, " \n");
1781         print_dbg(gi, "    error = %d\n", gh->gh_error);
1782         print_dbg(gi, "    gh_iflags =");
1783         for (x = 0; x < 32; x++)
1784                 if (test_bit(x, &gh->gh_iflags))
1785                         print_dbg(gi, " %u", x);
1786         print_dbg(gi, " \n");
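        /*
         * A NULL iterator means we are dumping straight to the console,
         * so resolve the caller's address to a symbol name; the debugfs
         * path prints the raw address instead.
         */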
1787         if (gi)
1788                 print_dbg(gi, "    initialized at: 0x%lx\n", gh->gh_ip);
1789         else
1790                 print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);
1791
1792         return 0;
1793 }
1794
1795 /**
1796  * dump_inode - print information about an inode
1797  * @gi: the glock iterator (NULL means dump to the console)
1798  * @ip: the inode
1799  * Returns: 0 on success, -ENOBUFS when we run out of space
1800  */
1801
1802 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1803 {
1804         unsigned int x;
1805
1806         print_dbg(gi, "  Inode:\n");
1807         print_dbg(gi, "    num = %llu/%llu\n",
1808                     ip->i_num.no_formal_ino, ip->i_num.no_addr);
1809         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1810         print_dbg(gi, "    i_flags =");
1811         for (x = 0; x < 32; x++)
1812                 if (test_bit(x, &ip->i_flags))
1813                         print_dbg(gi, " %u", x);
1814         print_dbg(gi, " \n");
1815         return 0;
1816 }
1817
1818 /**
1819  * dump_glock - print information about a glock
1820  * @gi: the glock iterator (NULL means dump to the console)
1821  * @gl: the glock
1822  *
1823  * Returns: 0 on success, -ENOBUFS when we run out of space
1824  */
1825
1826 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1827 {
1828         struct gfs2_holder *gh;
1829         unsigned int x;
1830         int error = -ENOBUFS;
1831
1832         spin_lock(&gl->gl_spin);
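        /*
         * gl_spin keeps the glock fields and the holder/waiter lists
         * stable while they are dumped below.
         */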
1833
1834         print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
1835                    (unsigned long long)gl->gl_name.ln_number);
1836         print_dbg(gi, "  gl_flags =");
1837         for (x = 0; x < 32; x++) {
1838                 if (test_bit(x, &gl->gl_flags))
1839                         print_dbg(gi, " %u", x);
1840         }
1841         print_dbg(gi, " \n");
1842         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1843         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1844         print_dbg(gi, "  gl_owner = %s\n", gl->gl_owner ? gl->gl_owner->comm : "none");
1845         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1846         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1847         print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1848         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1849         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1850         print_dbg(gi, "  le = %s\n",
1851                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1852         print_dbg(gi, "  reclaim = %s\n",
1853                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1854         if (gl->gl_aspace)
1855                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1856                            gl->gl_aspace->i_mapping->nrpages);
1857         else
1858                 print_dbg(gi, "  aspace = no\n");
1859         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1860         if (gl->gl_req_gh) {
1861                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1862                 if (error)
1863                         goto out;
1864         }
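        /*
         * Granted holders are dumped first, then the two remaining waiter
         * queues: waiters1 (glock mutex) and waiters3 (ordinary lock
         * requests).  Demote requests no longer queue a holder; they show
         * up via the GLF_DEMOTE state below.
         */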
1865         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1866                 error = dump_holder(gi, "Holder", gh);
1867                 if (error)
1868                         goto out;
1869         }
1870         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1871                 error = dump_holder(gi, "Waiter1", gh);
1872                 if (error)
1873                         goto out;
1874         }
1875         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1876                 error = dump_holder(gi, "Waiter3", gh);
1877                 if (error)
1878                         goto out;
1879         }
1880         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1881                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1882                           gl->gl_demote_state,
1883                           (u64)(jiffies - gl->gl_demote_time)*1000000/HZ);
1884         }
1885         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1886                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1887                         list_empty(&gl->gl_holders)) {
1888                         error = dump_inode(gi, gl->gl_object);
1889                         if (error)
1890                                 goto out;
1891                 } else {
1892                         error = -ENOBUFS;
1893                         print_dbg(gi, "  Inode: busy\n");
1894                 }
1895         }
1896
1897         error = 0;
1898
1899 out:
1900         spin_unlock(&gl->gl_spin);
1901         return error;
1902 }
1903
1904 /**
1905  * gfs2_dump_lockstate - print out the current lockstate
1906  * @sdp: the filesystem
1907  *
1908  * Dumps the state of every glock belonging to @sdp to the console.
1909  *
1910  * Returns: 0 on success, or the first error returned by dump_glock()
1911  */
1912
1913 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1914 {
1915         struct gfs2_glock *gl;
1916         struct hlist_node *h;
1917         unsigned int x;
1918         int error = 0;
1919
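        /*
         * Walk each hash chain under its read lock and dump every glock
         * that belongs to this superblock; the NULL iterator sends the
         * output to the console.
         */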
1920         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1921
1922                 read_lock(gl_lock_addr(x));
1923
1924                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1925                         if (gl->gl_sbd != sdp)
1926                                 continue;
1927
1928                         error = dump_glock(NULL, gl);
1929                         if (error)
1930                                 break;
1931                 }
1932
1933                 read_unlock(gl_lock_addr(x));
1934
1935                 if (error)
1936                         break;
1937         }
1938
1939
1940         return error;
1941 }
1942
1943 int __init gfs2_glock_init(void)
1944 {
1945         unsigned i;
1946         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1947                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1948         }
1949 #ifdef GL_HASH_LOCK_SZ
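        /* gl_lock_addr() maps each hash chain onto one of these rwlocks. */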
1950         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
1951                 rwlock_init(&gl_hash_locks[i]);
1952         }
1953 #endif
1954         return 0;
1955 }
1956
1957 static int gfs2_glock_iter_next(struct glock_iter *gi)
1958 {
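        /*
         * Advance to the next glock: either step along the current hash
         * chain or move on to the first entry of the next non-empty
         * bucket.  Returns 1 once the whole table has been walked.
         */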
1959         while (1) {
1960                 if (!gi->hb_list) {  /* If we don't have a hash bucket yet */
1961                         gi->hb_list = &gl_hash_table[gi->hash].hb_list;
1962                         if (hlist_empty(gi->hb_list)) {
1963                                 gi->hash++;
1964                                 gi->hb_list = NULL;
1965                                 if (gi->hash >= GFS2_GL_HASH_SIZE)
1966                                         return 1;
1967                                 else
1968                                         continue;
1969                         }
1970                         if (!hlist_empty(gi->hb_list)) {
1971                                 gi->gl = list_entry(gi->hb_list->first,
1972                                                     struct gfs2_glock,
1973                                                     gl_list);
1974                         }
1975                 } else {
1976                         if (gi->gl->gl_list.next == NULL) {
1977                                 gi->hash++;
1978                                 gi->hb_list = NULL;
1979                                 continue;
1980                         }
1981                         gi->gl = list_entry(gi->gl->gl_list.next,
1982                                             struct gfs2_glock, gl_list);
1983                 }
1984                 if (gi->gl)
1985                         break;
1986         }
1987         return 0;
1988 }
1989
1990 static void gfs2_glock_iter_free(struct glock_iter *gi)
1991 {
1992         kfree(gi);
1993 }
1994
1995 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
1996 {
1997         struct glock_iter *gi;
1998
1999         gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2000         if (!gi)
2001                 return NULL;
2002
2003         gi->sdp = sdp;
2004         gi->hash = 0;
2005         gi->gl = NULL;
2006         gi->hb_list = NULL;
2007         gi->seq = NULL;
2008         memset(gi->string, 0, sizeof(gi->string));
2009
2010         if (gfs2_glock_iter_next(gi)) {
2011                 gfs2_glock_iter_free(gi);
2012                 return NULL;
2013         }
2014
2015         return gi;
2016 }
2017
2018 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2019 {
2020         struct glock_iter *gi;
2021         loff_t n = *pos;
2022
2023         gi = gfs2_glock_iter_init(file->private);
2024         if (!gi)
2025                 return NULL;
2026
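        /*
         * seq_file only hands us an offset, so each start() rebuilds the
         * iterator and skips *pos glocks from the top of the table.
         */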
2027         while (n--) {
2028                 if (gfs2_glock_iter_next(gi)) {
2029                         gfs2_glock_iter_free(gi);
2030                         return NULL;
2031                 }
2032         }
2033
2034         return gi;
2035 }
2036
2037 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2038                                  loff_t *pos)
2039 {
2040         struct glock_iter *gi = iter_ptr;
2041
2042         (*pos)++;
2043
2044         if (gfs2_glock_iter_next(gi)) {
2045                 gfs2_glock_iter_free(gi);
2046                 return NULL;
2047         }
2048
2049         return gi;
2050 }
2051
2052 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2053 {
2054         /* nothing for now */
2055 }
2056
2057 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2058 {
2059         struct glock_iter *gi = iter_ptr;
2060
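        /*
         * Stash the seq_file so that print_dbg() writes into it rather
         * than to the console.
         */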
2061         gi->seq = file;
2062         dump_glock(gi, gi->gl);
2063
2064         return 0;
2065 }
2066
2067 static struct seq_operations gfs2_glock_seq_ops = {
2068         .start = gfs2_glock_seq_start,
2069         .next  = gfs2_glock_seq_next,
2070         .stop  = gfs2_glock_seq_stop,
2071         .show  = gfs2_glock_seq_show,
2072 };
2073
2074 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2075 {
2076         struct seq_file *seq;
2077         int ret;
2078
2079         ret = seq_open(file, &gfs2_glock_seq_ops);
2080         if (ret)
2081                 return ret;
2082
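        /*
         * inode->i_private is the gfs2_sbd handed to debugfs_create_file();
         * it becomes seq->private and, from there, the iterator's gi->sdp.
         */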
2083         seq = file->private_data;
2084         seq->private = inode->i_private;
2085
2086         return 0;
2087 }
2088
2089 static const struct file_operations gfs2_debug_fops = {
2090         .owner   = THIS_MODULE,
2091         .open    = gfs2_debugfs_open,
2092         .read    = seq_read,
2093         .llseek  = seq_lseek,
2094         .release = seq_release
2095 };
2096
2097 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2098 {
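        /*
         * One read-only file per mount, named after the lock table, in
         * the top-level "gfs2" debugfs directory.
         */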
2099         sdp->debugfs_dentry = debugfs_create_file(sdp->sd_table_name,
2100                                                   S_IFREG | S_IRUGO,
2101                                                   gfs2_root, sdp,
2102                                                   &gfs2_debug_fops);
2103         if (!sdp->debugfs_dentry)
2104                 return -ENOMEM;
2105
2106         return 0;
2107 }
2108
2109 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2110 {
2111         if (sdp && sdp->debugfs_dentry)
2112                 debugfs_remove(sdp->debugfs_dentry);
2113 }
2114
2115 int gfs2_register_debugfs(void)
2116 {
2117         gfs2_root = debugfs_create_dir("gfs2", NULL);
2118         return gfs2_root ? 0 : -ENOMEM;
2119 }
2120
2121 void gfs2_unregister_debugfs(void)
2122 {
2123         debugfs_remove(gfs2_root);
2124 }