GFS2: Fix "truncate in progress" hang
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <linux/rwsem.h>
25 #include <asm/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
28 #include <linux/kthread.h>
29 #include <linux/freezer.h>
30 #include <linux/workqueue.h>
31 #include <linux/jiffies.h>
32
33 #include "gfs2.h"
34 #include "incore.h"
35 #include "glock.h"
36 #include "glops.h"
37 #include "inode.h"
38 #include "lops.h"
39 #include "meta_io.h"
40 #include "quota.h"
41 #include "super.h"
42 #include "util.h"
43 #include "bmap.h"
44
45 struct gfs2_gl_hash_bucket {
46         struct hlist_head hb_list;
47 };
48
49 struct gfs2_glock_iter {
50         int hash;                       /* hash bucket index         */
51         struct gfs2_sbd *sdp;           /* incore superblock         */
52         struct gfs2_glock *gl;          /* current glock struct      */
53         char string[512];               /* scratch space             */
54 };
55
56 typedef void (*glock_examiner) (struct gfs2_glock *gl);
57
58 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
59 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
60 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
61 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
62
63 static DECLARE_RWSEM(gfs2_umount_flush_sem);
64 static struct dentry *gfs2_root;
65 static struct task_struct *scand_process;
66 static unsigned int scand_secs = 5;
67 static struct workqueue_struct *glock_workqueue;
68
69 #define GFS2_GL_HASH_SHIFT      15
70 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
71 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
72
73 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
74
75
76 /*
77  * Despite what you might think, the numbers below are not arbitrary :-)
78  * They are taken from the ipv4 routing hash code, which is well tested
79  * and thus should be nearly optimal. Later on we might tweak the numbers
80  * but for now this should be fine.
81  *
82  * The reason for putting the locks in a separate array from the list heads
83  * is that we can have fewer locks than list heads and save memory. We use
84  * the same hash function for both, but with a different hash mask.
85  */
86 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
87         defined(CONFIG_PROVE_LOCKING)
88
89 #ifdef CONFIG_LOCKDEP
90 # define GL_HASH_LOCK_SZ        256
91 #else
92 # if NR_CPUS >= 32
93 #  define GL_HASH_LOCK_SZ       4096
94 # elif NR_CPUS >= 16
95 #  define GL_HASH_LOCK_SZ       2048
96 # elif NR_CPUS >= 8
97 #  define GL_HASH_LOCK_SZ       1024
98 # elif NR_CPUS >= 4
99 #  define GL_HASH_LOCK_SZ       512
100 # else
101 #  define GL_HASH_LOCK_SZ       256
102 # endif
103 #endif
104
105 /* We never want more locks than chains */
106 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
107 # undef GL_HASH_LOCK_SZ
108 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
109 #endif
110
111 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
112
113 static inline rwlock_t *gl_lock_addr(unsigned int x)
114 {
115         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
116 }
117 #else /* not SMP, so no spinlocks required */
118 static inline rwlock_t *gl_lock_addr(unsigned int x)
119 {
120         return NULL;
121 }
122 #endif
123
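The split between gl_hash_table[] and gl_hash_locks[] is easy to model in userspace. The sketch below is a minimal illustration, not kernel code: the table and lock-array sizes are made up, and the point is only that indexing both arrays with the same hash but different power-of-two masks gives each chain exactly one guarding lock while letting several chains share it.

#include <stdio.h>

/* Userspace model of the bucket/lock split above: many hash chains,
 * fewer locks. Both arrays are indexed with the same hash value, but
 * each applies its own power-of-two mask. Sizes are illustrative. */
#define N_BUCKETS 16 /* GFS2_GL_HASH_SIZE stand-in */
#define N_LOCKS 4    /* GL_HASH_LOCK_SZ stand-in   */

static unsigned int bucket_of(unsigned int hash) { return hash & (N_BUCKETS - 1); }
static unsigned int lock_of(unsigned int hash)   { return hash & (N_LOCKS - 1); }

int main(void)
{
	unsigned int h;
	/* Every chain maps to exactly one lock, so lock_of(h) always
	 * guards bucket_of(h); several chains share each lock. */
	for (h = 0; h < N_BUCKETS; h++)
		printf("hash %2u -> bucket %2u, lock %u\n", h, bucket_of(h), lock_of(h));
	return 0;
}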
124 /**
125  * gl_hash() - Turn glock number into hash bucket number
126  * @sdp: The GFS2 superblock
127  * @name: The lock name
128  * Returns: The number of the corresponding hash bucket
129  */
130
131 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
132                             const struct lm_lockname *name)
133 {
134         unsigned int h;
135
136         h = jhash(&name->ln_number, sizeof(u64), 0);
137         h = jhash(&name->ln_type, sizeof(unsigned int), h);
138         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
139         h &= GFS2_GL_HASH_MASK;
140
141         return h;
142 }
143
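A hedged userspace model of the hashing scheme above: the lock number, lock type and superblock pointer are folded into a single value, then masked down to a bucket index. FNV-1a stands in for the kernel's jhash() (an assumption made purely so the sketch compiles anywhere); only the chaining structure mirrors gl_hash().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* FNV-1a stand-in for jhash(); the mixing function is an assumption,
 * the chained number -> type -> sbd-pointer structure is the point. */
static uint32_t fnv1a(const void *data, size_t len, uint32_t h)
{
	const unsigned char *p = data;
	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

#define HASH_MASK ((1u << 15) - 1) /* GFS2_GL_HASH_MASK */

struct sbd; /* opaque stand-in for struct gfs2_sbd */

static unsigned int model_gl_hash(const struct sbd *sdp,
				  uint64_t number, unsigned int type)
{
	uint32_t h = fnv1a(&number, sizeof(number), 2166136261u);
	h = fnv1a(&type, sizeof(type), h);
	h = fnv1a(&sdp, sizeof(sdp), h); /* pointer value keeps sbds apart */
	return h & HASH_MASK;
}

int main(void)
{
	struct sbd *sb = (struct sbd *)0x1000;
	printf("inode 1234 -> bucket %u\n", model_gl_hash(sb, 1234, 2));
	return 0;
}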
144 /**
145  * glock_free() - Perform a few checks and then release struct gfs2_glock
146  * @gl: The glock to release
147  *
148  * Also calls lock module to release its internal structure for this glock.
149  *
150  */
151
152 static void glock_free(struct gfs2_glock *gl)
153 {
154         struct gfs2_sbd *sdp = gl->gl_sbd;
155         struct inode *aspace = gl->gl_aspace;
156
157         if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
158                 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
159
160         if (aspace)
161                 gfs2_aspace_put(aspace);
162
163         kmem_cache_free(gfs2_glock_cachep, gl);
164 }
165
166 /**
167  * gfs2_glock_hold() - increment reference count on glock
168  * @gl: The glock to hold
169  *
170  */
171
172 static void gfs2_glock_hold(struct gfs2_glock *gl)
173 {
174         atomic_inc(&gl->gl_ref);
175 }
176
177 /**
178  * gfs2_glock_put() - Decrement reference count on glock
179  * @gl: The glock to put
180  *
181  */
182
183 int gfs2_glock_put(struct gfs2_glock *gl)
184 {
185         int rv = 0;
186
187         write_lock(gl_lock_addr(gl->gl_hash));
188         if (atomic_dec_and_test(&gl->gl_ref)) {
189                 hlist_del(&gl->gl_list);
190                 write_unlock(gl_lock_addr(gl->gl_hash));
191                 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
192                 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
193                 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
194                 glock_free(gl);
195                 rv = 1;
196                 goto out;
197         }
198         write_unlock(gl_lock_addr(gl->gl_hash));
199 out:
200         return rv;
201 }
202
203 /**
204  * search_bucket() - Find struct gfs2_glock by lock number
205  * @hash: the index of the bucket to search
206  * @sdp: the incore superblock
207  * @name: The lock name
208  * Returns: NULL, or the struct gfs2_glock with the requested number
209  */
210
211 static struct gfs2_glock *search_bucket(unsigned int hash,
212                                         const struct gfs2_sbd *sdp,
213                                         const struct lm_lockname *name)
214 {
215         struct gfs2_glock *gl;
216         struct hlist_node *h;
217
218         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
219                 if (!lm_name_equal(&gl->gl_name, name))
220                         continue;
221                 if (gl->gl_sbd != sdp)
222                         continue;
223
224                 atomic_inc(&gl->gl_ref);
225
226                 return gl;
227         }
228
229         return NULL;
230 }
231
232 /**
233  * gfs2_glock_find() - Find glock by lock number
234  * @sdp: The GFS2 superblock
235  * @name: The lock name
236  *
237  * Returns: NULL, or the struct gfs2_glock with the requested number
238  */
239
240 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
241                                           const struct lm_lockname *name)
242 {
243         unsigned int hash = gl_hash(sdp, name);
244         struct gfs2_glock *gl;
245
246         read_lock(gl_lock_addr(hash));
247         gl = search_bucket(hash, sdp, name);
248         read_unlock(gl_lock_addr(hash));
249
250         return gl;
251 }
252
253 /**
254  * may_grant - check if it's ok to grant a new lock
255  * @gl: The glock
256  * @gh: The lock request which we wish to grant
257  *
258  * Returns: true if it's ok to grant the lock
259  */
260
261 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
262 {
263         const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
264         if ((gh->gh_state == LM_ST_EXCLUSIVE ||
265              gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
266                 return 0;
267         if (gl->gl_state == gh->gh_state)
268                 return 1;
269         if (gh->gh_flags & GL_EXACT)
270                 return 0;
271         if (gl->gl_state == LM_ST_EXCLUSIVE) {
272                 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
273                         return 1;
274                 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
275                         return 1;
276         }
277         if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
278                 return 1;
279         return 0;
280 }
281
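The grant rules above are compact but dense. Below is a small standalone model of may_grant()'s decision table, with the holder queue reduced to flags (is this request at the head? is the head exclusive?); the state and flag names are simplified stand-ins for LM_ST_* and GL_EXACT/LM_FLAG_ANY.

#include <stdio.h>

enum st { UN, SH, DF, EX };

static const char *n[] = { "UN", "SH", "DF", "EX" };

static int model_may_grant(enum st gl_state, enum st head_state, enum st req,
			   int is_head, int exact, int any)
{
	if ((req == EX || head_state == EX) && !is_head)
		return 0;		/* EX requests queue strictly      */
	if (gl_state == req)
		return 1;		/* exact state match always grants */
	if (exact)
		return 0;		/* GL_EXACT: match or nothing      */
	if (gl_state == EX && req == SH && head_state == SH)
		return 1;		/* EX glock can serve SH holders   */
	if (gl_state == EX && req == DF && head_state == DF)
		return 1;
	if (gl_state != UN && any)
		return 1;		/* LM_FLAG_ANY: any held state     */
	return 0;
}

int main(void)
{
	enum st g, r;

	for (g = UN; g <= EX; g++)
		for (r = SH; r <= EX; r++)
			printf("glock %s, head request %s -> %s\n", n[g], n[r],
			       model_may_grant(g, r, r, 1, 0, 0) ? "grant" : "wait");
	return 0;
}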
282 static void gfs2_holder_wake(struct gfs2_holder *gh)
283 {
284         clear_bit(HIF_WAIT, &gh->gh_iflags);
285         smp_mb__after_clear_bit();
286         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
287 }
288
289 /**
290  * do_promote - promote as many requests as possible on the current queue
291  * @gl: The glock
292  * 
293  * Returns: 1 if there is a blocked holder at the head of the list, or 2
294  *          if a type specific operation is underway.
295  */
296
297 static int do_promote(struct gfs2_glock *gl)
298 __releases(&gl->gl_spin)
299 __acquires(&gl->gl_spin)
300 {
301         const struct gfs2_glock_operations *glops = gl->gl_ops;
302         struct gfs2_holder *gh, *tmp;
303         int ret;
304
305 restart:
306         list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
307                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
308                         continue;
309                 if (may_grant(gl, gh)) {
310                         if (gh->gh_list.prev == &gl->gl_holders &&
311                             glops->go_lock) {
312                                 spin_unlock(&gl->gl_spin);
313                                 /* FIXME: eliminate this eventually */
314                                 ret = glops->go_lock(gh);
315                                 spin_lock(&gl->gl_spin);
316                                 if (ret) {
317                                         if (ret == 1)
318                                                 return 2;
319                                         gh->gh_error = ret;
320                                         list_del_init(&gh->gh_list);
321                                         gfs2_holder_wake(gh);
322                                         goto restart;
323                                 }
324                                 set_bit(HIF_HOLDER, &gh->gh_iflags);
325                                 gfs2_holder_wake(gh);
326                                 goto restart;
327                         }
328                         set_bit(HIF_HOLDER, &gh->gh_iflags);
329                         gfs2_holder_wake(gh);
330                         continue;
331                 }
332                 if (gh->gh_list.prev == &gl->gl_holders)
333                         return 1;
334                 break;
335         }
336         return 0;
337 }
338
339 /**
340  * do_error - Something unexpected has happened during a lock request
341  *
342  */
343
344 static inline void do_error(struct gfs2_glock *gl, const int ret)
345 {
346         struct gfs2_holder *gh, *tmp;
347
348         list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
349                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
350                         continue;
351                 if (ret & LM_OUT_ERROR)
352                         gh->gh_error = -EIO;
353                 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
354                         gh->gh_error = GLR_TRYFAILED;
355                 else
356                         continue;
357                 list_del_init(&gh->gh_list);
358                 gfs2_holder_wake(gh);
359         }
360 }
361
362 /**
363  * find_first_waiter - find the first gh that's waiting for the glock
364  * @gl: the glock
365  */
366
367 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
368 {
369         struct gfs2_holder *gh;
370
371         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
372                 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
373                         return gh;
374         }
375         return NULL;
376 }
377
378 /**
379  * state_change - record that the glock is now in a different state
380  * @gl: the glock
381  * @new_state: the new state
382  *
383  */
384
385 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
386 {
387         int held1, held2;
388
389         held1 = (gl->gl_state != LM_ST_UNLOCKED);
390         held2 = (new_state != LM_ST_UNLOCKED);
391
392         if (held1 != held2) {
393                 if (held2)
394                         gfs2_glock_hold(gl);
395                 else
396                         gfs2_glock_put(gl);
397         }
398
399         gl->gl_state = new_state;
400         gl->gl_tchange = jiffies;
401 }
402
403 static void gfs2_demote_wake(struct gfs2_glock *gl)
404 {
405         gl->gl_demote_state = LM_ST_EXCLUSIVE;
406         clear_bit(GLF_DEMOTE, &gl->gl_flags);
407         smp_mb__after_clear_bit();
408         wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
409 }
410
411 /**
412  * finish_xmote - The DLM has replied to one of our lock requests
413  * @gl: The glock
414  * @ret: The status from the DLM
415  *
416  */
417
418 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
419 {
420         const struct gfs2_glock_operations *glops = gl->gl_ops;
421         struct gfs2_holder *gh;
422         unsigned state = ret & LM_OUT_ST_MASK;
423         int rv;
424
425         spin_lock(&gl->gl_spin);
426         state_change(gl, state);
427         gh = find_first_waiter(gl);
428
429         /* Demote to UN request arrived during demote to SH or DF */
430         if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
431             state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
432                 gl->gl_target = LM_ST_UNLOCKED;
433
434         /* Check for state != intended state */
435         if (unlikely(state != gl->gl_target)) {
436                 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
437                         /* move to back of queue and try next entry */
438                         if (ret & LM_OUT_CANCELED) {
439                                 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
440                                         list_move_tail(&gh->gh_list, &gl->gl_holders);
441                                 gh = find_first_waiter(gl);
442                                 gl->gl_target = gh->gh_state;
443                                 goto retry;
444                         }
445                         /* Some error or failed "try lock" - report it */
446                         if ((ret & LM_OUT_ERROR) ||
447                             (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
448                                 gl->gl_target = gl->gl_state;
449                                 do_error(gl, ret);
450                                 goto out;
451                         }
452                 }
453                 switch(state) {
454                 /* Unlocked due to conversion deadlock, try again */
455                 case LM_ST_UNLOCKED:
456 retry:
457                         do_xmote(gl, gh, gl->gl_target);
458                         break;
459                 /* Conversion fails, unlock and try again */
460                 case LM_ST_SHARED:
461                 case LM_ST_DEFERRED:
462                         do_xmote(gl, gh, LM_ST_UNLOCKED);
463                         break;
464                 default: /* Everything else */
465                         printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
466                         GLOCK_BUG_ON(gl, 1);
467                 }
468                 spin_unlock(&gl->gl_spin);
469                 gfs2_glock_put(gl);
470                 return;
471         }
472
473         /* Fast path - we got what we asked for */
474         if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
475                 gfs2_demote_wake(gl);
476         if (state != LM_ST_UNLOCKED) {
477                 if (glops->go_xmote_bh) {
478                         spin_unlock(&gl->gl_spin);
479                         rv = glops->go_xmote_bh(gl, gh);
480                         if (rv == -EAGAIN)
481                                 return;
482                         spin_lock(&gl->gl_spin);
483                         if (rv) {
484                                 do_error(gl, rv);
485                                 goto out;
486                         }
487                 }
488                 rv = do_promote(gl);
489                 if (rv == 2)
490                         goto out_locked;
491         }
492 out:
493         clear_bit(GLF_LOCK, &gl->gl_flags);
494 out_locked:
495         spin_unlock(&gl->gl_spin);
496         gfs2_glock_put(gl);
497 }
498
499 static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
500                                  unsigned int cur_state, unsigned int req_state,
501                                  unsigned int flags)
502 {
503         int ret = LM_OUT_ERROR;
504
505         if (!sdp->sd_lockstruct.ls_ops->lm_lock)
506                 return req_state == LM_ST_UNLOCKED ? 0 : req_state;
507
508         if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
509                 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
510                                                          req_state, flags);
511         return ret;
512 }
513
514 /**
515  * do_xmote - Calls the DLM to change the state of a lock
516  * @gl: The glock
517  * @gh: The holder (only for promotes)
518  * @target: The target lock state
519  *
520  */
521
522 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
523 __releases(&gl->gl_spin)
524 __acquires(&gl->gl_spin)
525 {
526         const struct gfs2_glock_operations *glops = gl->gl_ops;
527         struct gfs2_sbd *sdp = gl->gl_sbd;
528         unsigned int lck_flags = gh ? gh->gh_flags : 0;
529         int ret;
530
531         lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
532                       LM_FLAG_PRIORITY);
533         BUG_ON(gl->gl_state == target);
534         BUG_ON(gl->gl_state == gl->gl_target);
535         if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
536             glops->go_inval) {
537                 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
538                 do_error(gl, 0); /* Fail queued try locks */
539         }
540         spin_unlock(&gl->gl_spin);
541         if (glops->go_xmote_th)
542                 glops->go_xmote_th(gl);
543         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
544                 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
545         clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
546
547         gfs2_glock_hold(gl);
548         if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
549             gl->gl_state == LM_ST_DEFERRED) &&
550             !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
551                 lck_flags |= LM_FLAG_TRY_1CB;
552         ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
553
554         if (!(ret & LM_OUT_ASYNC)) {
555                 finish_xmote(gl, ret);
556                 gfs2_glock_hold(gl);
557                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
558                         gfs2_glock_put(gl);
559         } else {
560                 GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
561         }
562         spin_lock(&gl->gl_spin);
563 }
564
565 /**
566  * find_first_holder - find the first "holder" gh
567  * @gl: the glock
568  */
569
570 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
571 {
572         struct gfs2_holder *gh;
573
574         if (!list_empty(&gl->gl_holders)) {
575                 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
576                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
577                         return gh;
578         }
579         return NULL;
580 }
581
582 /**
583  * run_queue - do all outstanding tasks related to a glock
584  * @gl: The glock in question
585  * @nonblock: True if we must not block in run_queue
586  *
587  */
588
589 static void run_queue(struct gfs2_glock *gl, const int nonblock)
590 __releases(&gl->gl_spin)
591 __acquires(&gl->gl_spin)
592 {
593         struct gfs2_holder *gh = NULL;
594         int ret;
595
596         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
597                 return;
598
599         GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
600
601         if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
602             gl->gl_demote_state != gl->gl_state) {
603                 if (find_first_holder(gl))
604                         goto out;
605                 if (nonblock)
606                         goto out_sched;
607                 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
608                 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
609                 gl->gl_target = gl->gl_demote_state;
610         } else {
611                 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
612                         gfs2_demote_wake(gl);
613                 ret = do_promote(gl);
614                 if (ret == 0)
615                         goto out;
616                 if (ret == 2)
617                         return;
618                 gh = find_first_waiter(gl);
619                 gl->gl_target = gh->gh_state;
620                 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
621                         do_error(gl, 0); /* Fail queued try locks */
622         }
623         do_xmote(gl, gh, gl->gl_target);
624         return;
625
626 out_sched:
627         gfs2_glock_hold(gl);
628         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
629                 gfs2_glock_put(gl);
630 out:
631         clear_bit(GLF_LOCK, &gl->gl_flags);
632 }
633
634 static void glock_work_func(struct work_struct *work)
635 {
636         unsigned long delay = 0;
637         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
638
639         if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
640                 finish_xmote(gl, gl->gl_reply);
641         spin_lock(&gl->gl_spin);
642         if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
643             gl->gl_state != LM_ST_UNLOCKED &&
644             gl->gl_demote_state != LM_ST_EXCLUSIVE) {
645                 unsigned long holdtime, now = jiffies;
646                 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
647                 if (time_before(now, holdtime))
648                         delay = holdtime - now;
649                 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
650         }
651         run_queue(gl, 0);
652         spin_unlock(&gl->gl_spin);
653         if (!delay ||
654             queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
655                 gfs2_glock_put(gl);
656 }
657
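The min-hold-time arithmetic in glock_work_func() (and again in blocking_cb() further down) is worth seeing in isolation. This standalone sketch models time_before() as the usual wrap-safe signed-difference comparison and computes the demote delay; the tick values are invented.

#include <stdio.h>

/* A demote is deferred until the glock has been held for at least
 * go_min_hold_time jiffies since the last state change. */
typedef unsigned long jiffies_t;

static int model_time_before(jiffies_t a, jiffies_t b)
{
	return (long)(a - b) < 0; /* wrap-safe: signed difference */
}

static jiffies_t demote_delay(jiffies_t now, jiffies_t tchange,
			      jiffies_t min_hold)
{
	jiffies_t holdtime = tchange + min_hold;

	return model_time_before(now, holdtime) ? holdtime - now : 0;
}

int main(void)
{
	/* Lock state last changed at tick 1000, minimum hold 100 ticks. */
	printf("at 1040: delay %lu\n", demote_delay(1040, 1000, 100)); /* 60 */
	printf("at 1200: delay %lu\n", demote_delay(1200, 1000, 100)); /* 0  */
	/* The comparison also works across a jiffies wrap-around. */
	printf("wrap:    delay %lu\n", demote_delay(10, (jiffies_t)-50, 100));
	return 0;
}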
658 static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
659                      void **lockp)
660 {
661         int error = -EIO;
662         if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
663                 return 0;
664         if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
665                 error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
666                                 sdp->sd_lockstruct.ls_lockspace, name, lockp);
667         return error;
668 }
669
670 /**
671  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
672  * @sdp: The GFS2 superblock
673  * @number: the lock number
674  * @glops: The glock_operations to use
675  * @create: If 0, don't create the glock if it doesn't exist
676  * @glp: the glock is returned here
677  *
678  * This does not lock a glock, just finds/creates structures for one.
679  *
680  * Returns: errno
681  */
682
683 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
684                    const struct gfs2_glock_operations *glops, int create,
685                    struct gfs2_glock **glp)
686 {
687         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
688         struct gfs2_glock *gl, *tmp;
689         unsigned int hash = gl_hash(sdp, &name);
690         int error;
691
692         read_lock(gl_lock_addr(hash));
693         gl = search_bucket(hash, sdp, &name);
694         read_unlock(gl_lock_addr(hash));
695
696         if (gl || !create) {
697                 *glp = gl;
698                 return 0;
699         }
700
701         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
702         if (!gl)
703                 return -ENOMEM;
704
705         gl->gl_flags = 0;
706         gl->gl_name = name;
707         atomic_set(&gl->gl_ref, 1);
708         gl->gl_state = LM_ST_UNLOCKED;
709         gl->gl_target = LM_ST_UNLOCKED;
710         gl->gl_demote_state = LM_ST_EXCLUSIVE;
711         gl->gl_hash = hash;
712         gl->gl_ops = glops;
713         gl->gl_stamp = jiffies;
714         gl->gl_tchange = jiffies;
715         gl->gl_object = NULL;
716         gl->gl_sbd = sdp;
717         gl->gl_aspace = NULL;
718         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
719
720         /* If this glock protects actual on-disk data or metadata blocks,
721            create a VFS inode to manage the pages/buffers holding them. */
722         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
723                 gl->gl_aspace = gfs2_aspace_get(sdp);
724                 if (!gl->gl_aspace) {
725                         error = -ENOMEM;
726                         goto fail;
727                 }
728         }
729
730         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
731         if (error)
732                 goto fail_aspace;
733
734         write_lock(gl_lock_addr(hash));
735         tmp = search_bucket(hash, sdp, &name);
736         if (tmp) {
737                 write_unlock(gl_lock_addr(hash));
738                 glock_free(gl);
739                 gl = tmp;
740         } else {
741                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
742                 write_unlock(gl_lock_addr(hash));
743         }
744
745         *glp = gl;
746
747         return 0;
748
749 fail_aspace:
750         if (gl->gl_aspace)
751                 gfs2_aspace_put(gl->gl_aspace);
752 fail:
753         kmem_cache_free(gfs2_glock_cachep, gl);
754         return error;
755 }
756
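gfs2_glock_get() uses the classic optimistic create pattern: search under the read lock, allocate with no locks held (the allocation may sleep), then search again under the write lock and discard our copy if another CPU raced us into the table. A minimal pthread model of that pattern, with illustrative types rather than the kernel's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long key;
	struct node *next;
};

static struct node *bucket; /* one hash chain */
static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct node *search(unsigned long key)
{
	struct node *n;

	for (n = bucket; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

static struct node *get_node(unsigned long key)
{
	struct node *n, *tmp;

	pthread_rwlock_rdlock(&bucket_lock);
	n = search(key);
	pthread_rwlock_unlock(&bucket_lock);
	if (n)
		return n;

	n = malloc(sizeof(*n)); /* may sleep: no lock held here */
	if (!n)
		return NULL;
	n->key = key;

	pthread_rwlock_wrlock(&bucket_lock);
	tmp = search(key);      /* did someone beat us to it?   */
	if (tmp) {
		pthread_rwlock_unlock(&bucket_lock);
		free(n);        /* lose the race gracefully     */
		return tmp;
	}
	n->next = bucket;       /* publish the new entry        */
	bucket = n;
	pthread_rwlock_unlock(&bucket_lock);
	return n;
}

int main(void)
{
	struct node *a = get_node(42), *b = get_node(42);

	printf("same node: %s\n", a == b ? "yes" : "no");
	return 0;
}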
757 /**
758  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
759  * @gl: the glock
760  * @state: the state we're requesting
761  * @flags: the modifier flags
762  * @gh: the holder structure
763  *
764  */
765
766 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
767                       struct gfs2_holder *gh)
768 {
769         INIT_LIST_HEAD(&gh->gh_list);
770         gh->gh_gl = gl;
771         gh->gh_ip = (unsigned long)__builtin_return_address(0);
772         gh->gh_owner_pid = get_pid(task_pid(current));
773         gh->gh_state = state;
774         gh->gh_flags = flags;
775         gh->gh_error = 0;
776         gh->gh_iflags = 0;
777         gfs2_glock_hold(gl);
778 }
779
780 /**
781  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
782  * @state: the state we're requesting
783  * @flags: the modifier flags
784  * @gh: the holder structure
785  *
786  * Don't mess with the glock.
787  *
788  */
789
790 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
791 {
792         gh->gh_state = state;
793         gh->gh_flags = flags;
794         gh->gh_iflags = 0;
795         gh->gh_ip = (unsigned long)__builtin_return_address(0);
796 }
797
798 /**
799  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
800  * @gh: the holder structure
801  *
802  */
803
804 void gfs2_holder_uninit(struct gfs2_holder *gh)
805 {
806         put_pid(gh->gh_owner_pid);
807         gfs2_glock_put(gh->gh_gl);
808         gh->gh_gl = NULL;
809         gh->gh_ip = 0;
810 }
811
812 static int just_schedule(void *word)
813 {
814         schedule();
815         return 0;
816 }
817
818 static void wait_on_holder(struct gfs2_holder *gh)
819 {
820         might_sleep();
821         wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
822 }
823
824 static void wait_on_demote(struct gfs2_glock *gl)
825 {
826         might_sleep();
827         wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
828 }
829
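Both helpers above park the caller on a flag bit and rely on the waker to clear the bit, issue a barrier, and then wake (see gfs2_holder_wake() earlier). A userspace approximation of the HIF_WAIT handshake, with a mutex/condvar pair standing in for the kernel's bit-waitqueue machinery:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int hif_wait = 1; /* set before the request is issued */

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (hif_wait)                /* like wait_on_bit(): recheck on wake */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("holder granted\n");
	return NULL;
}

static void holder_wake(void)
{
	pthread_mutex_lock(&lock);      /* the mutex supplies the barrier */
	hif_wait = 0;                   /* clear_bit(HIF_WAIT, ...)       */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);  /* wake_up_bit(...)               */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	holder_wake(); /* in the kernel this runs from glock_work_func() */
	pthread_join(t, NULL);
	return 0;
}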
830 /**
831  * handle_callback - process a demote request
832  * @gl: the glock
833  * @state: the state the caller wants us to change to
834  *
835  * There are only two requests that we are going to see in actual
836  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
837  */
838
839 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
840                             int remote, unsigned long delay)
841 {
842         int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
843
844         set_bit(bit, &gl->gl_flags);
845         if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
846                 gl->gl_demote_state = state;
847                 gl->gl_demote_time = jiffies;
848                 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
849                     gl->gl_object)
850                         gfs2_glock_schedule_for_reclaim(gl);
851         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
852                         gl->gl_demote_state != state) {
853                 gl->gl_demote_state = LM_ST_UNLOCKED;
854         }
855 }
856
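The demote-state bookkeeping in handle_callback() uses LM_ST_EXCLUSIVE as a "nothing pending" sentinel, since no caller ever asks a glock to demote *to* EX. When two different demote requests collide, the target widens to LM_ST_UNLOCKED, which satisfies both. A tiny standalone model of that merge:

#include <stdio.h>

enum st { UN, SH, DF, EX };

static const char *n[] = { "UN", "SH", "DF", "EX(none)" };

static enum st merge_demote(enum st pending, enum st requested)
{
	if (pending == EX)
		return requested; /* first request: record it as-is   */
	if (pending != UN && pending != requested)
		return UN;        /* conflicting requests: go to UN    */
	return pending;           /* same request again, or already UN */
}

int main(void)
{
	enum st s = EX; /* nothing pending */

	s = merge_demote(s, SH);
	printf("after SH request: %s\n", n[s]); /* SH */
	s = merge_demote(s, DF);
	printf("after DF request: %s\n", n[s]); /* UN */
	s = merge_demote(s, SH);
	printf("after SH request: %s\n", n[s]); /* UN */
	return 0;
}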
857 /**
858  * gfs2_glock_wait - wait on a glock acquisition
859  * @gh: the glock holder
860  *
861  * Returns: 0 on success
862  */
863
864 int gfs2_glock_wait(struct gfs2_holder *gh)
865 {
866         wait_on_holder(gh);
867         return gh->gh_error;
868 }
869
870 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
871 {
872         va_list args;
873
874         va_start(args, fmt);
875         if (seq) {
876                 struct gfs2_glock_iter *gi = seq->private;
877                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
878                 seq_printf(seq, "%s", gi->string);
879         } else {
880                 printk(KERN_ERR " ");
881                 vprintk(fmt, args);
882         }
883         va_end(args);
884 }
885
886 /**
887  * add_to_queue - Add a holder to the wait queue (but look for recursion)
888  * @gh: the holder structure to add
889  *
890  * Eventually we should move the recursive locking trap to a
891  * debugging option or something like that. This is the fast
892  * path and needs to have the minimum number of distractions.
893  * 
894  */
895
896 static inline void add_to_queue(struct gfs2_holder *gh)
897 __releases(&gl->gl_spin)
898 __acquires(&gl->gl_spin)
899 {
900         struct gfs2_glock *gl = gh->gh_gl;
901         struct gfs2_sbd *sdp = gl->gl_sbd;
902         struct list_head *insert_pt = NULL;
903         struct gfs2_holder *gh2;
904         int try_lock = 0;
905
906         BUG_ON(gh->gh_owner_pid == NULL);
907         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
908                 BUG();
909
910         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
911                 if (test_bit(GLF_LOCK, &gl->gl_flags))
912                         try_lock = 1;
913                 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
914                         goto fail;
915         }
916
917         list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
918                 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
919                     (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
920                         goto trap_recursive;
921                 if (try_lock &&
922                     !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
923                     !may_grant(gl, gh)) {
924 fail:
925                         gh->gh_error = GLR_TRYFAILED;
926                         gfs2_holder_wake(gh);
927                         return;
928                 }
929                 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
930                         continue;
931                 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
932                         insert_pt = &gh2->gh_list;
933         }
934         if (likely(insert_pt == NULL)) {
935                 list_add_tail(&gh->gh_list, &gl->gl_holders);
936                 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
937                         goto do_cancel;
938                 return;
939         }
940         list_add_tail(&gh->gh_list, insert_pt);
941 do_cancel:
942         gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
943         if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
944                 spin_unlock(&gl->gl_spin);
945                 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
946                         sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
947                 spin_lock(&gl->gl_spin);
948         }
949         return;
950
951 trap_recursive:
952         print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
953         printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
954         printk(KERN_ERR "lock type: %d req lock state : %d\n",
955                gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
956         print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
957         printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
958         printk(KERN_ERR "lock type: %d req lock state : %d\n",
959                gh->gh_gl->gl_name.ln_type, gh->gh_state);
960         __dump_glock(NULL, gl);
961         BUG();
962 }
963
964 /**
965  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
966  * @gh: the holder structure
967  *
968  * if (gh->gh_flags & GL_ASYNC), this never returns an error
969  *
970  * Returns: 0, GLR_TRYFAILED, or errno on failure
971  */
972
973 int gfs2_glock_nq(struct gfs2_holder *gh)
974 {
975         struct gfs2_glock *gl = gh->gh_gl;
976         struct gfs2_sbd *sdp = gl->gl_sbd;
977         int error = 0;
978
979         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
980                 return -EIO;
981
982         spin_lock(&gl->gl_spin);
983         add_to_queue(gh);
984         run_queue(gl, 1);
985         spin_unlock(&gl->gl_spin);
986
987         if (!(gh->gh_flags & GL_ASYNC))
988                 error = gfs2_glock_wait(gh);
989
990         return error;
991 }
992
993 /**
994  * gfs2_glock_poll - poll to see if an async request has been completed
995  * @gh: the holder
996  *
997  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
998  */
999
1000 int gfs2_glock_poll(struct gfs2_holder *gh)
1001 {
1002         return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1003 }
1004
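Together with GL_ASYNC, gfs2_glock_poll() lets a caller issue a lock request, do other work, and only sleep in gfs2_glock_wait() once the request has completed (HIF_WAIT still set means poll returns 0). A userspace model of that pattern, in which a helper thread plays the part of the DLM reply:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int hif_wait = 1;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static int model_glock_poll(void)
{
	int granted;

	pthread_mutex_lock(&m);
	granted = !hif_wait;
	pthread_mutex_unlock(&m);
	return granted;
}

static void *dlm_reply(void *arg)
{
	usleep(1000);           /* the grant arrives "later" */
	pthread_mutex_lock(&m);
	hif_wait = 0;
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int polls = 0;

	pthread_create(&t, NULL, dlm_reply, NULL);
	while (!model_glock_poll()) /* do other work between polls */
		polls++;
	pthread_join(t, NULL);
	printf("granted after %d polls\n", polls);
	return 0;
}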
1005 /**
1006  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1007  * @gh: the glock holder
1008  *
1009  */
1010
1011 void gfs2_glock_dq(struct gfs2_holder *gh)
1012 {
1013         struct gfs2_glock *gl = gh->gh_gl;
1014         const struct gfs2_glock_operations *glops = gl->gl_ops;
1015         unsigned delay = 0;
1016         int fast_path = 0;
1017
1018         spin_lock(&gl->gl_spin);
1019         if (gh->gh_flags & GL_NOCACHE)
1020                 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1021
1022         list_del_init(&gh->gh_list);
1023         if (find_first_holder(gl) == NULL) {
1024                 if (glops->go_unlock) {
1025                         GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1026                         spin_unlock(&gl->gl_spin);
1027                         glops->go_unlock(gh);
1028                         spin_lock(&gl->gl_spin);
1029                         clear_bit(GLF_LOCK, &gl->gl_flags);
1030                 }
1031                 gl->gl_stamp = jiffies;
1032                 if (list_empty(&gl->gl_holders) &&
1033                     !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1034                     !test_bit(GLF_DEMOTE, &gl->gl_flags))
1035                         fast_path = 1;
1036         }
1037         spin_unlock(&gl->gl_spin);
1038         if (likely(fast_path))
1039                 return;
1040
1041         gfs2_glock_hold(gl);
1042         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1043             !test_bit(GLF_DEMOTE, &gl->gl_flags))
1044                 delay = gl->gl_ops->go_min_hold_time;
1045         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1046                 gfs2_glock_put(gl);
1047 }
1048
1049 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1050 {
1051         struct gfs2_glock *gl = gh->gh_gl;
1052         gfs2_glock_dq(gh);
1053         wait_on_demote(gl);
1054 }
1055
1056 /**
1057  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1058  * @gh: the holder structure
1059  *
1060  */
1061
1062 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1063 {
1064         gfs2_glock_dq(gh);
1065         gfs2_holder_uninit(gh);
1066 }
1067
1068 /**
1069  * gfs2_glock_nq_num - acquire a glock based on lock number
1070  * @sdp: the filesystem
1071  * @number: the lock number
1072  * @glops: the glock operations for the type of glock
1073  * @state: the state to acquire the glock in
1074  * @flags: modifier flags for the acquisition
1075  * @gh: the struct gfs2_holder
1076  *
1077  * Returns: errno
1078  */
1079
1080 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1081                       const struct gfs2_glock_operations *glops,
1082                       unsigned int state, int flags, struct gfs2_holder *gh)
1083 {
1084         struct gfs2_glock *gl;
1085         int error;
1086
1087         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1088         if (!error) {
1089                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1090                 gfs2_glock_put(gl);
1091         }
1092
1093         return error;
1094 }
1095
1096 /**
1097  * glock_compare - Compare two struct gfs2_glock structures for sorting
1098  * @arg_a: the first structure
1099  * @arg_b: the second structure
1100  *
1101  */
1102
1103 static int glock_compare(const void *arg_a, const void *arg_b)
1104 {
1105         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1106         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1107         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1108         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1109
1110         if (a->ln_number > b->ln_number)
1111                 return 1;
1112         if (a->ln_number < b->ln_number)
1113                 return -1;
1114         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1115         return 0;
1116 }
1117
1118 /**
1119  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1120  * @num_gh: the number of structures
1121  * @ghs: an array of struct gfs2_holder structures
1122  *
1123  * Returns: 0 on success (all glocks acquired),
1124  *          errno on failure (no glocks acquired)
1125  */
1126
1127 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1128                      struct gfs2_holder **p)
1129 {
1130         unsigned int x;
1131         int error = 0;
1132
1133         for (x = 0; x < num_gh; x++)
1134                 p[x] = &ghs[x];
1135
1136         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1137
1138         for (x = 0; x < num_gh; x++) {
1139                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1140
1141                 error = gfs2_glock_nq(p[x]);
1142                 if (error) {
1143                         while (x--)
1144                                 gfs2_glock_dq(p[x]);
1145                         break;
1146                 }
1147         }
1148
1149         return error;
1150 }
1151
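glock_compare() and nq_m_sync() implement the standard deadlock-avoidance recipe: impose a total order on locks (here, by lock number) and always acquire in that order, unwinding on failure. A standalone illustration of the ordering step with qsort():

#include <stdio.h>
#include <stdlib.h>

struct lockreq { unsigned long number; };

/* Same shape as glock_compare(): sort pointers by lock number. */
static int cmp(const void *a, const void *b)
{
	const struct lockreq *la = *(const struct lockreq **)a;
	const struct lockreq *lb = *(const struct lockreq **)b;

	if (la->number > lb->number)
		return 1;
	if (la->number < lb->number)
		return -1;
	return 0;
}

int main(void)
{
	struct lockreq reqs[] = { {77}, {3}, {42} };
	struct lockreq *p[3];
	unsigned int x;

	for (x = 0; x < 3; x++)
		p[x] = &reqs[x];
	qsort(p, 3, sizeof(p[0]), cmp);

	/* Every task acquiring {77,3,42} now takes them as 3, 42, 77,
	 * so overlapping sets can never deadlock against each other. */
	for (x = 0; x < 3; x++)
		printf("acquire lock %lu\n", p[x]->number);
	return 0;
}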
1152 /**
1153  * gfs2_glock_nq_m - acquire multiple glocks
1154  * @num_gh: the number of structures
1155  * @ghs: an array of struct gfs2_holder structures
1156  *
1157  *
1158  * Returns: 0 on success (all glocks acquired),
1159  *          errno on failure (no glocks acquired)
1160  */
1161
1162 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1163 {
1164         struct gfs2_holder *tmp[4];
1165         struct gfs2_holder **pph = tmp;
1166         int error = 0;
1167
1168         switch(num_gh) {
1169         case 0:
1170                 return 0;
1171         case 1:
1172                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1173                 return gfs2_glock_nq(ghs);
1174         default:
1175                 if (num_gh <= 4)
1176                         break;
1177                 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1178                 if (!pph)
1179                         return -ENOMEM;
1180         }
1181
1182         error = nq_m_sync(num_gh, ghs, pph);
1183
1184         if (pph != tmp)
1185                 kfree(pph);
1186
1187         return error;
1188 }
1189
1190 /**
1191  * gfs2_glock_dq_m - release multiple glocks
1192  * @num_gh: the number of structures
1193  * @ghs: an array of struct gfs2_holder structures
1194  *
1195  */
1196
1197 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1198 {
1199         unsigned int x;
1200
1201         for (x = 0; x < num_gh; x++)
1202                 gfs2_glock_dq(&ghs[x]);
1203 }
1204
1205 /**
1206  * gfs2_glock_dq_uninit_m - release multiple glocks
1207  * @num_gh: the number of structures
1208  * @ghs: an array of struct gfs2_holder structures
1209  *
1210  */
1211
1212 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1213 {
1214         unsigned int x;
1215
1216         for (x = 0; x < num_gh; x++)
1217                 gfs2_glock_dq_uninit(&ghs[x]);
1218 }
1219
1220 static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
1221 {
1222         int error = -EIO;
1223         if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
1224                 return 0;
1225         if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1226                 error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
1227         return error;
1228 }
1229
1230 /**
1231  * gfs2_lvb_hold - attach a LVB to a glock
1232  * @gl: The glock in question
1233  *
1234  */
1235
1236 int gfs2_lvb_hold(struct gfs2_glock *gl)
1237 {
1238         int error;
1239
1240         if (!atomic_read(&gl->gl_lvb_count)) {
1241                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1242                 if (error) 
1243                         return error;
1244                 gfs2_glock_hold(gl);
1245         }
1246         atomic_inc(&gl->gl_lvb_count);
1247
1248         return 0;
1249 }
1250
1251 /**
1252  * gfs2_lvb_unhold - detach a LVB from a glock
1253  * @gl: The glock in question
1254  *
1255  */
1256
1257 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1258 {
1259         struct gfs2_sbd *sdp = gl->gl_sbd;
1260
1261         gfs2_glock_hold(gl);
1262         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1263         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1264                 if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
1265                         sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
1266                 gl->gl_lvb = NULL;
1267                 gfs2_glock_put(gl);
1268         }
1269         gfs2_glock_put(gl);
1270 }
1271
1272 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1273                         unsigned int state)
1274 {
1275         struct gfs2_glock *gl;
1276         unsigned long delay = 0;
1277         unsigned long holdtime;
1278         unsigned long now = jiffies;
1279
1280         gl = gfs2_glock_find(sdp, name);
1281         if (!gl)
1282                 return;
1283
1284         holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1285         if (time_before(now, holdtime))
1286                 delay = holdtime - now;
1287         if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1288                 delay = gl->gl_ops->go_min_hold_time;
1289
1290         spin_lock(&gl->gl_spin);
1291         handle_callback(gl, state, 1, delay);
1292         spin_unlock(&gl->gl_spin);
1293         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1294                 gfs2_glock_put(gl);
1295 }
1296
1297 /**
1298  * gfs2_glock_cb - Callback used by locking module
1299  * @cb_data: Pointer to the superblock
1300  * @type: Type of callback
1301  * @data: Type dependent data pointer
1302  *
1303  * Called by the locking module when it wants to tell us something.
1304  * Either we need to drop a lock, one of our ASYNC requests completed, or
1305  * a journal from another client needs to be recovered.
1306  */
1307
1308 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1309 {
1310         struct gfs2_sbd *sdp = cb_data;
1311
1312         switch (type) {
1313         case LM_CB_NEED_E:
1314                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1315                 return;
1316
1317         case LM_CB_NEED_D:
1318                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1319                 return;
1320
1321         case LM_CB_NEED_S:
1322                 blocking_cb(sdp, data, LM_ST_SHARED);
1323                 return;
1324
1325         case LM_CB_ASYNC: {
1326                 struct lm_async_cb *async = data;
1327                 struct gfs2_glock *gl;
1328
1329                 down_read(&gfs2_umount_flush_sem);
1330                 gl = gfs2_glock_find(sdp, &async->lc_name);
1331                 if (gfs2_assert_warn(sdp, gl))
1332                         return;
1333                 gl->gl_reply = async->lc_ret;
1334                 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1335                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1336                         gfs2_glock_put(gl);
1337                 up_read(&gfs2_umount_flush_sem);
1338                 return;
1339         }
1340
1341         case LM_CB_NEED_RECOVERY:
1342                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1343                 if (sdp->sd_recoverd_process)
1344                         wake_up_process(sdp->sd_recoverd_process);
1345                 return;
1346
1347         default:
1348                 gfs2_assert_warn(sdp, 0);
1349                 return;
1350         }
1351 }
1352
1353 /**
1354  * demote_ok - Check to see if it's ok to unlock a glock
1355  * @gl: the glock
1356  *
1357  * Returns: 1 if it's ok
1358  */
1359
1360 static int demote_ok(struct gfs2_glock *gl)
1361 {
1362         const struct gfs2_glock_operations *glops = gl->gl_ops;
1363         int demote = 1;
1364
1365         if (test_bit(GLF_STICKY, &gl->gl_flags))
1366                 demote = 0;
1367         else if (glops->go_demote_ok)
1368                 demote = glops->go_demote_ok(gl);
1369
1370         return demote;
1371 }
1372
1373 /**
1374  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1375  * @gl: the glock
1376  *
1377  */
1378
1379 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1380 {
1381         struct gfs2_sbd *sdp = gl->gl_sbd;
1382
1383         spin_lock(&sdp->sd_reclaim_lock);
1384         if (list_empty(&gl->gl_reclaim)) {
1385                 gfs2_glock_hold(gl);
1386                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1387                 atomic_inc(&sdp->sd_reclaim_count);
1388                 spin_unlock(&sdp->sd_reclaim_lock);
1389                 wake_up(&sdp->sd_reclaim_wq);
1390         } else
1391                 spin_unlock(&sdp->sd_reclaim_lock);
1392 }
1393
1394 /**
1395  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1396  * @sdp: the filesystem
1397  *
1398  * Called from the gfs2_glockd() glock reclaim daemon, or when promoting a
1399  * different glock and we notice that there are a lot of glocks in the
1400  * reclaim list.
1401  *
1402  */
1403
1404 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1405 {
1406         struct gfs2_glock *gl;
1407         int done_callback = 0;
1408
1409         spin_lock(&sdp->sd_reclaim_lock);
1410         if (list_empty(&sdp->sd_reclaim_list)) {
1411                 spin_unlock(&sdp->sd_reclaim_lock);
1412                 return;
1413         }
1414         gl = list_entry(sdp->sd_reclaim_list.next,
1415                         struct gfs2_glock, gl_reclaim);
1416         list_del_init(&gl->gl_reclaim);
1417         spin_unlock(&sdp->sd_reclaim_lock);
1418
1419         atomic_dec(&sdp->sd_reclaim_count);
1420         atomic_inc(&sdp->sd_reclaimed);
1421
1422         spin_lock(&gl->gl_spin);
1423         if (find_first_holder(gl) == NULL &&
1424             gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
1425                 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1426                 done_callback = 1;
1427         }
1428         spin_unlock(&gl->gl_spin);
1429         if (!done_callback ||
1430             queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1431                 gfs2_glock_put(gl);
1432 }
1433
1434 /**
1435  * examine_bucket - Call a function for each glock in a hash bucket
1436  * @examiner: the function
1437  * @sdp: the filesystem
1438  * @bucket: the bucket
1439  *
1440  * Returns: 1 if the bucket has entries
1441  */
1442
1443 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1444                           unsigned int hash)
1445 {
1446         struct gfs2_glock *gl, *prev = NULL;
1447         int has_entries = 0;
1448         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1449
1450         read_lock(gl_lock_addr(hash));
1451         /* Can't use hlist_for_each_entry - don't want prefetch here */
1452         if (hlist_empty(head))
1453                 goto out;
1454         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1455         while(1) {
1456                 if (!sdp || gl->gl_sbd == sdp) {
1457                         gfs2_glock_hold(gl);
1458                         read_unlock(gl_lock_addr(hash));
1459                         if (prev)
1460                                 gfs2_glock_put(prev);
1461                         prev = gl;
1462                         examiner(gl);
1463                         has_entries = 1;
1464                         read_lock(gl_lock_addr(hash));
1465                 }
1466                 if (gl->gl_list.next == NULL)
1467                         break;
1468                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1469         }
1470 out:
1471         read_unlock(gl_lock_addr(hash));
1472         if (prev)
1473                 gfs2_glock_put(prev);
1474         cond_resched();
1475         return has_entries;
1476 }
1477
1478 /**
1479  * scan_glock - look at a glock and see if we can reclaim it
1480  * @gl: the glock to look at
1481  *
1482  */
1483
1484 static void scan_glock(struct gfs2_glock *gl)
1485 {
1486         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1487                 return;
1488         if (test_bit(GLF_LOCK, &gl->gl_flags))
1489                 return;
1490
1491         spin_lock(&gl->gl_spin);
1492         if (find_first_holder(gl) == NULL &&
1493             gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1494                 gfs2_glock_schedule_for_reclaim(gl);
1495         spin_unlock(&gl->gl_spin);
1496 }
1497
1498 /**
1499  * clear_glock - look at a glock and see if we can free it from glock cache
1500  * @gl: the glock to look at
1501  *
1502  */
1503
1504 static void clear_glock(struct gfs2_glock *gl)
1505 {
1506         struct gfs2_sbd *sdp = gl->gl_sbd;
1507         int released;
1508
1509         spin_lock(&sdp->sd_reclaim_lock);
1510         if (!list_empty(&gl->gl_reclaim)) {
1511                 list_del_init(&gl->gl_reclaim);
1512                 atomic_dec(&sdp->sd_reclaim_count);
1513                 spin_unlock(&sdp->sd_reclaim_lock);
1514                 released = gfs2_glock_put(gl);
1515                 gfs2_assert(sdp, !released);
1516         } else {
1517                 spin_unlock(&sdp->sd_reclaim_lock);
1518         }
1519
1520         spin_lock(&gl->gl_spin);
1521         if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
1522                 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1523         spin_unlock(&gl->gl_spin);
1524         gfs2_glock_hold(gl);
1525         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1526                 gfs2_glock_put(gl);
1527 }
1528
1529 /**
1530  * gfs2_gl_hash_clear - Empty out the glock hash table
1531  * @sdp: the filesystem
1532  *
1533  * Called when unmounting the filesystem; loops until every glock
1534  * belonging to @sdp has been freed.
1535  */
1536
1537 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1538 {
1539         unsigned long t;
1540         unsigned int x;
1541         int cont;
1542
1543         t = jiffies;
1544
1545         for (;;) {
1546                 cont = 0;
1547                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1548                         if (examine_bucket(clear_glock, sdp, x))
1549                                 cont = 1;
1550                 }
1551
1552                 if (!cont)
1553                         break;
1554
1555                 if (time_after_eq(jiffies,
1556                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1557                         fs_warn(sdp, "Unmount seems to be stalled. "
1558                                      "Dumping lock state...\n");
1559                         gfs2_dump_lockstate(sdp);
1560                         t = jiffies;
1561                 }
1562
1563                 down_write(&gfs2_umount_flush_sem);
1564                 invalidate_inodes(sdp->sd_vfs);
1565                 up_write(&gfs2_umount_flush_sem);
1566                 msleep(10);
1567         }
1568 }
1569
1570 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1571 {
1572         struct gfs2_glock *gl = ip->i_gl;
1573         int ret;
1574
1575         ret = gfs2_truncatei_resume(ip);
1576         gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1577
1578         spin_lock(&gl->gl_spin);
1579         clear_bit(GLF_LOCK, &gl->gl_flags);
1580         run_queue(gl, 1);
1581         spin_unlock(&gl->gl_spin);
1582 }
1583
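This function is the centrepiece of the fix named in the commit title. An inode whose truncate was interrupted (e.g. by a node failure) cannot be handed to new holders as-is: the inode's go_lock() returns 1, do_promote() returns 2, and run_queue()/finish_xmote() exit via out_locked with GLF_LOCK still set, parking all waiters. Once gfs2_truncatei_resume() completes, gfs2_glock_finish_truncate() clears GLF_LOCK and re-runs the queue. A minimal state-machine model of that handshake (all names and the single-flag encoding are illustrative):

#include <stdio.h>

static int glf_lock;                 /* GLF_LOCK stand-in          */
static int truncate_in_progress = 1; /* interrupted truncate found */
static int granted;

static void model_run_queue(void)
{
	if (glf_lock) {
		printf("glock busy: waiters stay parked\n");
		return;          /* pre-fix, this repeated forever    */
	}
	glf_lock = 1;
	if (truncate_in_progress) {
		printf("go_lock: truncate pending, GLF_LOCK stays set\n");
		return;          /* the do_promote() "return 2" path  */
	}
	granted = 1;             /* promote the waiting holder        */
	glf_lock = 0;
	printf("holder granted\n");
}

static void model_finish_truncate(void)
{
	truncate_in_progress = 0; /* gfs2_truncatei_resume() finished  */
	glf_lock = 0;             /* clear_bit(GLF_LOCK, ...)          */
	model_run_queue();        /* let the parked holders through    */
}

int main(void)
{
	model_run_queue();        /* hits the interrupted truncate     */
	model_run_queue();        /* would hang here without the fix   */
	model_finish_truncate();  /* the fix: unblock the queue        */
	return granted ? 0 : 1;
}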
1584 static const char *state2str(unsigned state)
1585 {
1586         switch(state) {
1587         case LM_ST_UNLOCKED:
1588                 return "UN";
1589         case LM_ST_SHARED:
1590                 return "SH";
1591         case LM_ST_DEFERRED:
1592                 return "DF";
1593         case LM_ST_EXCLUSIVE:
1594                 return "EX";
1595         }
1596         return "??";
1597 }
1598
1599 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1600 {
1601         char *p = buf;
1602         if (flags & LM_FLAG_TRY)
1603                 *p++ = 't';
1604         if (flags & LM_FLAG_TRY_1CB)
1605                 *p++ = 'T';
1606         if (flags & LM_FLAG_NOEXP)
1607                 *p++ = 'e';
1608         if (flags & LM_FLAG_ANY)
1609                 *p++ = 'a';
1610         if (flags & LM_FLAG_PRIORITY)
1611                 *p++ = 'p';
1612         if (flags & GL_ASYNC)
1613                 *p++ = 'a';
1614         if (flags & GL_EXACT)
1615                 *p++ = 'E';
1616         if (flags & GL_NOCACHE)
1617                 *p++ = 'c';
1618         if (test_bit(HIF_HOLDER, &iflags))
1619                 *p++ = 'H';
1620         if (test_bit(HIF_WAIT, &iflags))
1621                 *p++ = 'W';
1622         if (test_bit(HIF_FIRST, &iflags))
1623                 *p++ = 'F';
1624         *p = 0;
1625         return buf;
1626 }
1627
1628 /**
1629  * dump_holder - print information about a glock holder
1630  * @seq: the seq_file struct
1631  * @gh: the glock holder
1632  *
1633  * Returns: 0 on success, -ENOBUFS when we run out of space
1634  */
1635
1636 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1637 {
1638         struct task_struct *gh_owner = NULL;
1639         char buffer[KSYM_SYMBOL_LEN];
1640         char flags_buf[32];
1641
1642         sprint_symbol(buffer, gh->gh_ip);
1643         if (gh->gh_owner_pid)
1644                 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1645         gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
1646                   state2str(gh->gh_state),
1647                   hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1648                   gh->gh_error,
1649                   gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1650                   gh_owner ? gh_owner->comm : "(ended)", buffer);
1651         return 0;
1652 }
1653
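/*
 * Key to the glock flag characters emitted below: l = locked,
 * s = sticky, D = demote, d = pending demote, p = demote in progress,
 * y = dirty, f = log flush pending, i = invalidate in progress,
 * r = reply pending.
 */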
1654 static const char *gflags2str(char *buf, const unsigned long *gflags)
1655 {
1656         char *p = buf;
1657         if (test_bit(GLF_LOCK, gflags))
1658                 *p++ = 'l';
1659         if (test_bit(GLF_STICKY, gflags))
1660                 *p++ = 's';
1661         if (test_bit(GLF_DEMOTE, gflags))
1662                 *p++ = 'D';
1663         if (test_bit(GLF_PENDING_DEMOTE, gflags))
1664                 *p++ = 'd';
1665         if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1666                 *p++ = 'p';
1667         if (test_bit(GLF_DIRTY, gflags))
1668                 *p++ = 'y';
1669         if (test_bit(GLF_LFLUSH, gflags))
1670                 *p++ = 'f';
1671         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1672                 *p++ = 'i';
1673         if (test_bit(GLF_REPLY_PENDING, gflags))
1674                 *p++ = 'r';
1675         *p = 0;
1676         return buf;
1677 }
1678
1679 /**
1680  * __dump_glock - print information about a glock
1681  * @seq: The seq_file struct
1682  * @gl: the glock
1683  *
1684  * The file format is as follows:
1685  * One line per object, capital letters are used to indicate objects
1686  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1687  * other objects are indented by a single space and follow the glock to
1688  * which they are related. Fields are indicated by lower case letters
1689  * followed by a colon and the field value, except for strings which are in
1690  * [] so that it's possible to see if they are composed of spaces, for
1691  * example. The fields are n = number (id of the object), f = flags,
1692  * t = type, s = state, r = refcount, e = error, p = pid.
1693  *
1694  * Returns: 0 on success, -ENOBUFS when we run out of space
1695  */
1696
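/*
 * For instance (all values here are illustrative only), a shared glock
 * on an inode with one granted holder would be dumped roughly as:
 *
 * G:  s:SH n:2/8837 f:l t:SH d:EX/0 l:0 a:0 r:4
 *  H: s:SH f:H e:0 p:3164 [cat] gfs2_glock_nq_init+0x.../0x...
 */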
1697 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1698 {
1699         const struct gfs2_glock_operations *glops = gl->gl_ops;
1700         unsigned long long dtime;
1701         const struct gfs2_holder *gh;
1702         char gflags_buf[32];
1703         int error = 0;
1704
1705         dtime = jiffies - gl->gl_demote_time;
1706         dtime *= 1000000/HZ; /* demote time in uSec */
1707         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1708                 dtime = 0;
1709         gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
1710                   state2str(gl->gl_state),
1711                   gl->gl_name.ln_type,
1712                   (unsigned long long)gl->gl_name.ln_number,
1713                   gflags2str(gflags_buf, &gl->gl_flags),
1714                   state2str(gl->gl_target),
1715                   state2str(gl->gl_demote_state), dtime,
1716                   atomic_read(&gl->gl_lvb_count),
1717                   atomic_read(&gl->gl_ail_count),
1718                   atomic_read(&gl->gl_ref));
1719
1720         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1721                 error = dump_holder(seq, gh);
1722                 if (error)
1723                         goto out;
1724         }
1725         if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1726                 error = glops->go_dump(seq, gl);
1727 out:
1728         return error;
1729 }
1730
1731 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1732 {
1733         int ret;
1734         spin_lock(&gl->gl_spin);
1735         ret = __dump_glock(seq, gl);
1736         spin_unlock(&gl->gl_spin);
1737         return ret;
1738 }
1739
1740 /**
1741  * gfs2_dump_lockstate - print out the current lockstate
1742  * @sdp: the filesystem
1743  *
1744  * Dumps the state of every glock belonging to @sdp, one line per
1745  * glock plus one line per holder, to the console.
1746  *
1747  */
1748
1749 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1750 {
1751         struct gfs2_glock *gl;
1752         struct hlist_node *h;
1753         unsigned int x;
1754         int error = 0;
1755
1756         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1757
1758                 read_lock(gl_lock_addr(x));
1759
1760                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1761                         if (gl->gl_sbd != sdp)
1762                                 continue;
1763
1764                         error = dump_glock(NULL, gl);
1765                         if (error)
1766                                 break;
1767                 }
1768
1769                 read_unlock(gl_lock_addr(x));
1770
1771                 if (error)
1772                         break;
1773         }
1774
1776         return error;
1777 }
1778
1779 /**
1780  * gfs2_scand - Look for cached glocks and inodes to toss from memory
1781  * @sdp: Pointer to GFS2 superblock
1782  *
1783  * One of these daemons runs, finding candidates to add to sd_reclaim_list.
1784  * See gfs2_glockd()
1785  */
1786
1787 static int gfs2_scand(void *data)
1788 {
1789         unsigned x;
1790         unsigned delay;
1791
1792         while (!kthread_should_stop()) {
1793                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1794                         examine_bucket(scan_glock, NULL, x);
1795                 if (freezing(current))
1796                         refrigerator();
1797                 delay = scand_secs;
1798                 if (delay < 1)
1799                         delay = 1;
1800                 schedule_timeout_interruptible(delay * HZ);
1801         }
1802
1803         return 0;
1804 }
1805
1808 int __init gfs2_glock_init(void)
1809 {
1810         unsigned i;
1811         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1812                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1813         }
1814 #ifdef GL_HASH_LOCK_SZ
1815         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
1816                 rwlock_init(&gl_hash_locks[i]);
1817         }
1818 #endif
1819
1820         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
1821         if (IS_ERR(scand_process))
1822                 return PTR_ERR(scand_process);
1823
1824         glock_workqueue = create_workqueue("glock_workqueue");
1825         if (!glock_workqueue) { /* returns NULL on failure, not ERR_PTR */
1826                 kthread_stop(scand_process);
1827                 return -ENOMEM;
1828         }
1829
1830         return 0;
1831 }
1832
1833 void gfs2_glock_exit(void)
1834 {
1835         destroy_workqueue(glock_workqueue);
1836         kthread_stop(scand_process);
1837 }
1838
1839 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
1840 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
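
/*
 * Since scand_secs is writable (S_IWUSR), it can also be tuned at
 * runtime; assuming the module is named "gfs2", something like:
 *
 *   echo 10 > /sys/module/gfs2/parameters/scand_secs
 */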
1841
1842 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1843 {
1844         struct gfs2_glock *gl;
1845
1846 restart:
1847         read_lock(gl_lock_addr(gi->hash));
1848         gl = gi->gl;
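        /*
         * hlist_entry() is container_of(), so the lookups below yield NULL
         * at the end of a chain only because gl_list is the first member
         * of struct gfs2_glock: a NULL ->next or ->first maps back to NULL
         * rather than to a garbage pointer.
         */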
1849         if (gl) {
1850                 gi->gl = hlist_entry(gl->gl_list.next,
1851                                      struct gfs2_glock, gl_list);
1852         } else {
1853                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1854                                      struct gfs2_glock, gl_list);
1855         }
1856         if (gi->gl)
1857                 gfs2_glock_hold(gi->gl);
1858         read_unlock(gl_lock_addr(gi->hash));
1859         if (gl)
1860                 gfs2_glock_put(gl);
1861         while (gi->gl == NULL) {
1862                 gi->hash++;
1863                 if (gi->hash >= GFS2_GL_HASH_SIZE)
1864                         return 1;
1865                 read_lock(gl_lock_addr(gi->hash));
1866                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1867                                      struct gfs2_glock, gl_list);
1868                 if (gi->gl)
1869                         gfs2_glock_hold(gi->gl);
1870                 read_unlock(gl_lock_addr(gi->hash));
1871         }
1872
1873         if (gi->sdp != gi->gl->gl_sbd)
1874                 goto restart;
1875
1876         return 0;
1877 }
1878
1879 static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1880 {
1881         if (gi->gl)
1882                 gfs2_glock_put(gi->gl);
1883         gi->gl = NULL;
1884 }
1885
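/*
 * seq_file hooks for the debugfs "glocks" file: ->start re-walks the
 * hash table from bucket 0 and steps *pos entries forward (so resuming
 * a partially read dump costs a fresh walk), ->next advances one glock,
 * and ->stop drops any reference the iterator still holds.
 */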
1886 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1887 {
1888         struct gfs2_glock_iter *gi = seq->private;
1889         loff_t n = *pos;
1890
1891         gi->hash = 0;
1892
1893         do {
1894                 if (gfs2_glock_iter_next(gi)) {
1895                         gfs2_glock_iter_free(gi);
1896                         return NULL;
1897                 }
1898         } while (n--);
1899
1900         return gi->gl;
1901 }
1902
1903 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1904                                  loff_t *pos)
1905 {
1906         struct gfs2_glock_iter *gi = seq->private;
1907
1908         (*pos)++;
1909
1910         if (gfs2_glock_iter_next(gi)) {
1911                 gfs2_glock_iter_free(gi);
1912                 return NULL;
1913         }
1914
1915         return gi->gl;
1916 }
1917
1918 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1919 {
1920         struct gfs2_glock_iter *gi = seq->private;
1921         gfs2_glock_iter_free(gi);
1922 }
1923
1924 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1925 {
1926         return dump_glock(seq, iter_ptr);
1927 }
1928
1929 static const struct seq_operations gfs2_glock_seq_ops = {
1930         .start = gfs2_glock_seq_start,
1931         .next  = gfs2_glock_seq_next,
1932         .stop  = gfs2_glock_seq_stop,
1933         .show  = gfs2_glock_seq_show,
1934 };
1935
1936 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1937 {
1938         int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1939                                    sizeof(struct gfs2_glock_iter));
1940         if (ret == 0) {
1941                 struct seq_file *seq = file->private_data;
1942                 struct gfs2_glock_iter *gi = seq->private;
1943                 gi->sdp = inode->i_private;
1944         }
1945         return ret;
1946 }
1947
1948 static const struct file_operations gfs2_debug_fops = {
1949         .owner   = THIS_MODULE,
1950         .open    = gfs2_debugfs_open,
1951         .read    = seq_read,
1952         .llseek  = seq_lseek,
1953         .release = seq_release_private,
1954 };
1955
1956 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1957 {
1958         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1959         if (!sdp->debugfs_dir)
1960                 return -ENOMEM;
1961         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1962                                                          S_IFREG | S_IRUGO,
1963                                                          sdp->debugfs_dir, sdp,
1964                                                          &gfs2_debug_fops);
1965         if (!sdp->debugfs_dentry_glocks)
1966                 return -ENOMEM;
1967
1968         return 0;
1969 }
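
/*
 * With debugfs mounted in its conventional place, the file created
 * above can then be read with, e.g.:
 *
 *   cat /sys/kernel/debug/gfs2/<table_name>/glocks
 *
 * where <table_name> is the filesystem's lock table name.
 */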
1970
1971 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1972 {
1973         if (sdp && sdp->debugfs_dir) {
1974                 if (sdp->debugfs_dentry_glocks) {
1975                         debugfs_remove(sdp->debugfs_dentry_glocks);
1976                         sdp->debugfs_dentry_glocks = NULL;
1977                 }
1978                 debugfs_remove(sdp->debugfs_dir);
1979                 sdp->debugfs_dir = NULL;
1980         }
1981 }
1982
1983 int gfs2_register_debugfs(void)
1984 {
1985         gfs2_root = debugfs_create_dir("gfs2", NULL);
1986         return gfs2_root ? 0 : -ENOMEM;
1987 }
1988
1989 void gfs2_unregister_debugfs(void)
1990 {
1991         debugfs_remove(gfs2_root);
1992         gfs2_root = NULL;
1993 }