[GFS2] Handle multiple glock demote requests
fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
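
/*
 * As a quick sketch of the compatibility rules above (states and flags as
 * defined by the lock module interface):
 *
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)           -> 1
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)    -> 0
 *      relaxed_state_ok(LM_ST_SHARED, LM_ST_DEFERRED, LM_FLAG_ANY)  -> 1
 *      relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY)  -> 0
 */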

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: the filesystem the glock belongs to
 * @name: the lock name (number and type)
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem the glock must belong to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}
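
/*
 * Queueing convention for gl_work, sketched from the callers below: the
 * caller takes a glock reference that glock_work_func() will drop, so if
 * queue_delayed_work() reports the work was already pending, the caller
 * must drop that reference itself:
 *
 *      gfs2_glock_hold(gl);
 *      if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 *              gfs2_glock_put(gl);
 */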

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
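
/*
 * A minimal lookup sketch (error handling trimmed; "number" stands in for
 * whatever lock number the caller derives, e.g. a disk address):
 *
 *      struct gfs2_glock *gl;
 *      int error;
 *
 *      error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *      if (error)
 *              return error;
 *      ...
 *      gfs2_glock_put(gl);          (drops the reference gfs2_glock_get took)
 */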

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = current->pid;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}
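
/*
 * The common holder lifecycle, sketched (gfs2_glock_nq_init() in glock.h
 * combines the first two steps):
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (error) {
 *              gfs2_holder_uninit(&gh);
 *              return error;
 *      }
 *      ...                          (use the protected object)
 *      gfs2_glock_dq_uninit(&gh);
 */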

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }

        set_bit(GLF_LOCK, &gl->gl_flags);
        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                        if (gl->gl_waiters2 && !blocked) {
                                set_bit(GLF_DEMOTE, &gl->gl_flags);
                                gl->gl_demote_state = LM_ST_UNLOCKED;
                        }
                        gl->gl_waiters2 = 0;
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}
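
/*
 * Sketch of the glmutex pattern used by the reclaim and scan paths below:
 *
 *      if (gfs2_glmutex_trylock(gl)) {
 *              ...                  (inspect or modify the glock exclusively)
 *              gfs2_glmutex_unlock(gl);
 *      }
 */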

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request came from another cluster node
 * @delay: if nonzero, defer the demote via GLF_PENDING_DEMOTE
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                        gl->gl_waiters2 = 1;
                else
                        gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}
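
/*
 * How conflicting demote requests are merged, sketched: a second request
 * arriving while one is already recorded either widens the target state to
 * LM_ST_UNLOCKED at once, or, if the first demote is already in progress,
 * sets gl_waiters2 so that run_queue() queues a further demote to
 * LM_ST_UNLOCKED when the current one finishes:
 *
 *      handle_callback(gl, LM_ST_SHARED, 1, 0);     demote to SH recorded
 *      handle_callback(gl, LM_ST_DEFERRED, 1, 0);   target now UNLOCKED
 */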

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /*  Deal with each possible exit condition  */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                gl->gl_req_bh = NULL;
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the request, or NULL for a demote
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and
 * drops the reference on the glock that the top half took out.
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_drop_th)
                glops->go_drop_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                     (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                /* Bound the write to the scratch buffer and pass it to
                   seq_printf() as an argument, not as a format string. */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else {
                vprintk(fmt, args);
        }
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner_pid);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                spin_lock(&gl->gl_spin);
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, used for sorting
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}
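
/*
 * Acquiring two glocks in one call, sketched (gl_a and gl_b are
 * hypothetical glocks the caller already holds references on; on failure
 * no locks are held):
 *
 *      struct gfs2_holder ghs[2];
 *      int error;
 *
 *      gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      if (!error)
 *              gfs2_glock_dq_m(2, ghs);
 *      gfs2_holder_uninit(&ghs[0]);
 *      gfs2_holder_uninit(&ghs[1]);
 */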

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
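
/*
 * LVB usage sketch: gfs2_lvb_hold() pins the buffer at gl->gl_lvb until a
 * matching gfs2_lvb_unhold(); calls must balance:
 *
 *      error = gfs2_lvb_hold(gl);
 *      if (error)
 *              return error;
 *      ...                          (read or write the buffer at gl->gl_lvb)
 *      gfs2_lvb_unhold(gl);
 */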

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl)) {
                        up_read(&gfs2_umount_flush_sem);
                        return;
                }
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}
1663
1664 /**
1665  * examine_bucket - Call a function for glock in a hash bucket
1666  * @examiner: the function
1667  * @sdp: the filesystem
1668  * @bucket: the bucket
1669  *
1670  * Returns: 1 if the bucket has entries
1671  */
1672
1673 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1674                           unsigned int hash)
1675 {
1676         struct gfs2_glock *gl, *prev = NULL;
1677         int has_entries = 0;
1678         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1679
1680         read_lock(gl_lock_addr(hash));
1681         /* Can't use hlist_for_each_entry - don't want prefetch here */
1682         if (hlist_empty(head))
1683                 goto out;
1684         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1685         while(1) {
1686                 if (!sdp || gl->gl_sbd == sdp) {
1687                         gfs2_glock_hold(gl);
1688                         read_unlock(gl_lock_addr(hash));
1689                         if (prev)
1690                                 gfs2_glock_put(prev);
1691                         prev = gl;
1692                         examiner(gl);
1693                         has_entries = 1;
1694                         read_lock(gl_lock_addr(hash));
1695                 }
1696                 if (gl->gl_list.next == NULL)
1697                         break;
1698                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1699         }
1700 out:
1701         read_unlock(gl_lock_addr(hash));
1702         if (prev)
1703                 gfs2_glock_put(prev);
1704         cond_resched();
1705         return has_entries;
1706 }
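Note the reference dance above: the current glock is pinned with
gfs2_glock_hold() before the bucket lock is dropped, so its list linkage stays
valid when the lock is retaken, and the previous glock's reference is put only
once a new one is held. Callers simply sweep every bucket; a trivial wrapper
(hypothetical helper, the in-tree callers open-code this loop) would be:

	static void examine_all_buckets(glock_examiner examiner,
					struct gfs2_sbd *sdp)
	{
		unsigned int x;

		/* Passing sdp == NULL examines every filesystem's glocks */
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			examine_bucket(examiner, sdp, x);
	}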
1707
1708 /**
1709  * scan_glock - look at a glock and see if we can reclaim it
1710  * @gl: the glock to look at
1711  *
1712  */
1713
1714 static void scan_glock(struct gfs2_glock *gl)
1715 {
1716         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1717                 return;
1718
1719         if (gfs2_glmutex_trylock(gl)) {
1720                 if (list_empty(&gl->gl_holders) &&
1721                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1722                         goto out_schedule;
1723                 gfs2_glmutex_unlock(gl);
1724         }
1725         return;
1726
1727 out_schedule:
1728         gfs2_glmutex_unlock(gl);
1729         gfs2_glock_schedule_for_reclaim(gl);
1730 }
1731
1732 /**
1733  * clear_glock - look at a glock and see if we can free it from glock cache
1734  * @gl: the glock to look at
1735  *
1736  */
1737
1738 static void clear_glock(struct gfs2_glock *gl)
1739 {
1740         struct gfs2_sbd *sdp = gl->gl_sbd;
1741         int released;
1742
1743         spin_lock(&sdp->sd_reclaim_lock);
1744         if (!list_empty(&gl->gl_reclaim)) {
1745                 list_del_init(&gl->gl_reclaim);
1746                 atomic_dec(&sdp->sd_reclaim_count);
1747                 spin_unlock(&sdp->sd_reclaim_lock);
1748                 released = gfs2_glock_put(gl);
1749                 gfs2_assert(sdp, !released);
1750         } else {
1751                 spin_unlock(&sdp->sd_reclaim_lock);
1752         }
1753
1754         if (gfs2_glmutex_trylock(gl)) {
1755                 if (list_empty(&gl->gl_holders) &&
1756                     gl->gl_state != LM_ST_UNLOCKED)
1757                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1758                 gfs2_glmutex_unlock(gl);
1759         }
1760 }
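Unlike scan_glock() above, clear_glock() first drops any pending reclaim-list
reference and then queues a demote to LM_ST_UNLOCKED unconditionally, whereas
scan_glock() demotes only when demote_ok() agrees; clearing must make progress
even on glocks that would normally be worth keeping cached.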
1761
1762 /**
1763  * gfs2_gl_hash_clear - Empty out the glock hash table
1764  * @sdp: the filesystem
1765  * @wait: wait until it's all gone
1766  *
1767  * Called when unmounting the filesystem, or when the inter-node lock
1768  * manager requests DROPLOCKS because it is running out of capacity.
1769  */
1770
1771 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1772 {
1773         unsigned long t;
1774         unsigned int x;
1775         int cont;
1776
1777         t = jiffies;
1778
1779         for (;;) {
1780                 cont = 0;
1781                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1782                         if (examine_bucket(clear_glock, sdp, x))
1783                                 cont = 1;
1784                 }
1785
1786                 if (!wait || !cont)
1787                         break;
1788
1789                 if (time_after_eq(jiffies,
1790                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1791                         fs_warn(sdp, "Unmount seems to be stalled. "
1792                                      "Dumping lock state...\n");
1793                         gfs2_dump_lockstate(sdp);
1794                         t = jiffies;
1795                 }
1796
1797                 down_write(&gfs2_umount_flush_sem);
1798                 invalidate_inodes(sdp->sd_vfs);
1799                 up_write(&gfs2_umount_flush_sem);
1800                 msleep(10);
1801         }
1802 }
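The two callers from the comment above map directly onto @wait: unmount must
loop until every glock is gone, while a DROPLOCKS callback only wants a single
best-effort sweep. Illustrative calls (a sketch; spelling at the real call
sites may differ):

	gfs2_gl_hash_clear(sdp, 1);	/* unmount: block until empty */
	gfs2_gl_hash_clear(sdp, 0);	/* DROPLOCKS: one pass, then return */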
1803
1804 /*
1805  *  Diagnostic routines to help debug distributed deadlocks
1806  */
1807
1808 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1809                               unsigned long address)
1810 {
1811         char buffer[KSYM_SYMBOL_LEN];
1812
1813         sprint_symbol(buffer, address);
1814         print_dbg(gi, fmt, buffer);
1815 }
1816
1817 /**
1818  * dump_holder - print information about a glock holder
1819  * @gi: the glock iterator (NULL means print to the console)
1820  * @str: a string naming the type of holder
1821  * @gh: the glock holder
1822  * Returns: 0
1823  */
1824
1825 static int dump_holder(struct glock_iter *gi, char *str,
1826                        struct gfs2_holder *gh)
1827 {
1828         unsigned int x;
1829         struct task_struct *gh_owner;
1830
1831         print_dbg(gi, "  %s\n", str);
1832         if (gh->gh_owner_pid) {
1833                 print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
1834                 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1835                 if (gh_owner)
1836                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1837                 else
1838                         print_dbg(gi, "(ended)\n");
1839         } else
1840                 print_dbg(gi, "    owner = -1\n");
1841         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1842         print_dbg(gi, "    gh_flags =");
1843         for (x = 0; x < 32; x++)
1844                 if (gh->gh_flags & (1 << x))
1845                         print_dbg(gi, " %u", x);
1846         print_dbg(gi, " \n");
1847         print_dbg(gi, "    error = %d\n", gh->gh_error);
1848         print_dbg(gi, "    gh_iflags =");
1849         for (x = 0; x < 32; x++)
1850                 if (test_bit(x, &gh->gh_iflags))
1851                         print_dbg(gi, " %u", x);
1852         print_dbg(gi, " \n");
1853         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1854
1855         return 0;
1856 }
1857
1858 /**
1859  * dump_inode - print information about an inode
1860  * @gi: the glock iterator (NULL means print to the console)
1861  * @ip: the inode
1862  * Returns: 0
1863  */
1864
1865 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1866 {
1867         unsigned int x;
1868
1869         print_dbg(gi, "  Inode:\n");
1870         print_dbg(gi, "    num = %llu/%llu\n",
1871                   (unsigned long long)ip->i_no_formal_ino,
1872                   (unsigned long long)ip->i_no_addr);
1873         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1874         print_dbg(gi, "    i_flags =");
1875         for (x = 0; x < 32; x++)
1876                 if (test_bit(x, &ip->i_flags))
1877                         print_dbg(gi, " %u", x);
1878         print_dbg(gi, " \n");
1879         return 0;
1880 }
1881
1882 /**
1883  * dump_glock - print information about a glock
1884  * @gi: the glock iterator (NULL means dump to the console)
1885  * @gl: the glock
1886  *
1887  * Returns: 0 on success, -ENOBUFS when we run out of space
1888  */
1889
1890 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1891 {
1892         struct gfs2_holder *gh;
1893         unsigned int x;
1894         int error = -ENOBUFS;
1895         struct task_struct *gl_owner;
1896
1897         spin_lock(&gl->gl_spin);
1898
1899         print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1900                    (unsigned long long)gl->gl_name.ln_number);
1901         print_dbg(gi, "  gl_flags =");
1902         for (x = 0; x < 32; x++) {
1903                 if (test_bit(x, &gl->gl_flags))
1904                         print_dbg(gi, " %u", x);
1905         }
1906         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1907                 print_dbg(gi, " (unlocked)");
1908         print_dbg(gi, " \n");
1909         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1910         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1911         if (gl->gl_owner_pid) {
1912                 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1913                 if (gl_owner)
1914                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1915                                   gl->gl_owner_pid, gl_owner->comm);
1916                 else
1917                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1918                                   gl->gl_owner_pid);
1919         } else
1920                 print_dbg(gi, "  gl_owner = -1\n");
1921         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1922         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1923         print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1924         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1925         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1926         print_dbg(gi, "  le = %s\n",
1927                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1928         print_dbg(gi, "  reclaim = %s\n",
1929                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1930         if (gl->gl_aspace)
1931                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1932                            gl->gl_aspace->i_mapping->nrpages);
1933         else
1934                 print_dbg(gi, "  aspace = no\n");
1935         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1936         if (gl->gl_req_gh) {
1937                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1938                 if (error)
1939                         goto out;
1940         }
1941         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1942                 error = dump_holder(gi, "Holder", gh);
1943                 if (error)
1944                         goto out;
1945         }
1946         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1947                 error = dump_holder(gi, "Waiter1", gh);
1948                 if (error)
1949                         goto out;
1950         }
1951         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1952                 error = dump_holder(gi, "Waiter3", gh);
1953                 if (error)
1954                         goto out;
1955         }
1956         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1957                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1958                           gl->gl_demote_state, (unsigned long long)
1959                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1960         }
1961         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1962                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1963                         list_empty(&gl->gl_holders)) {
1964                         error = dump_inode(gi, gl->gl_object);
1965                         if (error)
1966                                 goto out;
1967                 } else {
1968                         error = -ENOBUFS;
1969                         print_dbg(gi, "  Inode: busy\n");
1970                 }
1971         }
1972
1973         error = 0;
1974
1975 out:
1976         spin_unlock(&gl->gl_spin);
1977         return error;
1978 }
1979
1980 /**
1981  * gfs2_dump_lockstate - print out the current lockstate
1982  * @sdp: the filesystem
1983  *
1984  * Dumps the lockstate of every glock belonging to @sdp to the console.
1985  *
1986  * Returns: 0 on success, or the first error returned by dump_glock()
1987  */
1988
1989 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1990 {
1991         struct gfs2_glock *gl;
1992         struct hlist_node *h;
1993         unsigned int x;
1994         int error = 0;
1995
1996         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1997
1998                 read_lock(gl_lock_addr(x));
1999
2000                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
2001                         if (gl->gl_sbd != sdp)
2002                                 continue;
2003
2004                         error = dump_glock(NULL, gl);
2005                         if (error)
2006                                 break;
2007                 }
2008
2009                 read_unlock(gl_lock_addr(x));
2010
2011                 if (error)
2012                         break;
2013         }
2014
2015
2016         return error;
2017 }
2018
2019 /**
2020  * gfs2_scand - Look for cached glocks and inodes to toss from memory
2021  * @sdp: Pointer to GFS2 superblock
2022  *
2023  * A single instance of this daemon runs for all filesystems, finding
2024  * candidates to add to sd_reclaim_list.  See gfs2_glockd()
2025  */
2026
2027 static int gfs2_scand(void *data)
2028 {
2029         unsigned x;
2030         unsigned delay;
2031
2032         while (!kthread_should_stop()) {
2033                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2034                         examine_bucket(scan_glock, NULL, x);
2035                 if (freezing(current))
2036                         refrigerator();
2037                 delay = scand_secs;
2038                 if (delay < 1)
2039                         delay = 1;
2040                 schedule_timeout_interruptible(delay * HZ);
2041         }
2042
2043         return 0;
2044 }
2045
2046
2047
2048 int __init gfs2_glock_init(void)
2049 {
2050         unsigned i;
2051         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2052                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2053         }
2054 #ifdef GL_HASH_LOCK_SZ
2055         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
2056                 rwlock_init(&gl_hash_locks[i]);
2057         }
2058 #endif
2059
2060         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2061         if (IS_ERR(scand_process))
2062                 return PTR_ERR(scand_process);
2063
2064         glock_workqueue = create_workqueue("glock_workqueue");
2065         if (!glock_workqueue) {  /* returns NULL, not an ERR_PTR, on failure */
2066                 kthread_stop(scand_process);
2067                 return -ENOMEM;
2068         }
2069
2070         return 0;
2071 }
2072
2073 void gfs2_glock_exit(void)
2074 {
2075         destroy_workqueue(glock_workqueue);
2076         kthread_stop(scand_process);
2077 }
2078
2079 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2080 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
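Because the parameter is declared S_IRUGO|S_IWUSR, scand_secs can also be read
and updated at runtime through /sys/module/gfs2/parameters/scand_secs;
gfs2_scand() picks up the new interval on its next pass.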
2081
2082 static int gfs2_glock_iter_next(struct glock_iter *gi)
2083 {
2084         struct gfs2_glock *gl;
2085
2086 restart:
2087         read_lock(gl_lock_addr(gi->hash));
2088         gl = gi->gl;
2089         if (gl) {
2090                 gi->gl = hlist_entry(gl->gl_list.next,
2091                                      struct gfs2_glock, gl_list);
2092                 if (gi->gl)
2093                         gfs2_glock_hold(gi->gl);
2094         }
2095         read_unlock(gl_lock_addr(gi->hash));
2096         if (gl)
2097                 gfs2_glock_put(gl);
2098         if (gl && gi->gl == NULL)
2099                 gi->hash++;
2100         while (gi->gl == NULL) {
2101                 if (gi->hash >= GFS2_GL_HASH_SIZE)
2102                         return 1;
2103                 read_lock(gl_lock_addr(gi->hash));
2104                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2105                                      struct gfs2_glock, gl_list);
2106                 if (gi->gl)
2107                         gfs2_glock_hold(gi->gl);
2108                 read_unlock(gl_lock_addr(gi->hash));
2109                 gi->hash++;
2110         }
2111
2112         if (gi->sdp != gi->gl->gl_sbd)
2113                 goto restart;
2114
2115         return 0;
2116 }
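Both hlist_entry() calls above may be handed a NULL pointer (an empty bucket's
->first, or the last entry's ->next). That is only safe while gl_list stays
the first member of struct gfs2_glock: with an offset of zero, container_of()
maps NULL back to NULL, so the "if (gi->gl)" tests still work. An equivalent,
assumption-free way to write the stepping line:

	gi->gl = gl->gl_list.next ?
		hlist_entry(gl->gl_list.next, struct gfs2_glock, gl_list) :
		NULL;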
2117
2118 static void gfs2_glock_iter_free(struct glock_iter *gi)
2119 {
2120         if (gi->gl)
2121                 gfs2_glock_put(gi->gl);
2122         kfree(gi);
2123 }
2124
2125 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2126 {
2127         struct glock_iter *gi;
2128
2129         gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2130         if (!gi)
2131                 return NULL;
2132
2133         gi->sdp = sdp;
2134         gi->hash = 0;
2135         gi->seq = NULL;
2136         gi->gl = NULL;
2137         memset(gi->string, 0, sizeof(gi->string));
2138
2139         if (gfs2_glock_iter_next(gi)) {
2140                 gfs2_glock_iter_free(gi);
2141                 return NULL;
2142         }
2143
2144         return gi;
2145 }
2146
2147 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2148 {
2149         struct glock_iter *gi;
2150         loff_t n = *pos;
2151
2152         gi = gfs2_glock_iter_init(file->private);
2153         if (!gi)
2154                 return NULL;
2155
2156         while (n--) {
2157                 if (gfs2_glock_iter_next(gi)) {
2158                         gfs2_glock_iter_free(gi);
2159                         return NULL;
2160                 }
2161         }
2162
2163         return gi;
2164 }
2165
2166 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2167                                  loff_t *pos)
2168 {
2169         struct glock_iter *gi = iter_ptr;
2170
2171         (*pos)++;
2172
2173         if (gfs2_glock_iter_next(gi)) {
2174                 gfs2_glock_iter_free(gi);
2175                 return NULL;
2176         }
2177
2178         return gi;
2179 }
2180
2181 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2182 {
2183         struct glock_iter *gi = iter_ptr;
2184         if (gi)
2185                 gfs2_glock_iter_free(gi);
2186 }
2187
2188 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2189 {
2190         struct glock_iter *gi = iter_ptr;
2191
2192         gi->seq = file;
2193         dump_glock(gi, gi->gl);
2194
2195         return 0;
2196 }
2197
2198 static const struct seq_operations gfs2_glock_seq_ops = {
2199         .start = gfs2_glock_seq_start,
2200         .next  = gfs2_glock_seq_next,
2201         .stop  = gfs2_glock_seq_stop,
2202         .show  = gfs2_glock_seq_show,
2203 };
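For reference, the seq_file core drives these four callbacks in a fixed
pattern on each read; conceptually (a sketch of the contract, not the actual
fs/seq_file.c internals):

	p = ops->start(m, &pos);
	while (p) {
		ops->show(m, p);
		p = ops->next(m, p, &pos);
	}
	ops->stop(m, p);

Because gfs2_glock_iter_next() frees the iterator once the table is exhausted,
start and next return NULL at end-of-file, and gfs2_glock_seq_stop() must
tolerate (and does tolerate) a NULL iter_ptr.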
2204
2205 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2206 {
2207         struct seq_file *seq;
2208         int ret;
2209
2210         ret = seq_open(file, &gfs2_glock_seq_ops);
2211         if (ret)
2212                 return ret;
2213
2214         seq = file->private_data;
2215         seq->private = inode->i_private;
2216
2217         return 0;
2218 }
2219
2220 static const struct file_operations gfs2_debug_fops = {
2221         .owner   = THIS_MODULE,
2222         .open    = gfs2_debugfs_open,
2223         .read    = seq_read,
2224         .llseek  = seq_lseek,
2225         .release = seq_release
2226 };
2227
2228 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2229 {
2230         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2231         if (!sdp->debugfs_dir)
2232                 return -ENOMEM;
2233         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2234                                                          S_IFREG | S_IRUGO,
2235                                                          sdp->debugfs_dir, sdp,
2236                                                          &gfs2_debug_fops);
2237         if (!sdp->debugfs_dentry_glocks)
2238                 return -ENOMEM;
2239
2240         return 0;
2241 }
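With debugfs mounted (conventionally at /sys/kernel/debug), the per-filesystem
lock dump created here is then readable from
/sys/kernel/debug/gfs2/<table_name>/glocks.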
2242
2243 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2244 {
2245         if (sdp && sdp->debugfs_dir) {
2246                 if (sdp->debugfs_dentry_glocks) {
2247                         debugfs_remove(sdp->debugfs_dentry_glocks);
2248                         sdp->debugfs_dentry_glocks = NULL;
2249                 }
2250                 debugfs_remove(sdp->debugfs_dir);
2251                 sdp->debugfs_dir = NULL;
2252         }
2253 }
2254
2255 int gfs2_register_debugfs(void)
2256 {
2257         gfs2_root = debugfs_create_dir("gfs2", NULL);
2258         return gfs2_root ? 0 : -ENOMEM;
2259 }
2260
2261 void gfs2_unregister_debugfs(void)
2262 {
2263         debugfs_remove(gfs2_root);
2264         gfs2_root = NULL;
2265 }