[GFS2] Remove lm.[ch] and distribute content
fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
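
/*
 * Example (illustrative): a holder asking for LM_ST_SHARED with no flags
 * is satisfied by a glock already held in LM_ST_EXCLUSIVE; passing
 * GL_EXACT forbids that substitution, while LM_FLAG_ANY accepts any
 * state other than LM_ST_UNLOCKED.
 */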

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket index to search
 * @sdp: the filesystem the glock belongs to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
                            void **lockp)
{
        int error = -EIO;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
                                sdp->sd_lockstruct.ls_lockspace, name, lockp);
        return error;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = NULL;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
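
/*
 * Typical use (illustrative sketch, not from this patch): look up or
 * create the glock for a given block number, then drop the reference
 * when finished with it:
 *
 *        struct gfs2_glock *gl;
 *        int error = gfs2_glock_get(sdp, number, &gfs2_inode_glops,
 *                                   CREATE, &gl);
 *        if (!error)
 *                gfs2_glock_put(gl);
 */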

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }

        set_bit(GLF_LOCK, &gl->gl_flags);
        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 * Requests are serviced in order: glmutex waiters (gl_waiters1) first,
 * then a pending demote, then promote waiters (gl_waiters3).
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_mutex(gh);
                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                        if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
                                     !blocked) {
                                set_bit(GLF_DEMOTE, &gl->gl_flags);
                                gl->gl_demote_state = LM_ST_UNLOCKED;
                        }
                        clear_bit(GLF_WAITERS2, &gl->gl_flags);
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_promote(gh);
                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                struct gfs2_holder gh;

                gfs2_holder_init(gl, 0, 0, &gh);
                set_bit(HIF_WAIT, &gh.gh_iflags);
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
                spin_unlock(&gl->gl_spin);
                wait_on_holder(&gh);
                gfs2_holder_uninit(&gh);
        } else {
                gl->gl_owner_pid = get_pid(task_pid(current));
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                spin_unlock(&gl->gl_spin);
        }
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = get_pid(task_pid(current));
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        struct pid *pid;

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        pid = gl->gl_owner_pid;
        gl->gl_owner_pid = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        put_pid(pid);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request came from the lock module, 0 if local
 * @delay: if nonzero, defer the demote by marking it GLF_PENDING_DEMOTE
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                        set_bit(GLF_WAITERS2, &gl->gl_flags);
                else
                        gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}
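
/*
 * Note: when @delay is nonzero the demote is only marked GLF_PENDING_DEMOTE
 * here; glock_work_func() turns it into a real GLF_DEMOTE once the delayed
 * work queued by the caller fires.
 */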

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        gl->gl_req_gh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        if ((ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
                drop_bh(gl, ret);
                return;
        }

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /*  Deal with each possible exit condition  */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
                                 unsigned int cur_state, unsigned int req_state,
                                 unsigned int flags)
{
        int ret = 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
                                                         req_state, flags);
        return ret;
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder requesting the new state, or NULL for a demote request
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
                                   unsigned int cur_state)
{
        int ret = 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
        return ret;
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                                sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct pid *pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                /* Bound the scratch buffer and don't treat it as a format */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else
                vprintk(fmt, args);
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n",
                                        pid_nr(existing->gh_owner_pid));
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n",
                                        pid_nr(gh->gh_owner_pid));
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for an asynchronous (GL_ASYNC) lock request to finish
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                if (glops->go_unlock) {
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                }
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}
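
/*
 * Typical locking sequence (illustrative sketch): gfs2_glock_nq_init()
 * (from glock.h) pairs gfs2_holder_init() with gfs2_glock_nq(), and
 * gfs2_glock_dq_uninit() undoes both:
 *
 *        struct gfs2_holder gh;
 *        int error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *        if (error)
 *                return error;
 *        ... access the data protected by the glock ...
 *        gfs2_glock_dq_uninit(&gh);
 */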

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}
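
/*
 * Example (illustrative sketch; gl_a and gl_b are hypothetical glocks):
 * take two glocks at once. nq_m_sync() sorts the holders by lock number
 * so every node acquires them in the same order, which is what makes the
 * operation deadlock free:
 *
 *        struct gfs2_holder ghs[2];
 *
 *        gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *        gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *        error = gfs2_glock_nq_m(2, ghs);
 *        if (!error)
 *                gfs2_glock_dq_uninit_m(2, ghs);
 */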

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
{
        int error = -EIO;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
        return error;
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}
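
/*
 * Note: gfs2_lvb_hold() and gfs2_lvb_unhold() must balance; the LVB and
 * the extra glock reference taken on the first hold are only released
 * when gl_lvb_count drops back to zero.
 */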

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                xmote_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                wake_up(&sdp->sd_reclaim_wq);
        } else
                spin_unlock(&sdp->sd_reclaim_lock);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket's hash index
 *
 * Returns: 1 if the bucket has entries
 */

1689 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1690                           unsigned int hash)
1691 {
1692         struct gfs2_glock *gl, *prev = NULL;
1693         int has_entries = 0;
1694         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1695
1696         read_lock(gl_lock_addr(hash));
1697         /* Can't use hlist_for_each_entry - don't want prefetch here */
1698         if (hlist_empty(head))
1699                 goto out;
1700         gl = list_entry(head->first, struct gfs2_glock, gl_list);
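             /*
              * Pin each glock with gfs2_glock_hold() before dropping the
              * bucket lock for the examiner; the previous glock is put only
              * while the lock is not held, since a final gfs2_glock_put()
              * needs this bucket's write lock.
              */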
1701         while (1) {
1702                 if (!sdp || gl->gl_sbd == sdp) {
1703                         gfs2_glock_hold(gl);
1704                         read_unlock(gl_lock_addr(hash));
1705                         if (prev)
1706                                 gfs2_glock_put(prev);
1707                         prev = gl;
1708                         examiner(gl);
1709                         has_entries = 1;
1710                         read_lock(gl_lock_addr(hash));
1711                 }
1712                 if (gl->gl_list.next == NULL)
1713                         break;
1714                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1715         }
1716 out:
1717         read_unlock(gl_lock_addr(hash));
1718         if (prev)
1719                 gfs2_glock_put(prev);
1720         cond_resched();
1721         return has_entries;
1722 }
1723
1724 /**
1725  * scan_glock - look at a glock and see if we can reclaim it
1726  * @gl: the glock to look at
1727  *
1728  */
1729
1730 static void scan_glock(struct gfs2_glock *gl)
1731 {
1732         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1733                 return;
1734
1735         if (gfs2_glmutex_trylock(gl)) {
1736                 if (list_empty(&gl->gl_holders) &&
1737                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1738                         goto out_schedule;
1739                 gfs2_glmutex_unlock(gl);
1740         }
1741         return;
1742
1743 out_schedule:
1744         gfs2_glmutex_unlock(gl);
1745         gfs2_glock_schedule_for_reclaim(gl);
1746 }
1747
1748 /**
1749  * clear_glock - look at a glock and see if we can free it from the glock cache
1750  * @gl: the glock to look at
1751  *
1752  */
1753
1754 static void clear_glock(struct gfs2_glock *gl)
1755 {
1756         struct gfs2_sbd *sdp = gl->gl_sbd;
1757         int released;
1758
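             /*
              * Drop the reclaim list's reference, if the glock holds one.
              * Our caller, examine_bucket(), still owns a reference, so
              * this put can never be the final one - hence the assert.
              */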
1759         spin_lock(&sdp->sd_reclaim_lock);
1760         if (!list_empty(&gl->gl_reclaim)) {
1761                 list_del_init(&gl->gl_reclaim);
1762                 atomic_dec(&sdp->sd_reclaim_count);
1763                 spin_unlock(&sdp->sd_reclaim_lock);
1764                 released = gfs2_glock_put(gl);
1765                 gfs2_assert(sdp, !released);
1766         } else {
1767                 spin_unlock(&sdp->sd_reclaim_lock);
1768         }
1769
1770         if (gfs2_glmutex_trylock(gl)) {
1771                 if (list_empty(&gl->gl_holders) &&
1772                     gl->gl_state != LM_ST_UNLOCKED)
1773                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1774                 gfs2_glmutex_unlock(gl);
1775         }
1776 }
1777
1778 /**
1779  * gfs2_gl_hash_clear - Empty out the glock hash table
1780  * @sdp: the filesystem
1781  * @wait: wait until it's all gone
1782  *
1783  * Called when unmounting the filesystem, or when the inter-node lock
1784  * manager requests DROPLOCKS because it is running out of capacity.
1785  */
1786
1787 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1788 {
1789         unsigned long t;
1790         unsigned int x;
1791         int cont;
1792
1793         t = jiffies;
1794
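             /*
              * Sweep every bucket, queueing an unlock demote for each glock
              * that has no holders. With @wait set, keep sweeping until all
              * buckets are empty, warning and dumping the lock state each
              * time gt_stall_secs pass without finishing.
              */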
1795         for (;;) {
1796                 cont = 0;
1797                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1798                         if (examine_bucket(clear_glock, sdp, x))
1799                                 cont = 1;
1800                 }
1801
1802                 if (!wait || !cont)
1803                         break;
1804
1805                 if (time_after_eq(jiffies,
1806                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1807                         fs_warn(sdp, "Unmount seems to be stalled. "
1808                                      "Dumping lock state...\n");
1809                         gfs2_dump_lockstate(sdp);
1810                         t = jiffies;
1811                 }
1812
1813                 down_write(&gfs2_umount_flush_sem);
1814                 invalidate_inodes(sdp->sd_vfs);
1815                 up_write(&gfs2_umount_flush_sem);
1816                 msleep(10);
1817         }
1818 }
1819
1820 /*
1821  *  Diagnostic routines to help debug distributed deadlock
1822  */
1823
1824 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1825                               unsigned long address)
1826 {
1827         char buffer[KSYM_SYMBOL_LEN];
1828
1829         sprint_symbol(buffer, address);
1830         print_dbg(gi, fmt, buffer);
1831 }
1832
1833 /**
1834  * dump_holder - print information about a glock holder
1835  * @gi: the glock iterator (if NULL, output goes to the console)
1836  * @str: a string naming the type of holder
1837  * @gh: the glock holder
1838  * Returns: 0 (this function cannot currently fail)
1839  */
1840
1841 static int dump_holder(struct glock_iter *gi, char *str,
1842                        struct gfs2_holder *gh)
1843 {
1844         unsigned int x;
1845         struct task_struct *gh_owner;
1846
1847         print_dbg(gi, "  %s\n", str);
1848         if (gh->gh_owner_pid) {
1849                 print_dbg(gi, "    owner = %ld ",
1850                                 (long)pid_nr(gh->gh_owner_pid));
1851                 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1852                 if (gh_owner)
1853                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1854                 else
1855                         print_dbg(gi, "(ended)\n");
1856         } else
1857                 print_dbg(gi, "    owner = -1\n");
1858         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1859         print_dbg(gi, "    gh_flags =");
1860         for (x = 0; x < 32; x++)
1861                 if (gh->gh_flags & (1 << x))
1862                         print_dbg(gi, " %u", x);
1863         print_dbg(gi, " \n");
1864         print_dbg(gi, "    error = %d\n", gh->gh_error);
1865         print_dbg(gi, "    gh_iflags =");
1866         for (x = 0; x < 32; x++)
1867                 if (test_bit(x, &gh->gh_iflags))
1868                         print_dbg(gi, " %u", x);
1869         print_dbg(gi, " \n");
1870         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1871
1872         return 0;
1873 }
1874
1875 /**
1876  * dump_inode - print information about an inode
1877  * @gi: the glock iterator (if NULL, output goes to the console)
1878  * @ip: the inode
1879  * Returns: 0 (this function cannot currently fail)
1880  */
1881
1882 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1883 {
1884         unsigned int x;
1885
1886         print_dbg(gi, "  Inode:\n");
1887         print_dbg(gi, "    num = %llu/%llu\n",
1888                   (unsigned long long)ip->i_no_formal_ino,
1889                   (unsigned long long)ip->i_no_addr);
1890         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1891         print_dbg(gi, "    i_flags =");
1892         for (x = 0; x < 32; x++)
1893                 if (test_bit(x, &ip->i_flags))
1894                         print_dbg(gi, " %u", x);
1895         print_dbg(gi, " \n");
1896         return 0;
1897 }
1898
1899 /**
1900  * dump_glock - print information about a glock
1901  * @gi: the glock iterator (if NULL, output goes to the console)
1902  * @gl: the glock
1903  *
1904  * Returns: 0 on success, -ENOBUFS when we run out of space
1905  */
1906
1907 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1908 {
1909         struct gfs2_holder *gh;
1910         unsigned int x;
1911         int error = -ENOBUFS;
1912         struct task_struct *gl_owner;
1913
1914         spin_lock(&gl->gl_spin);
1915
1916         print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1917                    (unsigned long long)gl->gl_name.ln_number);
1918         print_dbg(gi, "  gl_flags =");
1919         for (x = 0; x < 32; x++) {
1920                 if (test_bit(x, &gl->gl_flags))
1921                         print_dbg(gi, " %u", x);
1922         }
1923         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1924                 print_dbg(gi, " (unlocked)");
1925         print_dbg(gi, " \n");
1926         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1927         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1928         if (gl->gl_owner_pid) {
1929                 gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
1930                 if (gl_owner)
1931                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1932                                   pid_nr(gl->gl_owner_pid), gl_owner->comm);
1933                 else
1934                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1935                                   pid_nr(gl->gl_owner_pid));
1936         } else
1937                 print_dbg(gi, "  gl_owner = -1\n");
1938         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1939         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1940         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1941         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1942         print_dbg(gi, "  reclaim = %s\n",
1943                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1944         if (gl->gl_aspace)
1945                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1946                            gl->gl_aspace->i_mapping->nrpages);
1947         else
1948                 print_dbg(gi, "  aspace = no\n");
1949         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1950         if (gl->gl_req_gh) {
1951                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1952                 if (error)
1953                         goto out;
1954         }
1955         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1956                 error = dump_holder(gi, "Holder", gh);
1957                 if (error)
1958                         goto out;
1959         }
1960         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1961                 error = dump_holder(gi, "Waiter1", gh);
1962                 if (error)
1963                         goto out;
1964         }
1965         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1966                 error = dump_holder(gi, "Waiter3", gh);
1967                 if (error)
1968                         goto out;
1969         }
1970         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1971                 print_dbg(gi, "  Demotion req to state %u (%llu us ago)\n",
1972                           gl->gl_demote_state, (unsigned long long)
1973                           (jiffies - gl->gl_demote_time) * (1000000 / HZ));
1974         }
1975         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1976                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1977                         list_empty(&gl->gl_holders)) {
1978                         error = dump_inode(gi, gl->gl_object);
1979                         if (error)
1980                                 goto out;
1981                 } else {
1982                         error = -ENOBUFS;
1983                         print_dbg(gi, "  Inode: busy\n");
1984                 }
1985         }
1986
1987         error = 0;
1988
1989 out:
1990         spin_unlock(&gl->gl_spin);
1991         return error;
1992 }
1993
1994 /**
1995  * gfs2_dump_lockstate - print out the current lockstate
1996  * @sdp: the filesystem
1997  *
1998  * Dumps the state of every glock belonging to @sdp to the console
1999  * (dump_glock() is called with a NULL iterator).
2001  */
2002
2003 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2004 {
2005         struct gfs2_glock *gl;
2006         struct hlist_node *h;
2007         unsigned int x;
2008         int error = 0;
2009
2010         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2011
2012                 read_lock(gl_lock_addr(x));
2013
2014                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
2015                         if (gl->gl_sbd != sdp)
2016                                 continue;
2017
2018                         error = dump_glock(NULL, gl);
2019                         if (error)
2020                                 break;
2021                 }
2022
2023                 read_unlock(gl_lock_addr(x));
2024
2025                 if (error)
2026                         break;
2027         }
2028
2030         return error;
2031 }
2032
2033 /**
2034  * gfs2_scand - Look for cached glocks and inodes to toss from memory
2035  * @sdp: Pointer to GFS2 superblock
2036  * @data: unused (the single scand daemon scans every filesystem)
2037  *
2038  * Exactly one of these daemons runs for the whole module, finding
2039  * candidates to add to each filesystem's sd_reclaim_list. See gfs2_glockd().
2040
2041 static int gfs2_scand(void *data)
2042 {
2043         unsigned x;
2044         unsigned delay;
2045
2046         while (!kthread_should_stop()) {
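                     /*
                      * A NULL sdp makes examine_bucket() look at glocks
                      * from every mounted filesystem, not just one.
                      */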
2047                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2048                         examine_bucket(scan_glock, NULL, x);
2049                 if (freezing(current))
2050                         refrigerator();
2051                 delay = scand_secs;
2052                 if (delay < 1)
2053                         delay = 1;
2054                 schedule_timeout_interruptible(delay * HZ);
2055         }
2056
2057         return 0;
2058 }
2059
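     /**
      * gfs2_glock_init - set up the glock hash table, scand and the workqueue
      *
      * Returns: 0 on success, or a negative error code
      */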
2062 int __init gfs2_glock_init(void)
2063 {
2064         unsigned i;
2065         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2066                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2067         }
2068 #ifdef GL_HASH_LOCK_SZ
2069         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
2070                 rwlock_init(&gl_hash_locks[i]);
2071         }
2072 #endif
2073
2074         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2075         if (IS_ERR(scand_process))
2076                 return PTR_ERR(scand_process);
2077
2078         glock_workqueue = create_workqueue("glock_workqueue");
2079         if (!glock_workqueue) {        /* returns NULL on failure */
2080                 kthread_stop(scand_process);
2081                 return -ENOMEM;
2082         }
2083
2084         return 0;
2085 }
2086
2087 void gfs2_glock_exit(void)
2088 {
2089         destroy_workqueue(glock_workqueue);
2090         kthread_stop(scand_process);
2091 }
2092
2093 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2094 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
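
     /*
      * As the parameter is writable (S_IWUSR), the scan interval can also be
      * tuned at runtime, e.g. (assuming the module is named "gfs2"):
      *
      *   echo 10 > /sys/module/gfs2/parameters/scand_secs
      */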
2095
2096 static int gfs2_glock_iter_next(struct glock_iter *gi)
2097 {
2098         struct gfs2_glock *gl;
2099
2100 restart:
2101         read_lock(gl_lock_addr(gi->hash));
2102         gl = gi->gl;
2103         if (gl) {
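                     /*
                      * This relies on gl_list being the first member of
                      * struct gfs2_glock, so that hlist_entry() maps a NULL
                      * chain pointer back to NULL instead of a bogus
                      * address; the bucket probe below depends on the same.
                      */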
2104                 gi->gl = hlist_entry(gl->gl_list.next,
2105                                      struct gfs2_glock, gl_list);
2106                 if (gi->gl)
2107                         gfs2_glock_hold(gi->gl);
2108         }
2109         read_unlock(gl_lock_addr(gi->hash));
2110         if (gl)
2111                 gfs2_glock_put(gl);
2112         if (gl && gi->gl == NULL)
2113                 gi->hash++;
2114         while (gi->gl == NULL) {
2115                 if (gi->hash >= GFS2_GL_HASH_SIZE)
2116                         return 1;
2117                 read_lock(gl_lock_addr(gi->hash));
2118                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2119                                      struct gfs2_glock, gl_list);
2120                 if (gi->gl)
2121                         gfs2_glock_hold(gi->gl);
2122                 read_unlock(gl_lock_addr(gi->hash));
2123                 gi->hash++;
2124         }
2125
2126         if (gi->sdp != gi->gl->gl_sbd)
2127                 goto restart;
2128
2129         return 0;
2130 }
2131
2132 static void gfs2_glock_iter_free(struct glock_iter *gi)
2133 {
2134         if (gi->gl)
2135                 gfs2_glock_put(gi->gl);
2136         kfree(gi);
2137 }
2138
2139 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2140 {
2141         struct glock_iter *gi;
2142
2143         gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2144         if (!gi)
2145                 return NULL;
2146
2147         gi->sdp = sdp;
2148         gi->hash = 0;
2149         gi->seq = NULL;
2150         gi->gl = NULL;
2151         memset(gi->string, 0, sizeof(gi->string));
2152
2153         if (gfs2_glock_iter_next(gi)) {
2154                 gfs2_glock_iter_free(gi);
2155                 return NULL;
2156         }
2157
2158         return gi;
2159 }
2160
2161 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2162 {
2163         struct glock_iter *gi;
2164         loff_t n = *pos;
2165
2166         gi = gfs2_glock_iter_init(file->private);
2167         if (!gi)
2168                 return NULL;
2169
2170         while (n--) {
2171                 if (gfs2_glock_iter_next(gi)) {
2172                         gfs2_glock_iter_free(gi);
2173                         return NULL;
2174                 }
2175         }
2176
2177         return gi;
2178 }
2179
2180 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2181                                  loff_t *pos)
2182 {
2183         struct glock_iter *gi = iter_ptr;
2184
2185         (*pos)++;
2186
2187         if (gfs2_glock_iter_next(gi)) {
2188                 gfs2_glock_iter_free(gi);
2189                 return NULL;
2190         }
2191
2192         return gi;
2193 }
2194
2195 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2196 {
2197         struct glock_iter *gi = iter_ptr;
2198         if (gi)
2199                 gfs2_glock_iter_free(gi);
2200 }
2201
2202 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2203 {
2204         struct glock_iter *gi = iter_ptr;
2205
2206         gi->seq = file;
2207         dump_glock(gi, gi->gl);
2208
2209         return 0;
2210 }
2211
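     /*
      * seq_file iterator glue: ->start allocates an iterator and winds it
      * forward to *pos, ->next advances it by one glock, ->show dumps the
      * current glock, and ->stop frees the iterator again.
      */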
2212 static const struct seq_operations gfs2_glock_seq_ops = {
2213         .start = gfs2_glock_seq_start,
2214         .next  = gfs2_glock_seq_next,
2215         .stop  = gfs2_glock_seq_stop,
2216         .show  = gfs2_glock_seq_show,
2217 };
2218
2219 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2220 {
2221         struct seq_file *seq;
2222         int ret;
2223
2224         ret = seq_open(file, &gfs2_glock_seq_ops);
2225         if (ret)
2226                 return ret;
2227
2228         seq = file->private_data;
2229         seq->private = inode->i_private;
2230
2231         return 0;
2232 }
2233
2234 static const struct file_operations gfs2_debug_fops = {
2235         .owner   = THIS_MODULE,
2236         .open    = gfs2_debugfs_open,
2237         .read    = seq_read,
2238         .llseek  = seq_lseek,
2239         .release = seq_release
2240 };
2241
2242 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2243 {
2244         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2245         if (!sdp->debugfs_dir)
2246                 return -ENOMEM;
2247         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2248                                                          S_IFREG | S_IRUGO,
2249                                                          sdp->debugfs_dir, sdp,
2250                                                          &gfs2_debug_fops);
2251         if (!sdp->debugfs_dentry_glocks)
2252                 return -ENOMEM;
2253
2254         return 0;
2255 }
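
     /*
      * Assuming debugfs is mounted at the conventional /sys/kernel/debug,
      * the file created above makes each filesystem's glock state readable
      * via:
      *
      *   cat /sys/kernel/debug/gfs2/<table_name>/glocks
      */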
2256
2257 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2258 {
2259         if (sdp && sdp->debugfs_dir) {
2260                 if (sdp->debugfs_dentry_glocks) {
2261                         debugfs_remove(sdp->debugfs_dentry_glocks);
2262                         sdp->debugfs_dentry_glocks = NULL;
2263                 }
2264                 debugfs_remove(sdp->debugfs_dir);
2265                 sdp->debugfs_dir = NULL;
2266         }
2267 }
2268
2269 int gfs2_register_debugfs(void)
2270 {
2271         gfs2_root = debugfs_create_dir("gfs2", NULL);
2272         return gfs2_root ? 0 : -ENOMEM;
2273 }
2274
2275 void gfs2_unregister_debugfs(void)
2276 {
2277         debugfs_remove(gfs2_root);
2278         gfs2_root = NULL;
2279 }