/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

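/*
 * Illustrative sketch (not part of the original file): callers pair
 * gl_lock_addr() with read_lock()/read_unlock() (or the write variants)
 * around hash chain walks, e.g.:
 *
 *         read_lock(gl_lock_addr(hash));
 *         gl = search_bucket(hash, sdp, &name);
 *         read_unlock(gl_lock_addr(hash));
 *
 * On !SMP builds gl_lock_addr() returns NULL, which the uniprocessor
 * lock macros simply ignore.
 */
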
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}

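/*
 * Illustrative examples (not part of the original file) of how the
 * checks above combine:
 *
 *         relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)            -> 1
 *         relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)     -> 0
 *         relaxed_state_ok(LM_ST_SHARED, LM_ST_DEFERRED, LM_FLAG_ANY)   -> 1
 *         relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY)   -> 0
 */
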
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock the glock belongs to
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the index of the bucket to search
 * @sdp: the filesystem the glock belongs to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
                            void **lockp)
{
        int error = -EIO;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
                                sdp->sd_lockstruct.ls_lockspace, name, lockp);
        return error;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = NULL;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}

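/*
 * Illustrative sketch (not part of the original file): a typical
 * lookup-or-create call, with the reference dropped once the caller
 * is finished:
 *
 *         struct gfs2_glock *gl;
 *         int error;
 *
 *         error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *         if (!error) {
 *                 ... use gl ...
 *                 gfs2_glock_put(gl);
 *         }
 */
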
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }

        set_bit(GLF_LOCK, &gl->gl_flags);
        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_mutex(gh);
                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                        if (test_bit(GLF_WAITERS2, &gl->gl_flags) && !blocked) {
                                set_bit(GLF_DEMOTE, &gl->gl_flags);
                                gl->gl_demote_state = LM_ST_UNLOCKED;
                        }
                        clear_bit(GLF_WAITERS2, &gl->gl_flags);
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_promote(gh);
                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                struct gfs2_holder gh;

                gfs2_holder_init(gl, 0, 0, &gh);
                set_bit(HIF_WAIT, &gh.gh_iflags);
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
                spin_unlock(&gl->gl_spin);
                wait_on_holder(&gh);
                gfs2_holder_uninit(&gh);
        } else {
                gl->gl_owner_pid = get_pid(task_pid(current));
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                spin_unlock(&gl->gl_spin);
        }
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = get_pid(task_pid(current));
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        struct pid *pid;

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        pid = gl->gl_owner_pid;
        gl->gl_owner_pid = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        put_pid(pid);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                        set_bit(GLF_WAITERS2, &gl->gl_flags);
                else
                        gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
                spin_lock(&gl->gl_spin);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, gl->gl_req_gh);
                gfs2_glock_put(gl);
                return;
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int op_done = 1;

        if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
                drop_bh(gl, ret);
                return;
        }

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        /*  Deal with each possible exit condition  */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                if (ret & LM_OUT_CONV_DEADLK) {
                        gh->gh_error = 0;
                        set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_drop_th(gl);
                        gfs2_glock_put(gl);
                        return;
                }
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
                                 unsigned int cur_state, unsigned int req_state,
                                 unsigned int flags)
{
        int ret = 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
                                                         req_state, flags);
        return ret;
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder requesting the new state (or NULL when demoting)
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);
        if (state == LM_ST_DEFERRED && glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
                                   unsigned int cur_state)
{
        int ret = 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
        return ret;
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);
        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                                sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct pid *pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else
                vprintk(fmt, args);
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n",
                               pid_nr(existing->gh_owner_pid));
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n",
                               pid_nr(gh->gh_owner_pid));
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}

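/*
 * Illustrative sketch (not part of the original file): the usual
 * synchronous acquire/release pattern built on the functions above,
 * here taking a shared hold on some glock "gl":
 *
 *         struct gfs2_holder gh;
 *         int error;
 *
 *         gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *         error = gfs2_glock_nq(&gh);
 *         if (error) {
 *                 gfs2_holder_uninit(&gh);
 *                 return error;
 *         }
 *         ... protected region ...
 *         gfs2_glock_dq_uninit(&gh);
 */
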
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that was started with GL_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

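/*
 * Illustrative sketch (not part of the original file): the asynchronous
 * variant; GL_ASYNC makes gfs2_glock_nq() queue the request and return,
 * and the caller finishes the acquisition later:
 *
 *         gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *         gfs2_glock_nq(&gh);
 *         ... do other work, optionally checking gfs2_glock_poll(&gh) ...
 *         error = gfs2_glock_wait(&gh);
 */
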
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                if (glops->go_unlock) {
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                }
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

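/*
 * Illustrative sketch (not part of the original file): acquiring two
 * glocks at once; gfs2_glock_nq_m() sorts the holders by lock number
 * (see glock_compare()) so all nodes take them in the same order:
 *
 *         struct gfs2_holder ghs[2];
 *
 *         gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *         gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *         error = gfs2_glock_nq_m(2, ghs);
 *         if (!error) {
 *                 ...
 *                 gfs2_glock_dq_m(2, ghs);
 *         }
 *         gfs2_holder_uninit(&ghs[0]);
 *         gfs2_holder_uninit(&ghs[1]);
 */
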
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
{
        int error = -EIO;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
        return error;
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

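/*
 * Illustrative sketch (not part of the original file): LVB holds are
 * reference counted, so every gfs2_lvb_hold() must be balanced by a
 * gfs2_lvb_unhold() once the caller is done with gl->gl_lvb:
 *
 *         error = gfs2_lvb_hold(gl);
 *         if (!error) {
 *                 ... read or update gl->gl_lvb ...
 *                 gfs2_lvb_unhold(gl);
 *         }
 */
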
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                xmote_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                wake_up(&sdp->sd_reclaim_wq);
        } else
                spin_unlock(&sdp->sd_reclaim_lock);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the index of the bucket
 *
 * Returns: 1 if the bucket has entries
 */

1682 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1683                           unsigned int hash)
1684 {
1685         struct gfs2_glock *gl, *prev = NULL;
1686         int has_entries = 0;
1687         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1688
1689         read_lock(gl_lock_addr(hash));
1690         /* Can't use hlist_for_each_entry - don't want prefetch here */
1691         if (hlist_empty(head))
1692                 goto out;
1693         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1694         while (1) {
1695                 if (!sdp || gl->gl_sbd == sdp) {
1696                         gfs2_glock_hold(gl);
1697                         read_unlock(gl_lock_addr(hash));
1698                         if (prev)
1699                                 gfs2_glock_put(prev);
1700                         prev = gl;
1701                         examiner(gl);
1702                         has_entries = 1;
1703                         read_lock(gl_lock_addr(hash));
1704                 }
1705                 if (gl->gl_list.next == NULL)
1706                         break;
1707                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1708         }
1709 out:
1710         read_unlock(gl_lock_addr(hash));
1711         if (prev)
1712                 gfs2_glock_put(prev);
1713         cond_resched();
1714         return has_entries;
1715 }
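
/*
 * Both callers of examine_bucket() sweep the whole table, one bucket at
 * a time, e.g.:
 *
 *	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
 *		examine_bucket(scan_glock, NULL, x);
 *
 * Passing a NULL sdp (as gfs2_scand() does) examines glocks belonging to
 * every mounted filesystem, since the hash table is global.
 */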
1716
1717 /**
1718  * scan_glock - look at a glock and see if we can reclaim it
1719  * @gl: the glock to look at
1720  *
1721  */
1722
1723 static void scan_glock(struct gfs2_glock *gl)
1724 {
1725         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1726                 return;
1727
1728         if (gfs2_glmutex_trylock(gl)) {
1729                 if (list_empty(&gl->gl_holders) &&
1730                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1731                         goto out_schedule;
1732                 gfs2_glmutex_unlock(gl);
1733         }
1734         return;
1735
1736 out_schedule:
1737         gfs2_glmutex_unlock(gl);
1738         gfs2_glock_schedule_for_reclaim(gl);
1739 }
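
/*
 * scan_glock() deliberately skips inode glocks that still have a
 * gl_object attached: those are pinned by their in-core inode and are
 * expected to be released through the normal inode reclaim path rather
 * than through this list.
 */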
1740
1741 /**
1742  * clear_glock - look at a glock and see if we can free it from the glock cache
1743  * @gl: the glock to look at
1744  *
1745  */
1746
1747 static void clear_glock(struct gfs2_glock *gl)
1748 {
1749         struct gfs2_sbd *sdp = gl->gl_sbd;
1750         int released;
1751
1752         spin_lock(&sdp->sd_reclaim_lock);
1753         if (!list_empty(&gl->gl_reclaim)) {
1754                 list_del_init(&gl->gl_reclaim);
1755                 atomic_dec(&sdp->sd_reclaim_count);
1756                 spin_unlock(&sdp->sd_reclaim_lock);
1757                 released = gfs2_glock_put(gl);
1758                 gfs2_assert(sdp, !released);
1759         } else {
1760                 spin_unlock(&sdp->sd_reclaim_lock);
1761         }
1762
1763         if (gfs2_glmutex_trylock(gl)) {
1764                 if (list_empty(&gl->gl_holders) &&
1765                     gl->gl_state != LM_ST_UNLOCKED)
1766                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1767                 gfs2_glmutex_unlock(gl);
1768         }
1769 }
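
/*
 * Unlike scan_glock(), clear_glock() does not consult demote_ok(): at
 * unmount (or on a DROPLOCKS request) every unheld glock must be
 * demoted, sticky or not, so only the holder list and the current state
 * are checked before queueing the demote.
 */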
1770
1771 /**
1772  * gfs2_gl_hash_clear - Empty out the glock hash table
1773  * @sdp: the filesystem
1774  * @wait: wait until it's all gone
1775  *
1776  * Called when unmounting the filesystem, or when the inter-node lock
1777  * manager requests DROPLOCKS because it is running out of capacity.
1778  */
1779
1780 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1781 {
1782         unsigned long t;
1783         unsigned int x;
1784         int cont;
1785
1786         t = jiffies;
1787
1788         for (;;) {
1789                 cont = 0;
1790                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1791                         if (examine_bucket(clear_glock, sdp, x))
1792                                 cont = 1;
1793                 }
1794
1795                 if (!wait || !cont)
1796                         break;
1797
1798                 if (time_after_eq(jiffies,
1799                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1800                         fs_warn(sdp, "Unmount seems to be stalled. "
1801                                      "Dumping lock state...\n");
1802                         gfs2_dump_lockstate(sdp);
1803                         t = jiffies;
1804                 }
1805
1806                 down_write(&gfs2_umount_flush_sem);
1807                 invalidate_inodes(sdp->sd_vfs);
1808                 up_write(&gfs2_umount_flush_sem);
1809                 msleep(10);
1810         }
1811 }
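
/*
 * The two callers differ only in @wait: the DROPLOCKS callback above
 * uses NO_WAIT so the lock module's thread is never blocked here, while
 * the unmount path is expected to pass WAIT and loop (dumping the lock
 * state if things stall for longer than gt_stall_secs) until every
 * glock is gone.
 */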
1812
1813 /*
1814  *  Diagnostic routines to help debug distributed deadlocks
1815  */
1816
1817 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1818                               unsigned long address)
1819 {
1820         char buffer[KSYM_SYMBOL_LEN];
1821
1822         sprint_symbol(buffer, address);
1823         print_dbg(gi, fmt, buffer);
1824 }
1825
1826 /**
1827  * dump_holder - print information about a glock holder
1828  * @str: a string naming the type of holder
1829  * @gh: the glock holder
1830  *
1831  * Returns: 0
1832  */
1833
1834 static int dump_holder(struct glock_iter *gi, char *str,
1835                        struct gfs2_holder *gh)
1836 {
1837         unsigned int x;
1838         struct task_struct *gh_owner;
1839
1840         print_dbg(gi, "  %s\n", str);
1841         if (gh->gh_owner_pid) {
1842                 print_dbg(gi, "    owner = %ld ",
1843                                 (long)pid_nr(gh->gh_owner_pid));
1844                 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1845                 if (gh_owner)
1846                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1847                 else
1848                         print_dbg(gi, "(ended)\n");
1849         } else
1850                 print_dbg(gi, "    owner = -1\n");
1851         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1852         print_dbg(gi, "    gh_flags =");
1853         for (x = 0; x < 32; x++)
1854                 if (gh->gh_flags & (1 << x))
1855                         print_dbg(gi, " %u", x);
1856         print_dbg(gi, " \n");
1857         print_dbg(gi, "    error = %d\n", gh->gh_error);
1858         print_dbg(gi, "    gh_iflags =");
1859         for (x = 0; x < 32; x++)
1860                 if (test_bit(x, &gh->gh_iflags))
1861                         print_dbg(gi, " %u", x);
1862         print_dbg(gi, " \n");
1863         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1864
1865         return 0;
1866 }
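
/*
 * With hypothetical values filled in, dump_holder() produces output of
 * the form:
 *
 *	  Holder
 *	    owner = 2314 (gfs2_quotad)
 *	    gh_state = 3
 *	    gh_flags = 5
 *	    error = 0
 *	    gh_iflags = 1 6
 *	    initialized at: gfs2_glock_nq_init+0x32/0x60
 */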
1867
1868 /**
1869  * dump_inode - print information about an inode
1870  * @ip: the inode
1871  *
1872  * Returns: 0
1873  */
1874
1875 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1876 {
1877         unsigned int x;
1878
1879         print_dbg(gi, "  Inode:\n");
1880         print_dbg(gi, "    num = %llu/%llu\n",
1881                   (unsigned long long)ip->i_no_formal_ino,
1882                   (unsigned long long)ip->i_no_addr);
1883         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1884         print_dbg(gi, "    i_flags =");
1885         for (x = 0; x < 32; x++)
1886                 if (test_bit(x, &ip->i_flags))
1887                         print_dbg(gi, " %u", x);
1888         print_dbg(gi, " \n");
1889         return 0;
1890 }
1891
1892 /**
1893  * dump_glock - print information about a glock
1894  * @gi: the glock iterator (or NULL to print to the console)
1895  * @gl: the glock
1896  *
1897  * Returns: 0 on success, -ENOBUFS when we run out of space
1898  */
1899
1900 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1901 {
1902         struct gfs2_holder *gh;
1903         unsigned int x;
1904         int error = -ENOBUFS;
1905         struct task_struct *gl_owner;
1906
1907         spin_lock(&gl->gl_spin);
1908
1909         print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1910                    (unsigned long long)gl->gl_name.ln_number);
1911         print_dbg(gi, "  gl_flags =");
1912         for (x = 0; x < 32; x++) {
1913                 if (test_bit(x, &gl->gl_flags))
1914                         print_dbg(gi, " %u", x);
1915         }
1916         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1917                 print_dbg(gi, " (unlocked)");
1918         print_dbg(gi, " \n");
1919         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1920         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1921         if (gl->gl_owner_pid) {
1922                 gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
1923                 if (gl_owner)
1924                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1925                                   pid_nr(gl->gl_owner_pid), gl_owner->comm);
1926                 else
1927                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1928                                   pid_nr(gl->gl_owner_pid));
1929         } else
1930                 print_dbg(gi, "  gl_owner = -1\n");
1931         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1932         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1933         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1934         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1935         print_dbg(gi, "  reclaim = %s\n",
1936                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1937         if (gl->gl_aspace)
1938                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1939                            gl->gl_aspace->i_mapping->nrpages);
1940         else
1941                 print_dbg(gi, "  aspace = no\n");
1942         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1943         if (gl->gl_req_gh) {
1944                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1945                 if (error)
1946                         goto out;
1947         }
1948         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1949                 error = dump_holder(gi, "Holder", gh);
1950                 if (error)
1951                         goto out;
1952         }
1953         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1954                 error = dump_holder(gi, "Waiter1", gh);
1955                 if (error)
1956                         goto out;
1957         }
1958         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1959                 error = dump_holder(gi, "Waiter3", gh);
1960                 if (error)
1961                         goto out;
1962         }
1963         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1964                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1965                           gl->gl_demote_state, (unsigned long long)
1966                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1967         }
1968         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1969                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1970                         list_empty(&gl->gl_holders)) {
1971                         error = dump_inode(gi, gl->gl_object);
1972                         if (error)
1973                                 goto out;
1974                 } else {
1975                         error = -ENOBUFS;
1976                         print_dbg(gi, "  Inode: busy\n");
1977                 }
1978         }
1979
1980         error = 0;
1981
1982 out:
1983         spin_unlock(&gl->gl_spin);
1984         return error;
1985 }
1986
1987 /**
1988  * gfs2_dump_lockstate - print out the current lockstate
1989  * @sdp: the filesystem
1990  *
1991  * Dumps the state of every glock belonging to @sdp to the console.
1994  */
1995
1996 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1997 {
1998         struct gfs2_glock *gl;
1999         struct hlist_node *h;
2000         unsigned int x;
2001         int error = 0;
2002
2003         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2004
2005                 read_lock(gl_lock_addr(x));
2006
2007                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
2008                         if (gl->gl_sbd != sdp)
2009                                 continue;
2010
2011                         error = dump_glock(NULL, gl);
2012                         if (error)
2013                                 break;
2014                 }
2015
2016                 read_unlock(gl_lock_addr(x));
2017
2018                 if (error)
2019                         break;
2020         }
2021
2023         return error;
2024 }
2025
2026 /**
2027  * gfs2_scand - Look for cached glocks and inodes to toss from memory
2028  * @sdp: Pointer to GFS2 superblock
2029  *
2030  * A single instance of this daemon runs for the whole module, finding
2031  * candidates to add to sd_reclaim_list.  See gfs2_glockd().
2032  */
2033
2034 static int gfs2_scand(void *data)
2035 {
2036         unsigned x;
2037         unsigned delay;
2038
2039         while (!kthread_should_stop()) {
2040                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2041                         examine_bucket(scan_glock, NULL, x);
2042                 if (freezing(current))
2043                         refrigerator();
2044                 delay = scand_secs;
2045                 if (delay < 1)
2046                         delay = 1;
2047                 schedule_timeout_interruptible(delay * HZ);
2048         }
2049
2050         return 0;
2051 }
2054
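/**
 * gfs2_glock_init - initialise the glock module state
 *
 * Sets up the (initially empty) hash buckets and their locks, then
 * starts the scand daemon and the glock workqueue.
 *
 * Returns: 0 on success, or a negative errno
 */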
2055 int __init gfs2_glock_init(void)
2056 {
2057         unsigned i;
2058         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2059                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2060         }
2061 #ifdef GL_HASH_LOCK_SZ
2062         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
2063                 rwlock_init(&gl_hash_locks[i]);
2064         }
2065 #endif
2066
2067         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2068         if (IS_ERR(scand_process))
2069                 return PTR_ERR(scand_process);
2070
2071         glock_workqueue = create_workqueue("glock_workqueue");
2072         if (!glock_workqueue) {  /* create_workqueue() returns NULL on failure */
2073                 kthread_stop(scand_process);
2074                 return -ENOMEM;
2075         }
2076
2077         return 0;
2078 }
2079
2080 void gfs2_glock_exit(void)
2081 {
2082         destroy_workqueue(glock_workqueue);
2083         kthread_stop(scand_process);
2084 }
2085
2086 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2087 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
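
/*
 * Since scand_secs is exported with S_IWUSR, the scan interval can be
 * retuned at runtime from userspace, e.g. (assuming sysfs is mounted in
 * the conventional place):
 *
 *	echo 30 > /sys/module/gfs2/parameters/scand_secs
 *
 * gfs2_scand() clamps the value to a minimum of one second.
 */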
2088
2089 static int gfs2_glock_iter_next(struct glock_iter *gi)
2090 {
2091         struct gfs2_glock *gl;
2092
2093 restart:
2094         read_lock(gl_lock_addr(gi->hash));
2095         gl = gi->gl;
2096         if (gl) {
2097                 gi->gl = hlist_entry(gl->gl_list.next,
2098                                      struct gfs2_glock, gl_list);
2099                 if (gi->gl)
2100                         gfs2_glock_hold(gi->gl);
2101         }
2102         read_unlock(gl_lock_addr(gi->hash));
2103         if (gl)
2104                 gfs2_glock_put(gl);
2105         if (gl && gi->gl == NULL)
2106                 gi->hash++;
2107         while (gi->gl == NULL) {
2108                 if (gi->hash >= GFS2_GL_HASH_SIZE)
2109                         return 1;
2110                 read_lock(gl_lock_addr(gi->hash));
2111                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2112                                      struct gfs2_glock, gl_list);
2113                 if (gi->gl)
2114                         gfs2_glock_hold(gi->gl);
2115                 read_unlock(gl_lock_addr(gi->hash));
2116                 gi->hash++;
2117         }
2118
2119         if (gi->sdp != gi->gl->gl_sbd)
2120                 goto restart;
2121
2122         return 0;
2123 }
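
/*
 * gfs2_glock_iter_next() returns 0 with a reference held on the new
 * gi->gl, or 1 once the table is exhausted.  The restart label steps
 * over glocks belonging to other superblocks, since the hash table is
 * shared by every mounted GFS2 filesystem.
 */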
2124
2125 static void gfs2_glock_iter_free(struct glock_iter *gi)
2126 {
2127         if (gi->gl)
2128                 gfs2_glock_put(gi->gl);
2129         kfree(gi);
2130 }
2131
2132 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2133 {
2134         struct glock_iter *gi;
2135
2136         gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2137         if (!gi)
2138                 return NULL;
2139
2140         gi->sdp = sdp;
2141         gi->hash = 0;
2142         gi->seq = NULL;
2143         gi->gl = NULL;
2144         memset(gi->string, 0, sizeof(gi->string));
2145
2146         if (gfs2_glock_iter_next(gi)) {
2147                 gfs2_glock_iter_free(gi);
2148                 return NULL;
2149         }
2150
2151         return gi;
2152 }
2153
2154 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2155 {
2156         struct glock_iter *gi;
2157         loff_t n = *pos;
2158
2159         gi = gfs2_glock_iter_init(file->private);
2160         if (!gi)
2161                 return NULL;
2162
2163         while (n--) {
2164                 if (gfs2_glock_iter_next(gi)) {
2165                         gfs2_glock_iter_free(gi);
2166                         return NULL;
2167                 }
2168         }
2169
2170         return gi;
2171 }
2172
2173 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2174                                  loff_t *pos)
2175 {
2176         struct glock_iter *gi = iter_ptr;
2177
2178         (*pos)++;
2179
2180         if (gfs2_glock_iter_next(gi)) {
2181                 gfs2_glock_iter_free(gi);
2182                 return NULL;
2183         }
2184
2185         return gi;
2186 }
2187
2188 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2189 {
2190         struct glock_iter *gi = iter_ptr;
2191         if (gi)
2192                 gfs2_glock_iter_free(gi);
2193 }
2194
2195 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2196 {
2197         struct glock_iter *gi = iter_ptr;
2198
2199         gi->seq = file;
2200         dump_glock(gi, gi->gl);
2201
2202         return 0;
2203 }
2204
2205 static const struct seq_operations gfs2_glock_seq_ops = {
2206         .start = gfs2_glock_seq_start,
2207         .next  = gfs2_glock_seq_next,
2208         .stop  = gfs2_glock_seq_stop,
2209         .show  = gfs2_glock_seq_show,
2210 };
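
/*
 * Note that ->start rebuilds the iterator and re-walks the table from
 * the beginning for every buffer-full that seq_read() requests, giving
 * roughly O(n^2) behaviour over a whole dump; acceptable for a
 * debugging interface, if slow on a busy filesystem.
 */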
2211
2212 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2213 {
2214         struct seq_file *seq;
2215         int ret;
2216
2217         ret = seq_open(file, &gfs2_glock_seq_ops);
2218         if (ret)
2219                 return ret;
2220
2221         seq = file->private_data;
2222         seq->private = inode->i_private;
2223
2224         return 0;
2225 }
2226
2227 static const struct file_operations gfs2_debug_fops = {
2228         .owner   = THIS_MODULE,
2229         .open    = gfs2_debugfs_open,
2230         .read    = seq_read,
2231         .llseek  = seq_lseek,
2232         .release = seq_release
2233 };
2234
2235 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2236 {
2237         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2238         if (!sdp->debugfs_dir)
2239                 return -ENOMEM;
2240         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2241                                                          S_IFREG | S_IRUGO,
2242                                                          sdp->debugfs_dir, sdp,
2243                                                          &gfs2_debug_fops);
2244         if (!sdp->debugfs_dentry_glocks)
2245                 return -ENOMEM;
2246
2247         return 0;
2248 }
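
/*
 * With debugfs mounted in its conventional location, the per-sb dump
 * created above can be read with:
 *
 *	cat /sys/kernel/debug/gfs2/<table_name>/glocks
 *
 * where <table_name> is the cluster lock table name held in
 * sd_table_name.
 */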
2249
2250 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2251 {
2252         if (sdp && sdp->debugfs_dir) {
2253                 if (sdp->debugfs_dentry_glocks) {
2254                         debugfs_remove(sdp->debugfs_dentry_glocks);
2255                         sdp->debugfs_dentry_glocks = NULL;
2256                 }
2257                 debugfs_remove(sdp->debugfs_dir);
2258                 sdp->debugfs_dir = NULL;
2259         }
2260 }
2261
2262 int gfs2_register_debugfs(void)
2263 {
2264         gfs2_root = debugfs_create_dir("gfs2", NULL);
2265         return gfs2_root ? 0 : -ENOMEM;
2266 }
2267
2268 void gfs2_unregister_debugfs(void)
2269 {
2270         debugfs_remove(gfs2_root);
2271         gfs2_root = NULL;
2272 }