/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <asm/uaccess.h>

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static int dump_inode(struct gfs2_inode *ip);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}

#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
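/*
 * Illustrative sketch, not part of the original file (the example_ name
 * is hypothetical): because GFS2_GL_HASH_SIZE and GL_HASH_LOCK_SZ are
 * both powers of two with GL_HASH_LOCK_SZ the smaller, masking one hash
 * value with the two different masks maps every chain to exactly one
 * rwlock, so a small lock array can guard a much larger bucket array.
 * Compiled out via #if 0.
 */
#if 0
static void example_two_mask_lookup(unsigned int hash)
{
        struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[hash & GFS2_GL_HASH_MASK];
        rwlock_t *lock = gl_lock_addr(hash);    /* hash & (GL_HASH_LOCK_SZ - 1) */

        read_lock(lock);
        /* ... walk bucket->hb_list under the read lock ... */
        (void)bucket;
        read_unlock(lock);
}
#endif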
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
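/*
 * Illustrative checks, not part of the original file: how the rules
 * above combine.  An existing LM_ST_EXCLUSIVE lock satisfies an
 * LM_ST_SHARED request unless GL_EXACT was passed, and LM_FLAG_ANY
 * accepts any state except LM_ST_UNLOCKED.  Compiled out via #if 0;
 * example_relaxed_state is a hypothetical name.
 */
#if 0
static void example_relaxed_state(void)
{
        BUG_ON(relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0) != 1);
        BUG_ON(relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT) != 0);
        BUG_ON(relaxed_state_ok(LM_ST_SHARED, LM_ST_EXCLUSIVE, 0) != 0);
        BUG_ON(relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY) != 0);
}
#endif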
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The filesystem
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
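/*
 * Illustrative sketch, not part of the original file: gl_hash() folds a
 * composite key -- (lock number, lock type, superblock pointer) -- into
 * one bucket index by feeding each jhash() result back in as the next
 * call's initval, so identical lock names on different filesystems
 * still land in different buckets.  Compiled out; the example_ name is
 * hypothetical.
 */
#if 0
static unsigned int example_composite_hash(const struct gfs2_sbd *sdp,
                                           u64 number, unsigned int type)
{
        unsigned int h = jhash(&number, sizeof(u64), 0);
        h = jhash(&type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        return h & GFS2_GL_HASH_MASK;
}
#endif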
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}
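/*
 * Illustrative usage, not part of the original file: every
 * gfs2_glock_hold() must be balanced by a gfs2_glock_put(), and the
 * final put unhashes and frees the glock, so the pointer must not be
 * touched afterwards.  Compiled out; example_ref_pairing is a
 * hypothetical name.
 */
#if 0
static void example_ref_pairing(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        /* ... gl may be dereferenced safely here ... */
        gfs2_glock_put(gl);     /* frees gl if this was the last reference */
}
#endif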
/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;

        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);

        return empty;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);
                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_hash = hash;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
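/*
 * The lookup/insert dance above is a find-or-create under a
 * reader/writer lock: search with the read lock held, allocate with no
 * lock held, then search again under the write lock in case another
 * CPU inserted the same name in the meantime, freeing our copy if it
 * did.  Illustrative caller, not part of the original file; the lock
 * number and the example_ name are placeholders.  Compiled out.
 */
#if 0
static int example_get_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, 1234 /* placeholder lock number */,
                               &gfs2_inode_glops, CREATE, &gl);
        if (error)
                return error;
        /* ... use gl ... */
        gfs2_glock_put(gl);     /* drop the reference taken by _get */
        return 0;
}
#endif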
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
{
        if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
                gfs2_holder_put(gh);
                return;
        }
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int holder_wait(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}
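/*
 * wait_on_holder() and gfs2_holder_dispose_or_wake() form the usual
 * wait_on_bit()/wake_up_bit() pairing: the waiter sleeps until HIF_WAIT
 * clears, and the waker must clear the bit, issue a full memory
 * barrier, and only then wake, exactly as the code above does.
 * Illustrative waker, not part of the original file; the example_ name
 * is hypothetical.  Compiled out.
 */
#if 0
static void example_wake_holder(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();       /* make the cleared bit visible before waking */
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
#endif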
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock to a more restrictive state.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_dispose_or_wake(gh);

        return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                gfs2_holder_dispose_or_wake(gh);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}
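/*
 * GLF_LOCK plus the gl_waiters1 queue is a hand-rolled mutex: trylock
 * is a test_and_set_bit() under gl_spin, and unlock clears the bit and
 * lets run_queue() hand the glock to the next waiter.  Illustrative
 * usage, not part of the original file; example_with_glmutex is a
 * hypothetical name.  Compiled out.
 */
#if 0
static void example_with_glmutex(struct gfs2_glock *gl)
{
        if (gfs2_glmutex_trylock(gl)) {
                /* exclusive access to the glock structure here */
                gfs2_glmutex_unlock(gl);
        }
}
#endif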
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
                if (!new_gh)
                        return;
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
                set_bit(HIF_WAIT, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /* Deal with each possible exit condition */

        if (!gh)
                gl->gl_stamp = jiffies;
        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);
        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED) {
                        gh->gh_error = 0;
                } else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED);

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_dispose_or_wake(gh);
}
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
                glops->go_sync(gl);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and
 * drops the reference on the glock that the top half took out.
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_dispose_or_wake(gh);
}
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
                glops->go_sync(gl);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                       gl->gl_name.ln_type, gl->gl_state);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}
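/*
 * Typical single-lock sequence, not part of the original file:
 * gfs2_glock_nq_init() (a helper combining gfs2_holder_init() with
 * gfs2_glock_nq()) acquires the glock, and gfs2_glock_dq_uninit()
 * releases it and drops the holder's reference in one call.  Compiled
 * out; example_read_locked is a hypothetical name.
 */
#if 0
static int example_read_locked(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
        if (error)
                return error;
        /* ... read state protected by the glock ... */
        gfs2_glock_dq_uninit(&gh);
        return 0;
}
#endif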
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
                return 1;
        if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
                return 1;

        return 0;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        /* Wait for all locks to be granted */

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);
        return error;
}
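/*
 * Illustrative caller, not part of the original file: holders may be
 * passed to gfs2_glock_nq_m() in any order; on the synchronous fallback
 * path nq_m_sync() sorts them with glock_compare() so every node takes
 * the set in the same global order, which is what makes the acquisition
 * deadlock free.  Compiled out; example_lock_pair is a hypothetical
 * name.
 */
#if 0
static int example_lock_pair(struct gfs2_glock *a, struct gfs2_glock *b)
{
        struct gfs2_holder ghs[2];
        int error;

        gfs2_holder_init(a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
        gfs2_holder_init(b, LM_ST_EXCLUSIVE, 0, &ghs[1]);

        error = gfs2_glock_nq_m(2, ghs);
        if (!error)
                gfs2_glock_dq_m(2, ghs);

        gfs2_holder_uninit(&ghs[0]);
        gfs2_holder_uninit(&ghs[1]);
        return error;
}
#endif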
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}
/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}
/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (gfs2_assert_warn(sdp, gl))
                return;

        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}
/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket number
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        has_entries = 1;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while (1) {
                if (gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        return has_entries;
}
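/*
 * examine_bucket() walks a chain without holding its read lock across
 * the examiner call: it takes a reference on the current glock, drops
 * the lock, runs the examiner, retakes the lock, and only then releases
 * the previous glock, keeping the list node alive so the walk can
 * continue.  Illustrative examiner, not part of the original file; the
 * example_ names are hypothetical.  Compiled out.
 */
#if 0
static void example_examiner(struct gfs2_glock *gl)
{
        /* called with a reference held on gl and no locks held */
}

static void example_examine_all(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(example_examiner, sdp, x);
}
#endif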
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(scan_glock, sdp, x);
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when the inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                invalidate_inodes(sdp->sd_vfs);
                msleep(10);
        }
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               (unsigned long long)ip->i_num.no_formal_ino,
               (unsigned long long)ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");

        error = 0;

        return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
               (unsigned long long)gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        }
        printk(" \n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO "  gl_owner = %s\n",
               gl->gl_owner ? gl->gl_owner->comm : "-1");
        print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}
int __init gfs2_glock_init(void)
{
        unsigned i;
        for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
        }
#ifdef GL_HASH_LOCK_SZ
        for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
        }
#endif
        return 0;
}