/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};
struct gfs2_glock_iter {
	int hash;		/* hash bucket index */
	struct gfs2_sbd *sdp;	/* incore superblock */
	struct gfs2_glock *gl;	/* current glock struct */
	char string[512];	/* scratch space */
};
typedef void (*glock_examiner) (struct gfs2_glock *gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);
#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ	512
# else
#  define GL_HASH_LOCK_SZ	256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}

#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The glock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
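/*
 * Illustrative example (editorial addition, not part of the original file):
 * given a lock name, the bucket and the rwlock guarding it are both derived
 * from the same hash. The sdp pointer is mixed in so that equal lock numbers
 * from different filesystems land in different buckets:
 *
 *	struct lm_lockname name = { .ln_number = 23, .ln_type = LM_TYPE_INODE };
 *	unsigned int hash = gl_hash(sdp, &name);
 *	struct hlist_head *bucket = &gl_hash_table[hash].hb_list;
 *	rwlock_t *lock = gl_lock_addr(hash);	// same hash, smaller mask
 */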
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	if (aspace)
		gfs2_aspace_put(aspace);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}
/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

static void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the bucket to search
 * @sdp: the incore superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
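/*
 * Editorial summary of may_grant() for reference (not part of the original
 * file). Reading the checks above top to bottom:
 *
 *	EX request (or EX at queue head), gh is not queue head  -> queue
 *	glock already in the requested state                    -> grant
 *	GL_EXACT set and states differ                          -> queue
 *	glock EX, SH requested and queue head is SH             -> grant
 *	glock EX, DF requested and queue head is DF             -> grant
 *	glock not UN and request carries LM_FLAG_ANY            -> grant
 *	anything else                                           -> queue
 */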
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		break;
	}
	return 0;
}
/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
				 unsigned int req_state,
				 unsigned int flags)
{
	int ret = LM_OUT_ERROR;

	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
		return req_state == LM_ST_UNLOCKED ? 0 : req_state;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
							 req_state, flags);
	return ret;
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	BUG_ON(gl->gl_state == target);
	BUG_ON(gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;
	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);

	if (!(ret & LM_OUT_ASYNC)) {
		finish_xmote(gl, ret);
		gfs2_glock_hold(gl);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	} else {
		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
	}

	spin_lock(&gl->gl_spin);
}
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	goto out;
}
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
		finish_xmote(gl, gl->gl_reply);
	down_read(&gfs2_umount_flush_sem);
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	up_read(&gfs2_umount_flush_sem);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
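/*
 * Example use of gfs2_glock_get() (editorial sketch, not part of the
 * original file): look up or create the inode glock for disk block
 * "no_addr", taking a reference that must later be dropped:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...			// use gl, e.g. attach a holder to it
 *	gfs2_glock_put(gl);	// drop the reference from gfs2_glock_get()
 */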
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, defer the demote by setting GLF_PENDING_DEMOTE
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	trace_gfs2_demote_rq(gl);
}
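/*
 * Editorial note on the demote bookkeeping above (not part of the original
 * file): gl_demote_state == LM_ST_EXCLUSIVE means "no demote pending", since
 * EX is never a valid demote target. A second, conflicting request degrades
 * the target to LM_ST_UNLOCKED, which satisfies both callers. For example:
 *
 *	handle_callback(gl, LM_ST_SHARED, 0);	// gl_demote_state = SH
 *	handle_callback(gl, LM_ST_DEFERRED, 0);	// gl_demote_state -> UN
 */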
/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsprintf(gi->string, fmt, args);
		seq_printf(seq, gi->string);
	} else {
		printk(KERN_ERR " ");
		vprintk(fmt, args);
	}
	va_end(args);
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
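/*
 * Typical synchronous usage (editorial sketch, not part of the original
 * file): initialise a holder, enqueue it, and gfs2_glock_nq() returns once
 * the lock is granted. gfs2_glock_nq_init() in glock.h wraps the first two
 * steps into one call:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	...				// glock held in LM_ST_SHARED
 *	gfs2_glock_dq_uninit(&gh);	// release and drop the reference
 */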
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
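/*
 * Asynchronous usage (editorial sketch): with GL_ASYNC the enqueue returns
 * immediately and the caller polls, only calling gfs2_glock_wait() once the
 * request has completed:
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);		// never fails with GL_ASYNC
 *	while (!gfs2_glock_poll(&gh))
 *		...;			// do other work meanwhile
 *	error = gfs2_glock_wait(&gh);	// collect the final status
 */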
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B, -1 if A < B, 0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, sorted by lock number
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}
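/*
 * Example (editorial sketch, not part of the original file): acquiring two
 * glocks at once. The holders may be passed in any order; nq_m_sync() sorts
 * them by lock number so every caller takes them in the same, deadlock-free
 * order:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);	// all or nothing
 */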
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (time_before(now, holdtime))
		delay = holdtime - now;
	if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	gl->gl_reply = ret;
	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		struct gfs2_holder *gh;
		spin_lock(&gl->gl_spin);
		gh = find_first_waiter(gl);
		if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
		     (gl->gl_target != LM_ST_UNLOCKED)) ||
		    ((ret & ~LM_OUT_ST_MASK) != 0))
			set_bit(GLF_FROZEN, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		if (test_bit(GLF_FROZEN, &gl->gl_flags))
			return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Check if glock is about to be freed */
		if (atomic_read(&gl->gl_ref) == 0)
			continue;

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			spin_unlock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put(gl);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}
static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(thaw_glock, sdp, x);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem; loops until everything is gone.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}
static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char buffer[KSYM_SYMBOL_LEN];
	char flags_buf[32];

	sprint_symbol(buffer, gh->gh_ip);
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
		  state2str(gh->gh_state),
		  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		  gh->gh_error,
		  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		  gh_owner ? gh_owner->comm : "(ended)", buffer);
	return 0;
}
static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, &gl->gl_flags),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
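/*
 * Editorial example of the resulting output (field values below are
 * hypothetical, not taken from a real dump): a shared inode glock with one
 * granted holder might appear in the debugfs "glocks" file as:
 *
 *	G:  s:SH n:2/24375 f:r t:SH d:EX/0 a:0 r:3
 *	 H: s:SH f:H e:0 p:1223 [gfs2_quotad] <caller symbol>
 */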
static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * The lock state is dumped to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}
int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	glock_workqueue = create_workqueue("glock_workqueue");
	if (!glock_workqueue)	/* create_workqueue() returns NULL on failure */
		return -ENOMEM;

	register_shrinker(&glock_shrinker);

	return 0;
}
void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
}
static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
	} else {
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	}
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}
static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	gi->gl = NULL;
}
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;

	do {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	} while (n--);

	return gi->gl;
}
static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi->gl;
}
static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	gfs2_glock_iter_free(gi);
}
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}
static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};
static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}
static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}
int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}
void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}