diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3984e47..8b674b1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -39,6 +39,8 @@
 #include "super.h"
 #include "util.h"
 #include "bmap.h"
+#define CREATE_TRACE_POINTS
+#include "trace_gfs2.h"
 
 struct gfs2_gl_hash_bucket {
         struct hlist_head hb_list;
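
Note: defining CREATE_TRACE_POINTS in exactly one compilation unit before
including the trace header is what turns the header's tracepoint
declarations into definitions; every other file includes "trace_gfs2.h"
without the define. A minimal sketch of the double-include pattern such a
header follows (illustrative only, assuming the standard kernel tracepoint
header layout; the real trace_gfs2.h defines more events and fields):

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM gfs2

    #if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_GFS2_H

    #include <linux/tracepoint.h>

    /* One event as an example; trace_gfs2_glock_put(), used in
     * glock_free() below, comes from an event declared like this. */
    TRACE_EVENT(gfs2_glock_put,
            TP_PROTO(const struct gfs2_glock *gl),
            TP_ARGS(gl),
            TP_STRUCT__entry(
                    __field(u64, glnum)
            ),
            TP_fast_assign(
                    __entry->glnum = gl->gl_name.ln_number;
            ),
            TP_printk("glock %llu put", (unsigned long long)__entry->glnum)
    );

    #endif /* _TRACE_GFS2_H */

    /* This part must stay outside the include guard: with
     * CREATE_TRACE_POINTS defined, define_trace.h re-includes this
     * header and emits the tracepoint bodies. */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #define TRACE_INCLUDE_FILE trace_gfs2
    #include <trace/define_trace.h>
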
@@ -61,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);
@@ -155,7 +158,7 @@ static void glock_free(struct gfs2_glock *gl)
 
        if (aspace)
                gfs2_aspace_put(aspace);
-
+       trace_gfs2_glock_put(gl);
        sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
 }
 
@@ -165,13 +168,33 @@ static void glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
        GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
        atomic_inc(&gl->gl_ref);
 }
 
 /**
+ * demote_ok - Check to see if it's ok to unlock a glock
+ * @gl: the glock
+ *
+ * Returns: 1 if it's ok
+ */
+
+static int demote_ok(const struct gfs2_glock *gl)
+{
+       const struct gfs2_glock_operations *glops = gl->gl_ops;
+
+       if (gl->gl_state == LM_ST_UNLOCKED)
+               return 0;
+       if (!list_empty(&gl->gl_holders))
+               return 0;
+       if (glops->go_demote_ok)
+               return glops->go_demote_ok(gl);
+       return 1;
+}
+
+/**
  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
  *
@@ -179,8 +202,13 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
 
 static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 {
+       int may_reclaim;
+       may_reclaim = (demote_ok(gl) &&
+                      (atomic_read(&gl->gl_ref) == 1 ||
+                       (gl->gl_name.ln_type == LM_TYPE_INODE &&
+                        atomic_read(&gl->gl_ref) <= 2)));
        spin_lock(&lru_lock);
-       if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
+       if (list_empty(&gl->gl_lru) && may_reclaim) {
                list_add_tail(&gl->gl_lru, &lru_list);
                atomic_inc(&lru_count);
        }
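
After this change gfs2_glock_schedule_for_reclaim() calls demote_ok(),
which inspects gl_state and gl_holders, so callers are expected to hold
the per-glock spinlock around it. The reworked gfs2_glock_put() below
follows exactly this pattern:

    /* Calling convention after this change, as used in
     * gfs2_glock_put(): take gl_spin so demote_ok() sees a stable
     * view of the glock's state and holder list. */
    spin_lock(&gl->gl_spin);
    gfs2_glock_schedule_for_reclaim(gl);
    spin_unlock(&gl->gl_spin);
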
@@ -188,6 +216,21 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 }
 
 /**
+ * gfs2_glock_put_nolock() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ * This function should only be used if the caller has its own reference
+ * to the glock, in addition to the one it is dropping.
+ */
+
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+{
+       if (atomic_dec_and_test(&gl->gl_ref))
+               GLOCK_BUG_ON(gl, 1);
+       gfs2_glock_schedule_for_reclaim(gl);
+}
+
+/**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
  *
@@ -212,9 +255,9 @@ int gfs2_glock_put(struct gfs2_glock *gl)
                rv = 1;
                goto out;
        }
-       /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
-       if (atomic_read(&gl->gl_ref) == 2)
-               gfs2_glock_schedule_for_reclaim(gl);
+       spin_lock(&gl->gl_spin);
+       gfs2_glock_schedule_for_reclaim(gl);
+       spin_unlock(&gl->gl_spin);
        write_unlock(gl_lock_addr(gl->gl_hash));
 out:
        return rv;
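
gfs2_glock_put_nolock() exists for call sites that already hold
gl->gl_spin: state_change() uses it when a lock-module reference is
dropped, and the reworked shrinker below uses it as well. A simplified
extract of that shrinker hunk shows the idea:

    /* The reference taken with gfs2_glock_hold() for the work item
     * is dropped under gl_spin if the work was already queued; the
     * glock is still hashed at this point, so at least one other
     * reference exists and the count cannot reach zero. */
    spin_lock(&gl->gl_spin);
    if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
            gfs2_glock_put_nolock(gl);
    spin_unlock(&gl->gl_spin);
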
@@ -317,14 +360,17 @@ restart:
                                                return 2;
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
+                                       trace_gfs2_glock_queue(gh, 0);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
+                               trace_gfs2_promote(gh, 1);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
+                       trace_gfs2_promote(gh, 0);
                        gfs2_holder_wake(gh);
                        continue;
                }
@@ -354,6 +400,7 @@ static inline void do_error(struct gfs2_glock *gl, const int ret)
                else
                        continue;
                list_del_init(&gh->gh_list);
+               trace_gfs2_glock_queue(gh, 0);
                gfs2_holder_wake(gh);
        }
 }
@@ -392,7 +439,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
                if (held2)
                        gfs2_glock_hold(gl);
                else
-                       gfs2_glock_put(gl);
+                       gfs2_glock_put_nolock(gl);
        }
 
        gl->gl_state = new_state;
@@ -422,6 +469,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
        int rv;
 
        spin_lock(&gl->gl_spin);
+       trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);
 
@@ -597,7 +645,6 @@ __acquires(&gl->gl_spin)
 
        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 
-       down_read(&gfs2_umount_flush_sem);
        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
@@ -614,24 +661,46 @@ __acquires(&gl->gl_spin)
                if (ret == 0)
                        goto out_unlock;
                if (ret == 2)
-                       goto out_sem;
+                       goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
-out_sem:
-       up_read(&gfs2_umount_flush_sem);
+out:
        return;
 
 out_sched:
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-               gfs2_glock_put(gl);
+               gfs2_glock_put_nolock(gl);
 out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       goto out_sem;
+       goto out;
+}
+
+static void delete_work_func(struct work_struct *work)
+{
+       struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+       struct gfs2_sbd *sdp = gl->gl_sbd;
+       struct gfs2_inode *ip = NULL;
+       struct inode *inode;
+       u64 no_addr = 0;
+
+       spin_lock(&gl->gl_spin);
+       ip = (struct gfs2_inode *)gl->gl_object;
+       if (ip)
+               no_addr = ip->i_no_addr;
+       spin_unlock(&gl->gl_spin);
+       if (ip) {
+               inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+               if (inode) {
+                       d_prune_aliases(inode);
+                       iput(inode);
+               }
+       }
+       gfs2_glock_put(gl);
 }
 
 static void glock_work_func(struct work_struct *work)
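
delete_work_func() runs off the new gfs2_delete_workqueue via the
gl_delete work item initialized in gfs2_glock_get() below, and it gets
queued from the go_callback hook invoked in handle_callback() (also part
of this diff). A simplified sketch of the iopen-glock hook that does the
queueing (the real version lives in glops.c; its exact signature and
checks may differ, so treat this as illustrative):

    /* Sketch only. A remote demote request against a shared iopen
     * glock means the inode was unlinked on another node: schedule
     * delete_work_func() so d_prune_aliases()/iput() can run locally
     * and the final deallocation can proceed. */
    static void iopen_go_callback(struct gfs2_glock *gl)
    {
            struct gfs2_inode *ip = gl->gl_object;

            if (gl->gl_demote_state == LM_ST_UNLOCKED &&
                gl->gl_state == LM_ST_SHARED && ip != NULL) {
                    gfs2_glock_hold(gl);    /* reference for the work item */
                    if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                            gfs2_glock_put(gl);     /* was already queued */
            }
    }
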
@@ -641,6 +710,7 @@ static void glock_work_func(struct work_struct *work)
 
        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                finish_xmote(gl, gl->gl_reply);
+       down_read(&gfs2_umount_flush_sem);
        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
@@ -653,6 +723,7 @@ static void glock_work_func(struct work_struct *work)
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
+       up_read(&gfs2_umount_flush_sem);
        if (!delay ||
            queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
@@ -710,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+       INIT_WORK(&gl->gl_delete, delete_work_func);
 
        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
@@ -796,22 +868,37 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
        gh->gh_ip = 0;
 }
 
-static int just_schedule(void *word)
+/**
+ * gfs2_glock_holder_wait
+ * @word: unused
+ *
+ * This function and gfs2_glock_demote_wait both show up in the WCHAN
+ * field. Thus I've separated these otherwise identical functions in
+ * order to be more informative to the user.
+ */
+
+static int gfs2_glock_holder_wait(void *word)
 {
         schedule();
         return 0;
 }
 
+static int gfs2_glock_demote_wait(void *word)
+{
+       schedule();
+       return 0;
+}
+
 static void wait_on_holder(struct gfs2_holder *gh)
 {
        might_sleep();
-       wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
 }
 
 static void wait_on_demote(struct gfs2_glock *gl)
 {
        might_sleep();
-       wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
 }
 
 /**
@@ -836,6 +923,9 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
+       if (gl->gl_ops->go_callback)
+               gl->gl_ops->go_callback(gl);
+       trace_gfs2_demote_rq(gl);
 }
 
 /**
@@ -921,6 +1011,7 @@ fail:
                        goto do_cancel;
                return;
        }
+       trace_gfs2_glock_queue(gh, 1);
        list_add_tail(&gh->gh_list, insert_pt);
 do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
@@ -1017,6 +1108,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
+       trace_gfs2_glock_queue(gh, 0);
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
                return;
@@ -1249,33 +1341,12 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
                gfs2_glock_put(gl);
 }
 
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
-       const struct gfs2_glock_operations *glops = gl->gl_ops;
-
-       if (gl->gl_state == LM_ST_UNLOCKED)
-               return 0;
-       if (!list_empty(&gl->gl_holders))
-               return 0;
-       if (glops->go_demote_ok)
-               return glops->go_demote_ok(gl);
-       return 1;
-}
-
 
 static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 {
        struct gfs2_glock *gl;
        int may_demote;
        int nr_skipped = 0;
-       int got_ref = 0;
        LIST_HEAD(skipped);
 
        if (nr == 0)
@@ -1290,36 +1361,29 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
 
+               /* Check if glock is about to be freed */
+               if (atomic_read(&gl->gl_ref) == 0)
+                       continue;
+
                /* Test for being demotable */
                if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                        gfs2_glock_hold(gl);
-                       got_ref = 1;
                        spin_unlock(&lru_lock);
                        spin_lock(&gl->gl_spin);
                        may_demote = demote_ok(gl);
-                       spin_unlock(&gl->gl_spin);
-                       clear_bit(GLF_LOCK, &gl->gl_flags);
                        if (may_demote) {
                                handle_callback(gl, LM_ST_UNLOCKED, 0);
                                nr--;
-                               if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                                       gfs2_glock_put(gl);
                        }
+                       if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+                               gfs2_glock_put_nolock(gl);
+                       spin_unlock(&gl->gl_spin);
+                       clear_bit(GLF_LOCK, &gl->gl_flags);
                        spin_lock(&lru_lock);
-                       if (may_demote)
-                               continue;
-               }
-               if (list_empty(&gl->gl_lru) &&
-                   (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
-                       nr_skipped++;
-                       list_add(&gl->gl_lru, &skipped);
-               }
-               if (got_ref) {
-                       spin_unlock(&lru_lock);
-                       gfs2_glock_put(gl);
-                       spin_lock(&lru_lock);
-                       got_ref = 0;
+                       continue;
                }
+               nr_skipped++;
+               list_add(&gl->gl_lru, &skipped);
        }
        list_splice(&skipped, &lru_list);
        atomic_add(nr_skipped, &lru_count);
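
For context, gfs2_shrink_glock_memory() is hooked into memory reclaim
through a struct shrinker declared elsewhere in this file (untouched by
this diff); under the shrinker API of this kernel generation it looks
roughly like this:

    /* Registration glue, assumed from the 2.6.3x-era API; this diff
     * does not modify it. register_shrinker(&glock_shrinker) runs in
     * gfs2_glock_init() below and unregister_shrinker() in
     * gfs2_glock_exit(). */
    static struct shrinker glock_shrinker = {
            .shrink = gfs2_shrink_glock_memory,
            .seeks = DEFAULT_SEEKS,
    };
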
@@ -1701,6 +1765,11 @@ int __init gfs2_glock_init(void)
        glock_workqueue = create_workqueue("glock_workqueue");
        if (IS_ERR(glock_workqueue))
                return PTR_ERR(glock_workqueue);
+       gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+       if (IS_ERR(gfs2_delete_workqueue)) {
+               destroy_workqueue(glock_workqueue);
+               return PTR_ERR(gfs2_delete_workqueue);
+       }
 
        register_shrinker(&glock_shrinker);
 
@@ -1711,6 +1780,7 @@ void gfs2_glock_exit(void)
 {
        unregister_shrinker(&glock_shrinker);
        destroy_workqueue(glock_workqueue);
+       destroy_workqueue(gfs2_delete_workqueue);
 }
 
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)