safe/jmp/linux-2.6: fs/ocfs2/dlm/dlmunlock.c

diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index cec2ce1..00f53b2 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -30,7 +30,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
-#include <linux/utsname.h>
 #include <linux/init.h>
 #include <linux/sysctl.h>
 #include <linux/random.h>
@@ -117,12 +116,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
        else
                BUG_ON(res->owner == dlm->node_num);
 
-       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->ast_lock);
        /* We want to be sure that we're not freeing a lock
         * that still has AST's pending... */
        in_use = !list_empty(&lock->ast_list);
-       spin_unlock(&dlm->spinlock);
-       if (in_use) {
+       spin_unlock(&dlm->ast_lock);
+       if (in_use && !(flags & LKM_CANCEL)) {
               mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
                    "while waiting for an ast!", res->lockname.len,
                    res->lockname.name);
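Two things change in this hunk: the emptiness test on lock->ast_list now runs under dlm->ast_lock, which this diff implies is the lock that actually serializes AST delivery (the broader dlm->spinlock presumably could not exclude the AST-flushing path), and the error is no longer raised for cancels, since a cancel may legitimately arrive while an AST is still queued. The guard, restated as a hypothetical predicate:

        /* Hypothetical restatement of the check above: refuse the unlock
         * only when an AST is pending and the caller is not cancelling. */
        static int dlm_unlock_refused(int ast_pending, int flags)
        {
                return ast_pending && !(flags & LKM_CANCEL);
        }
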
@@ -131,7 +130,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 
        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
-               if (master_node) {
+               if (master_node && !(flags & LKM_CANCEL)) {
                        mlog(ML_ERROR, "lockres in progress!\n");
                        spin_unlock(&res->spinlock);
                        return DLM_FORWARD;
@@ -147,6 +146,10 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                goto leave;
        }
 
+       if (res->state & DLM_LOCK_RES_MIGRATING) {
+               status = DLM_MIGRATING;
+               goto leave;
+       }
 
        /* see above for what the spec says about
         * LKM_CANCEL and the lock queue state */
@@ -155,7 +158,7 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
        else
                status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
 
-       if (status != DLM_NORMAL)
+       if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
                goto leave;
 
        /* By now this has been masked out of cancel requests. */
@@ -183,11 +186,23 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                spin_lock(&lock->spinlock);
                /* if the master told us the lock was already granted,
                 * let the ast handle all of these actions */
-               if (status == DLM_NORMAL &&
-                   lksb->status == DLM_CANCELGRANT) {
+               if (status == DLM_CANCELGRANT) {
                        actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
                                     DLM_UNLOCK_REGRANT_LOCK|
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
+               } else if (status == DLM_RECOVERING || 
+                          status == DLM_MIGRATING || 
+                          status == DLM_FORWARD) {
+                       /* must clear the actions because this unlock
+                        * is about to be retried.  cannot free or do
+                        * any list manipulation. */
+                       mlog(0, "%s:%.*s: clearing actions, %s\n",
+                            dlm->name, res->lockname.len,
+                            res->lockname.name,
+                            status==DLM_RECOVERING?"recovering":
+                            (status==DLM_MIGRATING?"migrating":
+                             "forward"));
+                       actions = 0;
                }
                if (flags & LKM_CANCEL)
                        lock->cancel_pending = 0;
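DLM_RECOVERING, DLM_MIGRATING and DLM_FORWARD form the "retry later" family: the caller will resend this unlock, so freeing the lock or touching its queue position now would be applied a second time when the retry succeeds. A hypothetical predicate capturing the rule the branch above encodes:

        /* Hypothetical helper mirroring the branch above. */
        static int dlm_unlock_will_be_retried(enum dlm_status status)
        {
                return status == DLM_RECOVERING ||
                       status == DLM_MIGRATING ||
                       status == DLM_FORWARD;
        }
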
@@ -231,8 +246,10 @@ leave:
        if (actions & DLM_UNLOCK_FREE_LOCK) {
                /* this should always be coupled with list removal */
                BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
-               mlog(0, "lock %"MLFu64" should be gone now! refs=%d\n",
-                    lock->ml.cookie, atomic_read(&lock->lock_refs.refcount)-1);
+               mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
+                    dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+                    dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+                    atomic_read(&lock->lock_refs.refcount)-1);
                dlm_lock_put(lock);
        }
        if (actions & DLM_UNLOCK_CALL_AST)
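The old format printed the raw 64-bit cookie; the new one splits it into node:sequence, as the rest of this diff does too. Below is a standalone sketch of that split, modeled on how dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq() are used here; the top-byte/low-56-bit layout is an assumption drawn from the "%u:%llu" format:

        #include <stdint.h>
        #include <stdio.h>

        static unsigned cookie_node(uint64_t cookie)    /* assumed: top byte */
        {
                return (unsigned)(cookie >> 56);
        }

        static unsigned long long cookie_seq(uint64_t cookie) /* low 56 bits */
        {
                return cookie & 0x00ffffffffffffffULL;
        }

        int main(void)
        {
                uint64_t cookie = ((uint64_t)3 << 56) | 1234;
                printf("lock %u:%llu\n", cookie_node(cookie),
                       cookie_seq(cookie));     /* prints "lock 3:1234" */
                return 0;
        }

Note the be64_to_cpu() in the diff: the cookie travels big-endian on the wire, so it is byte-swapped before the fields are extracted.
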
@@ -256,8 +273,7 @@ void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
 void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
                               struct dlm_lock *lock)
 {
-       list_del_init(&lock->list);
-       list_add_tail(&lock->list, &res->granted);
+       list_move_tail(&lock->list, &res->granted);
        lock->ml.convert_type = LKM_IVMODE;
 }
 
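list_move_tail() is the stock <linux/list.h> helper that performs exactly the delete-and-re-add the old two lines spelled out, so this hunk is behavior-neutral. Its 2.6-era definition, for reference:

        static inline void list_move_tail(struct list_head *list,
                                          struct list_head *head)
        {
                __list_del(list->prev, list->next);
                list_add_tail(list, head);
        }

The only subtlety is that list_del_init() re-initialized the entry before it was re-added; list_move_tail() skips that step, which is harmless because list_add_tail() overwrites both pointers anyway.
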
@@ -304,6 +320,16 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
 
        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 
+       if (owner == dlm->node_num) {
+               /* ended up trying to contact ourself.  this means
+                * that the lockres had been remote but became local
+                * via a migration.  just retry it, now as local */
+               mlog(0, "%s:%.*s: this node became the master due to a "
+                    "migration, re-evaluate now\n", dlm->name,
+                    res->lockname.len, res->lockname.name);
+               return DLM_FORWARD;
+       }
+
        memset(&unlock, 0, sizeof(unlock));
        unlock.node_idx = dlm->node_num;
        unlock.flags = cpu_to_be32(flags);
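Returning DLM_FORWARD here deliberately reuses the caller's existing retry machinery: dlmunlock() retries on DLM_FORWARD, re-reads res->owner under the resource spinlock, sees that this node became master during the migration, and takes the local path. A simplified, hypothetical shape of that loop (the real function retries with a goto; the wrapper names follow dlmunlock.c):

        /* Simplified sketch of the caller's retry behavior. */
        do {
                spin_lock(&res->spinlock);
                is_master = (res->owner == dlm->node_num);
                spin_unlock(&res->spinlock);
                status = is_master ?
                        dlmunlock_master(dlm, res, lock, lksb, flags, &call_ast) :
                        dlmunlock_remote(dlm, res, lock, lksb, flags, &call_ast);
        } while (status == DLM_FORWARD);
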
@@ -325,14 +351,9 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                                        vec, veclen, owner, &status);
        if (tmpret >= 0) {
                // successfully sent and received
-               if (status == DLM_CANCELGRANT)
-                       ret = DLM_NORMAL;
-               else if (status == DLM_FORWARD) {
+               if (status == DLM_FORWARD)
                        mlog(0, "master was in-progress.  retry\n");
-                       ret = DLM_FORWARD;
-               } else
-                       ret = status;
-               lksb->status = status;
+               ret = status;
        } else {
                mlog_errno(tmpret);
                if (dlm_is_host_down(tmpret)) {
@@ -348,7 +369,6 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                        /* something bad.  this will BUG in ocfs2 */
                        ret = dlm_err_to_dlm_status(tmpret);
                }
-               lksb->status = ret;
        }
 
        return ret;
@@ -362,7 +382,8 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
  * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
  *          return value from dlmunlock_master
  */
-int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+                           void **ret_data)
 {
        struct dlm_ctxt *dlm = data;
        struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
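The added void **ret_data parameter follows a tree-wide change to the o2net message-handler signature; handlers with nothing to hand back simply ignore it. The handler type this now matches looks like the following (assumed from the signature above; the typedef lives in the o2net cluster headers):

        typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len,
                                             void *data, void **ret_data);
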
@@ -459,6 +480,10 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 
        /* lock was found on queue */
        lksb = lock->lksb;
+       if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
+           lock->ml.type != LKM_EXMODE)
+               flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
+
        /* unlockast only called on originating node */
        if (flags & LKM_PUT_LVB) {
                lksb->flags |= DLM_LKSB_PUT_LVB;
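Only an EX-mode holder can have a lock value block worth writing back, so LVB flags arriving with a weaker lock are stripped rather than honored; a later hunk applies the same rule on the local path in dlmunlock(). The invariant as a hypothetical helper:

        /* Hypothetical; mirrors the two LVB hunks in this diff. */
        static u32 dlm_sanitize_lvb_flags(u32 flags, s8 lock_type)
        {
                if (lock_type != LKM_EXMODE)
                        flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
                return flags;
        }
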
@@ -480,13 +505,11 @@ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
 not_found:
        if (!found)
                mlog(ML_ERROR, "failed to find lock to unlock! "
-                              "cookie=%"MLFu64"\n",
-                    unlock->cookie);
-       else {
-               /* send the lksb->status back to the other node */
-               status = lksb->status;
+                              "cookie=%u:%llu\n",
+                    dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
+                    dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
+       else
                dlm_lock_put(lock);
-       }
 
 leave:
        if (res)
@@ -508,26 +531,22 @@ static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
 
        if (dlm_lock_on_list(&res->blocked, lock)) {
                /* cancel this outright */
-               lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK);
        } else if (dlm_lock_on_list(&res->converting, lock)) {
                /* cancel the request, put back on granted */
-               lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_CALL_AST |
                            DLM_UNLOCK_REMOVE_LOCK |
                            DLM_UNLOCK_REGRANT_LOCK |
                            DLM_UNLOCK_CLEAR_CONVERT_TYPE);
        } else if (dlm_lock_on_list(&res->granted, lock)) {
-               /* too late, already granted.  DLM_CANCELGRANT */
-               lksb->status = DLM_CANCELGRANT;
-               status = DLM_NORMAL;
+               /* too late, already granted. */
+               status = DLM_CANCELGRANT;
                *actions = DLM_UNLOCK_CALL_AST;
        } else {
                mlog(ML_ERROR, "lock to cancel is not on any list!\n");
-               lksb->status = DLM_IVLOCKID;
                status = DLM_IVLOCKID;
                *actions = 0;
        }
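With the lksb->status side channel gone, the queue the lock sits on maps one-to-one onto the returned status, and DLM_CANCELGRANT becomes a first-class return value instead of being smuggled past a DLM_NORMAL. The mapping, summarized as a hypothetical helper:

        /* Hypothetical summary of dlm_get_cancel_actions() above. */
        static enum dlm_status dlm_cancel_outcome(int on_blocked,
                                                  int on_converting,
                                                  int on_granted)
        {
                if (on_blocked || on_converting)
                        return DLM_NORMAL;      /* cancel still possible */
                if (on_granted)
                        return DLM_CANCELGRANT; /* too late, already granted */
                return DLM_IVLOCKID;            /* on no queue: invalid lock */
        }
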
@@ -544,13 +563,11 @@ static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
 
        /* unlock request */
        if (!dlm_lock_on_list(&res->granted, lock)) {
-               lksb->status = DLM_DENIED;
                status = DLM_DENIED;
                dlm_error(status);
                *actions = 0;
        } else {
                /* unlock granted lock */
-               lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                *actions = (DLM_UNLOCK_FREE_LOCK |
                            DLM_UNLOCK_CALL_AST |
@@ -607,6 +624,8 @@ retry:
 
        spin_lock(&res->spinlock);
        is_master = (res->owner == dlm->node_num);
+       if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
+               flags &= ~LKM_VALBLK;
        spin_unlock(&res->spinlock);
 
        if (is_master) {
@@ -640,7 +659,7 @@ retry:
        }
 
        if (call_ast) {
-               mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status);
+               mlog(0, "calling unlockast(%p, %d)\n", data, status);
                if (is_master) {
                        /* it is possible that there is one last bast 
                         * pending.  make sure it is flushed, then
@@ -652,9 +671,12 @@ retry:
                        wait_event(dlm->ast_wq, 
                                   dlm_lock_basts_flushed(dlm, lock));
                }
-               (*unlockast)(data, lksb->status);
+               (*unlockast)(data, status);
        }
 
+       if (status == DLM_CANCELGRANT)
+               status = DLM_NORMAL;
+
        if (status == DLM_NORMAL) {
                mlog(0, "kicking the thread\n");
                dlm_kick_thread(dlm, res);