[GFS2] Remove all_list from lock_dlm
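
Locks are no longer kept on a lockspace-wide all_locks list; only an
all_locks_count is maintained.  gdlm_delete_lp() now just takes the lock
off the delay list and drops the count, and gdlm_release_all_locks() is
removed entirely.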
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index 2d81d90..2482c90 100644
 
 static char junk_lvb[GDLM_LVB_SIZE];
 
-static void queue_complete(struct gdlm_lock *lp)
+
+/* convert dlm lock-mode to gfs lock-state */
+
+static s16 gdlm_make_lmstate(s16 dlmmode)
 {
-       struct gdlm_ls *ls = lp->ls;
+       switch (dlmmode) {
+       case DLM_LOCK_IV:
+       case DLM_LOCK_NL:
+               return LM_ST_UNLOCKED;
+       case DLM_LOCK_EX:
+               return LM_ST_EXCLUSIVE;
+       case DLM_LOCK_CW:
+               return LM_ST_DEFERRED;
+       case DLM_LOCK_PR:
+               return LM_ST_SHARED;
+       }
+       gdlm_assert(0, "unknown DLM mode %d", dlmmode);
+       return -1;
+}
 
-       clear_bit(LFL_ACTIVE, &lp->flags);
+/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
+   thread gets to it. */
+
+static void queue_submit(struct gdlm_lock *lp)
+{
+       struct gdlm_ls *ls = lp->ls;
 
        spin_lock(&ls->async_lock);
-       list_add_tail(&lp->clist, &ls->complete);
+       list_add_tail(&lp->delay_list, &ls->submit);
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);
 }
 
-static inline void gdlm_ast(void *astarg)
+static void wake_up_ast(struct gdlm_lock *lp)
 {
-       queue_complete(astarg);
+       clear_bit(LFL_AST_WAIT, &lp->flags);
+       smp_mb__after_clear_bit();
+       wake_up_bit(&lp->flags, LFL_AST_WAIT);
 }
 
-static inline void gdlm_bast(void *astarg, int mode)
+static void gdlm_delete_lp(struct gdlm_lock *lp)
 {
-       struct gdlm_lock *lp = astarg;
        struct gdlm_ls *ls = lp->ls;
 
-       if (!mode) {
-               printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
-                       lp->lockname.ln_type,
-                       (unsigned long long)lp->lockname.ln_number);
-               return;
-       }
-
        spin_lock(&ls->async_lock);
-       if (!lp->bast_mode) {
-               list_add_tail(&lp->blist, &ls->blocking);
-               lp->bast_mode = mode;
-       } else if (lp->bast_mode < mode)
-               lp->bast_mode = mode;
+       if (!list_empty(&lp->delay_list))
+               list_del_init(&lp->delay_list);
+       ls->all_locks_count--;
        spin_unlock(&ls->async_lock);
-       wake_up(&ls->thread_wait);
+
+       kfree(lp);
 }
 
-void gdlm_queue_delayed(struct gdlm_lock *lp)
+static void gdlm_queue_delayed(struct gdlm_lock *lp)
 {
        struct gdlm_ls *ls = lp->ls;
 
@@ -59,9 +73,239 @@ void gdlm_queue_delayed(struct gdlm_lock *lp)
        spin_unlock(&ls->async_lock);
 }
 
+static void process_complete(struct gdlm_lock *lp)
+{
+       struct gdlm_ls *ls = lp->ls;
+       struct lm_async_cb acb;
+
+       memset(&acb, 0, sizeof(acb));
+
+       if (lp->lksb.sb_status == -DLM_ECANCEL) {
+               log_info("complete dlm cancel %x,%llx flags %lx",
+                        lp->lockname.ln_type,
+                        (unsigned long long)lp->lockname.ln_number,
+                        lp->flags);
+
+               lp->req = lp->cur;
+               acb.lc_ret |= LM_OUT_CANCELED;
+               if (lp->cur == DLM_LOCK_IV)
+                       lp->lksb.sb_lkid = 0;
+               goto out;
+       }
+
+       if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
+               if (lp->lksb.sb_status != -DLM_EUNLOCK) {
+                       log_info("unlock sb_status %d %x,%llx flags %lx",
+                                lp->lksb.sb_status, lp->lockname.ln_type,
+                                (unsigned long long)lp->lockname.ln_number,
+                                lp->flags);
+                       return;
+               }
+
+               lp->cur = DLM_LOCK_IV;
+               lp->req = DLM_LOCK_IV;
+               lp->lksb.sb_lkid = 0;
+
+               if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
+                       gdlm_delete_lp(lp);
+                       return;
+               }
+               goto out;
+       }
+
+       if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
+               memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
+
+       if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
+               if (lp->req == DLM_LOCK_PR)
+                       lp->req = DLM_LOCK_CW;
+               else if (lp->req == DLM_LOCK_CW)
+                       lp->req = DLM_LOCK_PR;
+       }
+
+       /*
+        * A canceled lock request.  The lock was just taken off the delayed
+        * list and was never even submitted to dlm.
+        */
+
+       if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
+               log_info("complete internal cancel %x,%llx",
+                        lp->lockname.ln_type,
+                        (unsigned long long)lp->lockname.ln_number);
+               lp->req = lp->cur;
+               acb.lc_ret |= LM_OUT_CANCELED;
+               goto out;
+       }
+
+       /*
+        * An error occurred.
+        */
+
+       if (lp->lksb.sb_status) {
+               /* a "normal" error */
+               if ((lp->lksb.sb_status == -EAGAIN) &&
+                   (lp->lkf & DLM_LKF_NOQUEUE)) {
+                       lp->req = lp->cur;
+                       if (lp->cur == DLM_LOCK_IV)
+                               lp->lksb.sb_lkid = 0;
+                       goto out;
+               }
+
+               /* this could only happen with cancels I think */
+               log_info("ast sb_status %d %x,%llx flags %lx",
+                        lp->lksb.sb_status, lp->lockname.ln_type,
+                        (unsigned long long)lp->lockname.ln_number,
+                        lp->flags);
+               return;
+       }
+
+       /*
+        * This is an AST for an EX->EX conversion for sync_lvb from GFS.
+        */
+
+       if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
+               wake_up_ast(lp);
+               return;
+       }
+
+       /*
+        * A lock has been demoted to NL because it initially completed during
+        * BLOCK_LOCKS.  Now it must be requested in the originally requested
+        * mode.
+        */
+
+       if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
+               gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
+                           lp->lockname.ln_type,
+                           (unsigned long long)lp->lockname.ln_number);
+               gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
+                           lp->lockname.ln_type,
+                           (unsigned long long)lp->lockname.ln_number);
+
+               lp->cur = DLM_LOCK_NL;
+               lp->req = lp->prev_req;
+               lp->prev_req = DLM_LOCK_IV;
+               lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+               set_bit(LFL_NOCACHE, &lp->flags);
+
+               if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+                   !test_bit(LFL_NOBLOCK, &lp->flags))
+                       gdlm_queue_delayed(lp);
+               else
+                       queue_submit(lp);
+               return;
+       }
+
+       /*
+        * A request is granted during dlm recovery.  It may be granted
+        * because the locks of a failed node were cleared.  In that case,
+        * there may be inconsistent data beneath this lock and we must wait
+        * for recovery to complete to use it.  When gfs recovery is done this
+        * granted lock will be converted to NL and then reacquired in this
+        * granted state.
+        */
+
+       if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+           !test_bit(LFL_NOBLOCK, &lp->flags) &&
+           lp->req != DLM_LOCK_NL) {
+
+               lp->cur = lp->req;
+               lp->prev_req = lp->req;
+               lp->req = DLM_LOCK_NL;
+               lp->lkf |= DLM_LKF_CONVERT;
+               lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+               log_debug("rereq %x,%llx id %x %d,%d",
+                         lp->lockname.ln_type,
+                         (unsigned long long)lp->lockname.ln_number,
+                         lp->lksb.sb_lkid, lp->cur, lp->req);
+
+               set_bit(LFL_REREQUEST, &lp->flags);
+               queue_submit(lp);
+               return;
+       }
+
+       /*
+        * DLM demoted the lock to NL before it was granted so GFS must be
+        * told it cannot cache data for this lock.
+        */
+
+       if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
+               set_bit(LFL_NOCACHE, &lp->flags);
+
+out:
+       /*
+        * This is an internal lock_dlm lock
+        */
+
+       if (test_bit(LFL_INLOCK, &lp->flags)) {
+               clear_bit(LFL_NOBLOCK, &lp->flags);
+               lp->cur = lp->req;
+               wake_up_ast(lp);
+               return;
+       }
+
+       /*
+        * Normal completion of a lock request.  Tell GFS it now has the lock.
+        */
+
+       clear_bit(LFL_NOBLOCK, &lp->flags);
+       lp->cur = lp->req;
+
+       acb.lc_name = lp->lockname;
+       acb.lc_ret |= gdlm_make_lmstate(lp->cur);
+
+       ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
+}
+
+static void gdlm_ast(void *astarg)
+{
+       struct gdlm_lock *lp = astarg;
+       clear_bit(LFL_ACTIVE, &lp->flags);
+       process_complete(lp);
+}
+
+static void process_blocking(struct gdlm_lock *lp, int bast_mode)
+{
+       struct gdlm_ls *ls = lp->ls;
+       unsigned int cb = 0;
+
+       switch (gdlm_make_lmstate(bast_mode)) {
+       case LM_ST_EXCLUSIVE:
+               cb = LM_CB_NEED_E;
+               break;
+       case LM_ST_DEFERRED:
+               cb = LM_CB_NEED_D;
+               break;
+       case LM_ST_SHARED:
+               cb = LM_CB_NEED_S;
+               break;
+       default:
+               gdlm_assert(0, "unknown bast mode %u", bast_mode);
+       }
+
+       ls->fscb(ls->sdp, cb, &lp->lockname);
+}
+
+
+static void gdlm_bast(void *astarg, int mode)
+{
+       struct gdlm_lock *lp = astarg;
+
+       if (!mode) {
+               printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
+                       lp->lockname.ln_type,
+                       (unsigned long long)lp->lockname.ln_number);
+               return;
+       }
+
+       process_blocking(lp, mode);
+}
+
 /* convert gfs lock-state to dlm lock-mode */
 
-static int16_t make_mode(int16_t lmstate)
+static s16 make_mode(s16 lmstate)
 {
        switch (lmstate) {
        case LM_ST_UNLOCKED:
@@ -77,38 +321,20 @@ static int16_t make_mode(int16_t lmstate)
        return -1;
 }
 
-/* convert dlm lock-mode to gfs lock-state */
-
-int16_t gdlm_make_lmstate(int16_t dlmmode)
-{
-       switch (dlmmode) {
-       case DLM_LOCK_IV:
-       case DLM_LOCK_NL:
-               return LM_ST_UNLOCKED;
-       case DLM_LOCK_EX:
-               return LM_ST_EXCLUSIVE;
-       case DLM_LOCK_CW:
-               return LM_ST_DEFERRED;
-       case DLM_LOCK_PR:
-               return LM_ST_SHARED;
-       }
-       gdlm_assert(0, "unknown DLM mode %d", dlmmode);
-       return -1;
-}
 
 /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
    DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
 
 static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
 {
-       int16_t cur = make_mode(cur_state);
+       s16 cur = make_mode(cur_state);
        if (lp->cur != DLM_LOCK_IV)
                gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
 }
 
 static inline unsigned int make_flags(struct gdlm_lock *lp,
                                      unsigned int gfs_flags,
-                                     int16_t cur, int16_t req)
+                                     s16 cur, s16 req)
 {
        unsigned int lkf = 0;
 
@@ -134,13 +360,6 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
 
        if (lp->lksb.sb_lkid != 0) {
                lkf |= DLM_LKF_CONVERT;
-
-               /* Conversion deadlock avoidance by DLM */
-
-               if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
-                   !(lkf & DLM_LKF_NOQUEUE) &&
-                   cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
-                       lkf |= DLM_LKF_CONVDEADLK;
        }
 
        if (lp->lvb)
@@ -151,7 +370,7 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
 
 /* make_strname - convert GFS lock numbers to a string */
 
-static inline void make_strname(struct lm_lockname *lockname,
+static inline void make_strname(const struct lm_lockname *lockname,
                                struct gdlm_strname *str)
 {
        sprintf(str->name, "%8x%16llx", lockname->ln_type,
@@ -164,22 +383,17 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
 {
        struct gdlm_lock *lp;
 
-       lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
+       lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
        if (!lp)
                return -ENOMEM;
 
        lp->lockname = *name;
+       make_strname(name, &lp->strname);
        lp->ls = ls;
        lp->cur = DLM_LOCK_IV;
-       lp->lvb = NULL;
-       lp->hold_null = NULL;
-       init_completion(&lp->ast_wait);
-       INIT_LIST_HEAD(&lp->clist);
-       INIT_LIST_HEAD(&lp->blist);
        INIT_LIST_HEAD(&lp->delay_list);
 
        spin_lock(&ls->async_lock);
-       list_add(&lp->all_list, &ls->all_locks);
        ls->all_locks_count++;
        spin_unlock(&ls->async_lock);
 
@@ -187,47 +401,26 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
        return 0;
 }
 
-void gdlm_delete_lp(struct gdlm_lock *lp)
-{
-       struct gdlm_ls *ls = lp->ls;
-
-       spin_lock(&ls->async_lock);
-       if (!list_empty(&lp->clist))
-               list_del_init(&lp->clist);
-       if (!list_empty(&lp->blist))
-               list_del_init(&lp->blist);
-       if (!list_empty(&lp->delay_list))
-               list_del_init(&lp->delay_list);
-       gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
-                   (unsigned long long)lp->lockname.ln_number);
-       list_del_init(&lp->all_list);
-       ls->all_locks_count--;
-       spin_unlock(&ls->async_lock);
-
-       kfree(lp);
-}
-
-int gdlm_get_lock(lm_lockspace_t *lockspace, struct lm_lockname *name,
-                 lm_lock_t **lockp)
+int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
+                 void **lockp)
 {
        struct gdlm_lock *lp;
        int error;
 
-       error = gdlm_create_lp((struct gdlm_ls *) lockspace, name, &lp);
+       error = gdlm_create_lp(lockspace, name, &lp);
 
-       *lockp = (lm_lock_t *) lp;
+       *lockp = lp;
        return error;
 }
 
-void gdlm_put_lock(lm_lock_t *lock)
+void gdlm_put_lock(void *lock)
 {
-       gdlm_delete_lp((struct gdlm_lock *) lock);
+       gdlm_delete_lp(lock);
 }
 
 unsigned int gdlm_do_lock(struct gdlm_lock *lp)
 {
        struct gdlm_ls *ls = lp->ls;
-       struct gdlm_strname str;
        int error, bast = 1;
 
        /*
@@ -249,8 +442,6 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp)
        if (test_bit(LFL_NOBAST, &lp->flags))
                bast = 0;
 
-       make_strname(&lp->lockname, &str);
-
        set_bit(LFL_ACTIVE, &lp->flags);
 
        log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
@@ -258,17 +449,17 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp)
                  lp->cur, lp->req, lp->lkf);
 
        error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
-                        str.name, str.namelen, 0, gdlm_ast, lp,
-                        bast ? gdlm_bast : NULL);
+                        lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
+                        lp, bast ? gdlm_bast : NULL);
 
        if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
                lp->lksb.sb_status = -EAGAIN;
-               queue_complete(lp);
+               gdlm_ast(lp);
                error = 0;
        }
 
        if (error) {
-               log_debug("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
+               log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
                          "flags=%lx", ls->fsname, lp->lockname.ln_type,
                          (unsigned long long)lp->lockname.ln_number, error,
                          lp->cur, lp->req, lp->lkf, lp->flags);
@@ -296,7 +487,7 @@ static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
        error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);
 
        if (error) {
-               log_debug("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
+               log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
                          "flags=%lx", ls->fsname, lp->lockname.ln_type,
                          (unsigned long long)lp->lockname.ln_number, error,
                          lp->cur, lp->req, lp->lkf, lp->flags);
@@ -305,10 +496,16 @@ static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
        return LM_OUT_ASYNC;
 }
 
-unsigned int gdlm_lock(lm_lock_t *lock, unsigned int cur_state,
+unsigned int gdlm_lock(void *lock, unsigned int cur_state,
                       unsigned int req_state, unsigned int flags)
 {
-       struct gdlm_lock *lp = (struct gdlm_lock *) lock;
+       struct gdlm_lock *lp = lock;
+
+       if (req_state == LM_ST_UNLOCKED)
+               return gdlm_unlock(lock, cur_state);
 
        clear_bit(LFL_DLM_CANCEL, &lp->flags);
        if (flags & LM_FLAG_NOEXP)
@@ -321,9 +518,9 @@ unsigned int gdlm_lock(lm_lock_t *lock, unsigned int cur_state,
        return gdlm_do_lock(lp);
 }
 
-unsigned int gdlm_unlock(lm_lock_t *lock, unsigned int cur_state)
+unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
 {
-       struct gdlm_lock *lp = (struct gdlm_lock *) lock;
+       struct gdlm_lock *lp = lock;
 
        clear_bit(LFL_DLM_CANCEL, &lp->flags);
        if (lp->cur == DLM_LOCK_IV)
@@ -331,9 +528,9 @@ unsigned int gdlm_unlock(lm_lock_t *lock, unsigned int cur_state)
        return gdlm_do_unlock(lp);
 }
 
-void gdlm_cancel(lm_lock_t *lock)
+void gdlm_cancel(void *lock)
 {
-       struct gdlm_lock *lp = (struct gdlm_lock *) lock;
+       struct gdlm_lock *lp = lock;
        struct gdlm_ls *ls = lp->ls;
        int error, delay_list = 0;
 
@@ -353,7 +550,7 @@ void gdlm_cancel(lm_lock_t *lock)
        if (delay_list) {
                set_bit(LFL_CANCEL, &lp->flags);
                set_bit(LFL_ACTIVE, &lp->flags);
-               queue_complete(lp);
+               gdlm_ast(lp);
                return;
        }
 
@@ -385,7 +582,7 @@ static int gdlm_add_lvb(struct gdlm_lock *lp)
 {
        char *lvb;
 
-       lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
+       lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
        if (!lvb)
                return -ENOMEM;
 
@@ -401,6 +598,12 @@ static void gdlm_del_lvb(struct gdlm_lock *lp)
        lp->lksb.sb_lvbptr = NULL;
 }
 
+static int gdlm_ast_wait(void *word)
+{
+       schedule();
+       return 0;
+}
+
 /* This can do a synchronous dlm request (requiring a lock_dlm thread to get
    the completion) because gfs won't call hold_lvb() during a callback (from
    the context of a lock_dlm thread). */
@@ -426,10 +629,10 @@ static int hold_null_lock(struct gdlm_lock *lp)
        lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
        set_bit(LFL_NOBAST, &lpn->flags);
        set_bit(LFL_INLOCK, &lpn->flags);
+       set_bit(LFL_AST_WAIT, &lpn->flags);
 
-       init_completion(&lpn->ast_wait);
        gdlm_do_lock(lpn);
-       wait_for_completion(&lpn->ast_wait);
+       wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE);
        error = lpn->lksb.sb_status;
        if (error) {
                printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
@@ -437,7 +640,7 @@ static int hold_null_lock(struct gdlm_lock *lp)
                gdlm_delete_lp(lpn);
                lpn = NULL;
        }
- out:
+out:
        lp->hold_null = lpn;
        return error;
 }
@@ -464,9 +667,9 @@ static void unhold_null_lock(struct gdlm_lock *lp)
    intact on the resource while the lvb is "held" even if it's holding no locks
    on the resource. */
 
-int gdlm_hold_lvb(lm_lock_t *lock, char **lvbp)
+int gdlm_hold_lvb(void *lock, char **lvbp)
 {
-       struct gdlm_lock *lp = (struct gdlm_lock *) lock;
+       struct gdlm_lock *lp = lock;
        int error;
 
        error = gdlm_add_lvb(lp);
@@ -482,31 +685,14 @@ int gdlm_hold_lvb(lm_lock_t *lock, char **lvbp)
        return error;
 }
 
-void gdlm_unhold_lvb(lm_lock_t *lock, char *lvb)
+void gdlm_unhold_lvb(void *lock, char *lvb)
 {
-       struct gdlm_lock *lp = (struct gdlm_lock *) lock;
+       struct gdlm_lock *lp = lock;
 
        unhold_null_lock(lp);
        gdlm_del_lvb(lp);
 }
 
-void gdlm_sync_lvb(lm_lock_t *lock, char *lvb)
-{
-       struct gdlm_lock *lp = (struct gdlm_lock *) lock;
-
-       if (lp->cur != DLM_LOCK_EX)
-               return;
-
-       init_completion(&lp->ast_wait);
-       set_bit(LFL_SYNC_LVB, &lp->flags);
-
-       lp->req = DLM_LOCK_EX;
-       lp->lkf = make_flags(lp, 0, lp->cur, lp->req);
-
-       gdlm_do_lock(lp);
-       wait_for_completion(&lp->ast_wait);
-}
-
 void gdlm_submit_delayed(struct gdlm_ls *ls)
 {
        struct gdlm_lock *lp, *safe;
@@ -520,22 +706,3 @@ void gdlm_submit_delayed(struct gdlm_ls *ls)
        wake_up(&ls->thread_wait);
 }
 
-int gdlm_release_all_locks(struct gdlm_ls *ls)
-{
-       struct gdlm_lock *lp, *safe;
-       int count = 0;
-
-       spin_lock(&ls->async_lock);
-       list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
-               list_del_init(&lp->all_list);
-
-               if (lp->lvb && lp->lvb != junk_lvb)
-                       kfree(lp->lvb);
-               kfree(lp);
-               count++;
-       }
-       spin_unlock(&ls->async_lock);
-
-       return count;
-}
-