NSM: Move nsm_create()
[safe/jmp/linux-2.6] fs/lockd/clntproc.c
index 80ae312..dd79570 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -6,18 +6,16 @@
  * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/nfs_fs.h>
 #include <linux/utsname.h>
-#include <linux/smp_lock.h>
+#include <linux/freezer.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
-#include <linux/lockd/sm_inter.h>
 
 #define NLMDBG_FACILITY                NLMDBG_CLIENT
 #define NLMCLNT_GRACE_WAIT     (5*HZ)
@@ -27,7 +25,7 @@
 static int     nlmclnt_test(struct nlm_rqst *, struct file_lock *);
 static int     nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
 static int     nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
-static int     nlm_stat_to_errno(u32 stat);
+static int     nlm_stat_to_errno(__be32 stat);
 static void    nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
 static int     nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);
 
@@ -37,14 +35,14 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
 /*
  * Cookie counter for NLM requests
  */
-static u32     nlm_cookie = 0x1234;
+static atomic_t        nlm_cookie = ATOMIC_INIT(0x1234);
 
-static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
+void nlmclnt_next_cookie(struct nlm_cookie *c)
 {
-       memcpy(c->data, &nlm_cookie, 4);
-       memset(c->data+4, 0, 4);
+       u32     cookie = atomic_inc_return(&nlm_cookie);
+
+       memcpy(c->data, &cookie, 4);
        c->len=4;
-       nlm_cookie++;
 }
 
 static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
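The hunk above is why nlmclnt_next_cookie() loses its memcpy/increment pair: a bare static u32 bumped with no locking can hand the same cookie to two concurrent lock requests, whereas atomic_inc_return() gives every caller a distinct value (the memset() of the unused upper bytes goes away too, presumably because only c->len bytes are ever encoded). A minimal userspace sketch of the same pattern, using C11 <stdatomic.h> in place of the kernel's atomic_t; the structure and names below are illustrative, not the kernel's:

/*
 * Userspace sketch of the lock-free cookie generator above.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cookie {
	unsigned char data[8];
	unsigned int  len;
};

static _Atomic uint32_t cookie_counter = 0x1234;

static void next_cookie(struct cookie *c)
{
	/* fetch_add returns the old value; +1 mirrors atomic_inc_return() */
	uint32_t v = atomic_fetch_add(&cookie_counter, 1) + 1;

	memcpy(c->data, &v, 4);
	c->len = 4;
}

int main(void)
{
	struct cookie c;
	uint32_t v;

	next_cookie(&c);
	memcpy(&v, c.data, sizeof(v));
	printf("cookie length %u, value 0x%08x\n", c.len, v);
	return 0;
}
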
@@ -101,7 +99,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_
        res = __nlm_find_lockowner(host, owner);
        if (res == NULL) {
                spin_unlock(&host->h_lock);
-               new = (struct nlm_lockowner *)kmalloc(sizeof(*new), GFP_KERNEL);
+               new = kmalloc(sizeof(*new), GFP_KERNEL);
                spin_lock(&host->h_lock);
                res = __nlm_find_lockowner(host, owner);
                if (res == NULL && new != NULL) {
@@ -129,93 +127,41 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
 
        nlmclnt_next_cookie(&argp->cookie);
        argp->state   = nsm_local_state;
-       memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
-       lock->caller  = system_utsname.nodename;
+       memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
+       lock->caller  = utsname()->nodename;
        lock->oh.data = req->a_owner;
        lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
                                (unsigned int)fl->fl_u.nfs_fl.owner->pid,
-                               system_utsname.nodename);
+                               utsname()->nodename);
        lock->svid = fl->fl_u.nfs_fl.owner->pid;
-       locks_copy_lock(&lock->fl, fl);
+       lock->fl.fl_start = fl->fl_start;
+       lock->fl.fl_end = fl->fl_end;
+       lock->fl.fl_type = fl->fl_type;
 }
 
 static void nlmclnt_release_lockargs(struct nlm_rqst *req)
 {
-       struct file_lock *fl = &req->a_args.lock.fl;
-
-       if (fl->fl_ops && fl->fl_ops->fl_release_private)
-               fl->fl_ops->fl_release_private(fl);
+       BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
 }
 
-/*
- * This is the main entry point for the NLM client.
+/**
+ * nlmclnt_proc - Perform a single client-side lock request
+ * @host: address of a valid nlm_host context representing the NLM server
+ * @cmd: fcntl-style file lock operation to perform
+ * @fl: address of arguments for the lock operation
+ *
  */
-int
-nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
-{
-       struct nfs_server       *nfssrv = NFS_SERVER(inode);
-       struct nlm_host         *host;
-       struct nlm_rqst         reqst, *call = &reqst;
-       sigset_t                oldset;
-       unsigned long           flags;
-       int                     status, proto, vers;
-
-       vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
-       if (NFS_PROTO(inode)->version > 3) {
-               printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
-               return -ENOLCK;
-       }
-
-       /* Retrieve transport protocol from NFS client */
-       proto = NFS_CLIENT(inode)->cl_xprt->prot;
-
-       if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
-               return -ENOLCK;
-
-       /* Create RPC client handle if not there, and copy soft
-        * and intr flags from NFS client. */
-       if (host->h_rpcclnt == NULL) {
-               struct rpc_clnt *clnt;
-
-               /* Bind an rpc client to this host handle (does not
-                * perform a portmapper lookup) */
-               if (!(clnt = nlm_bind_host(host))) {
-                       status = -ENOLCK;
-                       goto done;
-               }
-               clnt->cl_softrtry = nfssrv->client->cl_softrtry;
-               clnt->cl_intr = nfssrv->client->cl_intr;
-       }
+int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
+{
+       struct nlm_rqst         *call;
+       int                     status;
 
-       /* Keep the old signal mask */
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       oldset = current->blocked;
-
-       /* If we're cleaning up locks because the process is exiting,
-        * perform the RPC call asynchronously. */
-       if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
-           && fl->fl_type == F_UNLCK
-           && (current->flags & PF_EXITING)) {
-               sigfillset(&current->blocked);  /* Mask all signals */
-               recalc_sigpending();
-               spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
-               call = nlmclnt_alloc_call();
-               if (!call) {
-                       status = -ENOMEM;
-                       goto out_restore;
-               }
-               call->a_flags = RPC_TASK_ASYNC;
-       } else {
-               spin_unlock_irqrestore(&current->sighand->siglock, flags);
-               memset(call, 0, sizeof(*call));
-               locks_init_lock(&call->a_args.lock.fl);
-               locks_init_lock(&call->a_res.lock.fl);
-       }
-       call->a_host = host;
+       nlm_get_host(host);
+       call = nlm_alloc_call(host);
+       if (call == NULL)
+               return -ENOMEM;
 
        nlmclnt_locks_init_private(fl, host);
-
        /* Set up the argument struct */
        nlmclnt_setlockargs(call, fl);
 
@@ -230,42 +176,58 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
        else
                status = -EINVAL;
 
- out_restore:
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       current->blocked = oldset;
-       recalc_sigpending();
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+       fl->fl_ops->fl_release_private(fl);
+       fl->fl_ops = NULL;
 
-done:
        dprintk("lockd: clnt proc returns %d\n", status);
-       nlm_release_host(host);
        return status;
 }
-EXPORT_SYMBOL(nlmclnt_proc);
+EXPORT_SYMBOL_GPL(nlmclnt_proc);
 
 /*
  * Allocate an NLM RPC call struct
+ *
+ * Note: the caller must hold a reference to host. In case of failure,
+ * this reference will be released.
  */
-struct nlm_rqst *
-nlmclnt_alloc_call(void)
+struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
 {
        struct nlm_rqst *call;
 
        for(;;) {
                call = kzalloc(sizeof(*call), GFP_KERNEL);
                if (call != NULL) {
+                       atomic_set(&call->a_count, 1);
                        locks_init_lock(&call->a_args.lock.fl);
                        locks_init_lock(&call->a_res.lock.fl);
+                       call->a_host = host;
                        return call;
                }
                if (signalled())
                        break;
-               printk("nlmclnt_alloc_call: failed, waiting for memory\n");
+               printk("nlm_alloc_call: failed, waiting for memory\n");
                schedule_timeout_interruptible(5*HZ);
        }
+       nlm_release_host(host);
        return NULL;
 }
 
+void nlm_release_call(struct nlm_rqst *call)
+{
+       if (!atomic_dec_and_test(&call->a_count))
+               return;
+       nlm_release_host(call->a_host);
+       nlmclnt_release_lockargs(call);
+       kfree(call);
+}
+
+static void nlmclnt_rpc_release(void *data)
+{
+       lock_kernel();
+       nlm_release_call(data);
+       unlock_kernel();
+}
+
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
 {
        DEFINE_WAIT(wait);
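nlm_alloc_call() and nlm_release_call() replace the old arrangement, in which every error path had to kfree() the request by hand, with a reference count: a_count starts at one, each path that hands the request to an asynchronous RPC bumps it (see the atomic_inc(&req->a_count) calls in nlmclnt_unlock() and nlmclnt_cancel() further down), and the final put releases the host and frees the call. A minimal userspace sketch of that get/put discipline, assuming nothing about the real struct nlm_rqst:

/*
 * Userspace sketch of the a_count discipline, using C11 atomics.  "req"
 * is a stand-in; only the pattern matters: allocate with a count of one,
 * take an extra reference before sharing the object with an asynchronous
 * completion, free on the last put.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int count;
	int        payload;
};

static struct req *req_alloc(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (r != NULL)
		atomic_store(&r->count, 1);	/* atomic_set(&call->a_count, 1) */
	return r;
}

static void req_get(struct req *r)
{
	atomic_fetch_add(&r->count, 1);		/* atomic_inc(&req->a_count) */
}

static void req_put(struct req *r)
{
	/* like nlm_release_call(): the put that reaches zero frees */
	if (atomic_fetch_sub(&r->count, 1) == 1)
		free(r);
}

int main(void)
{
	struct req *r = req_alloc();

	if (r == NULL)
		return 1;
	req_get(r);	/* reference handed to the "async" side */
	req_put(r);	/* the async completion's ->rpc_release() */
	req_put(r);	/* the caller's nlm_release_call(); frees here */
	return 0;
}
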
@@ -286,7 +248,7 @@ static int nlm_wait_on_grace(wait_queue_head_t *queue)
  * Generic NLM call
  */
 static int
-nlmclnt_call(struct nlm_rqst *req, u32 proc)
+nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
 {
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
@@ -295,6 +257,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
        struct rpc_message msg = {
                .rpc_argp       = argp,
                .rpc_resp       = resp,
+               .rpc_cred       = cred,
        };
        int             status;
 
@@ -330,7 +293,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
                        }
                        break;
                } else
-               if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
+               if (resp->status == nlm_lck_denied_grace_period) {
                        dprintk("lockd: server in grace period\n");
                        if (argp->reclaim) {
                                printk(KERN_WARNING
@@ -362,57 +325,89 @@ in_grace_period:
 /*
  * Generic NLM call, async version.
  */
-int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
 {
        struct nlm_host *host = req->a_host;
        struct rpc_clnt *clnt;
-       struct rpc_message msg = {
-               .rpc_argp       = &req->a_args,
-               .rpc_resp       = &req->a_res,
+       struct rpc_task_setup task_setup_data = {
+               .rpc_message = msg,
+               .callback_ops = tk_ops,
+               .callback_data = req,
+               .flags = RPC_TASK_ASYNC,
        };
-       int             status;
 
        dprintk("lockd: call procedure %d on %s (async)\n",
                        (int)proc, host->h_name);
 
        /* If we have no RPC client yet, create one. */
-       if ((clnt = nlm_bind_host(host)) == NULL)
-               return -ENOLCK;
-       msg.rpc_proc = &clnt->cl_procinfo[proc];
+       clnt = nlm_bind_host(host);
+       if (clnt == NULL)
+               goto out_err;
+       msg->rpc_proc = &clnt->cl_procinfo[proc];
+       task_setup_data.rpc_client = clnt;
 
         /* bootstrap and kick off the async RPC call */
-        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
+       return rpc_run_task(&task_setup_data);
+out_err:
+       tk_ops->rpc_release(req);
+       return ERR_PTR(-ENOLCK);
+}
 
-       return status;
+static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
+{
+       struct rpc_task *task;
+
+       task = __nlm_async_call(req, proc, msg, tk_ops);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       rpc_put_task(task);
+       return 0;
 }
 
-static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+/*
+ * NLM asynchronous call.
+ */
+int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
-       struct nlm_host *host = req->a_host;
-       struct rpc_clnt *clnt;
-       struct nlm_args *argp = &req->a_args;
-       struct nlm_res  *resp = &req->a_res;
        struct rpc_message msg = {
-               .rpc_argp       = argp,
-               .rpc_resp       = resp,
+               .rpc_argp       = &req->a_args,
+               .rpc_resp       = &req->a_res,
        };
-       int             status;
-
-       dprintk("lockd: call procedure %d on %s (async)\n",
-                       (int)proc, host->h_name);
+       return nlm_do_async_call(req, proc, &msg, tk_ops);
+}
 
-       /* If we have no RPC client yet, create one. */
-       if ((clnt = nlm_bind_host(host)) == NULL)
-               return -ENOLCK;
-       msg.rpc_proc = &clnt->cl_procinfo[proc];
+int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+{
+       struct rpc_message msg = {
+               .rpc_argp       = &req->a_res,
+       };
+       return nlm_do_async_call(req, proc, &msg, tk_ops);
+}
 
-       /* Increment host refcount */
-       nlm_get_host(host);
-        /* bootstrap and kick off the async RPC call */
-        status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
-       if (status < 0)
-               nlm_release_host(host);
-       return status;
+/*
+ * NLM client asynchronous call.
+ *
+ * Note that although the calls are asynchronous, and are therefore
+ *      guaranteed to complete, we still always attempt to wait for
+ *      completion in order to be able to correctly track the lock
+ *      state.
+ */
+static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
+{
+       struct rpc_message msg = {
+               .rpc_argp       = &req->a_args,
+               .rpc_resp       = &req->a_res,
+               .rpc_cred       = cred,
+       };
+       struct rpc_task *task;
+       int err;
+
+       task = __nlm_async_call(req, proc, &msg, tk_ops);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       err = rpc_wait_for_completion_task(task);
+       rpc_put_task(task);
+       return err;
 }
 
 /*
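The refactor above splits asynchronous calls into one helper and two wrappers: __nlm_async_call() starts the RPC with rpc_run_task(), nlm_do_async_call() immediately drops its task reference (fire-and-forget, used by nlm_async_call() and nlm_async_reply()), and nlmclnt_async_call() waits for completion so that, as its comment notes, the client can keep its lock state in step with the reply. A rough userspace analogue using pthreads (build with -pthread); every name here is made up, and the threads merely stand in for rpc_run_task()/rpc_put_task()/rpc_wait_for_completion_task():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_req {
	int result;
};

static void *do_rpc(void *arg)
{
	struct fake_req *req = arg;

	req->result = 42;		/* pretend the server replied */
	return NULL;
}

/* detached worker owns and frees its request, like ->rpc_release() */
static void *do_rpc_and_release(void *arg)
{
	do_rpc(arg);
	free(arg);
	return NULL;
}

/* fire-and-forget: start the work, drop our reference, return */
static int async_call(void)
{
	struct fake_req *req = calloc(1, sizeof(*req));
	pthread_t task;

	if (req == NULL || pthread_create(&task, NULL, do_rpc_and_release, req) != 0) {
		free(req);
		return -1;
	}
	pthread_detach(task);		/* like rpc_put_task() right away */
	return 0;
}

/* asynchronous under the hood, but wait so the caller can see the result */
static int async_call_and_wait(struct fake_req *req)
{
	pthread_t task;

	if (pthread_create(&task, NULL, do_rpc, req) != 0)
		return -1;
	return pthread_join(task, NULL);	/* rpc_wait_for_completion_task() */
}

int main(void)
{
	struct fake_req req = { 0 };

	async_call();
	async_call_and_wait(&req);
	printf("waited result: %d\n", req.result);
	return 0;
}
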
@@ -423,37 +418,42 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
 {
        int     status;
 
-       status = nlmclnt_call(req, NLMPROC_TEST);
-       nlmclnt_release_lockargs(req);
+       status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
        if (status < 0)
-               return status;
+               goto out;
 
-       status = req->a_res.status;
-       if (status == NLM_LCK_GRANTED) {
-               fl->fl_type = F_UNLCK;
-       } if (status == NLM_LCK_DENIED) {
-               /*
-                * Report the conflicting lock back to the application.
-                */
-               locks_copy_lock(fl, &req->a_res.lock.fl);
-               fl->fl_pid = 0;
-       } else {
-               return nlm_stat_to_errno(req->a_res.status);
+       switch (req->a_res.status) {
+               case nlm_granted:
+                       fl->fl_type = F_UNLCK;
+                       break;
+               case nlm_lck_denied:
+                       /*
+                        * Report the conflicting lock back to the application.
+                        */
+                       fl->fl_start = req->a_res.lock.fl.fl_start;
+                       fl->fl_end = req->a_res.lock.fl.fl_end;
+                       fl->fl_type = req->a_res.lock.fl.fl_type;
+                       fl->fl_pid = 0;
+                       break;
+               default:
+                       status = nlm_stat_to_errno(req->a_res.status);
        }
-
-       return 0;
+out:
+       nlm_release_call(req);
+       return status;
 }
 
 static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 {
-       memcpy(&new->fl_u.nfs_fl, &fl->fl_u.nfs_fl, sizeof(new->fl_u.nfs_fl));
-       nlm_get_lockowner(new->fl_u.nfs_fl.owner);
+       new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
+       new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
+       list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
 }
 
 static void nlmclnt_locks_release_private(struct file_lock *fl)
 {
+       list_del(&fl->fl_u.nfs_fl.list);
        nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
-       fl->fl_ops = NULL;
 }
 
 static struct file_lock_operations nlmclnt_lock_ops = {
@@ -465,12 +465,12 @@ static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *ho
 {
        BUG_ON(fl->fl_ops != NULL);
        fl->fl_u.nfs_fl.state = 0;
-       fl->fl_u.nfs_fl.flags = 0;
        fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
+       INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
        fl->fl_ops = &nlmclnt_lock_ops;
 }
 
-static void do_vfs_lock(struct file_lock *fl)
+static int do_vfs_lock(struct file_lock *fl)
 {
        int res = 0;
        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
@@ -483,9 +483,7 @@ static void do_vfs_lock(struct file_lock *fl)
                default:
                        BUG();
        }
-       if (res < 0)
-               printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n",
-                               __FUNCTION__);
+       return res;
 }
 
 /*
@@ -511,59 +509,103 @@ static void do_vfs_lock(struct file_lock *fl)
 static int
 nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
 {
+       struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
        struct nlm_host *host = req->a_host;
        struct nlm_res  *resp = &req->a_res;
-       long timeout;
-       int status;
+       struct nlm_wait *block = NULL;
+       unsigned char fl_flags = fl->fl_flags;
+       unsigned char fl_type;
+       int status = -ENOLCK;
 
-       if (!host->h_monitored && nsm_monitor(host) < 0) {
-               printk(KERN_NOTICE "lockd: failed to monitor %s\n",
-                                       host->h_name);
-               status = -ENOLCK;
+       if (nsm_monitor(host) < 0)
                goto out;
-       }
 
-       if (req->a_args.block) {
-               status = nlmclnt_prepare_block(req, host, fl);
-               if (status < 0)
-                       goto out;
-       }
+       fl->fl_flags |= FL_ACCESS;
+       status = do_vfs_lock(fl);
+       fl->fl_flags = fl_flags;
+       if (status < 0)
+               goto out;
+
+       block = nlmclnt_prepare_block(host, fl);
+again:
+       /*
+        * Initialise resp->status to a valid non-zero value,
+        * since 0 == nlm_lck_granted
+        */
+       resp->status = nlm_lck_blocked;
        for(;;) {
-               status = nlmclnt_call(req, NLMPROC_LOCK);
+               /* Reboot protection */
+               fl->fl_u.nfs_fl.state = host->h_state;
+               status = nlmclnt_call(cred, req, NLMPROC_LOCK);
                if (status < 0)
-                       goto out_unblock;
-               if (resp->status != NLM_LCK_BLOCKED)
                        break;
-               /* Wait on an NLM blocking lock */
-               timeout = nlmclnt_block(req, NLMCLNT_POLL_TIMEOUT);
                /* Did a reclaimer thread notify us of a server reboot? */
-               if (resp->status ==  NLM_LCK_DENIED_GRACE_PERIOD)
+               if (resp->status ==  nlm_lck_denied_grace_period)
                        continue;
-               if (resp->status != NLM_LCK_BLOCKED)
+               if (resp->status != nlm_lck_blocked)
+                       break;
+               /* Wait on an NLM blocking lock */
+               status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+               if (status < 0)
+                       break;
+               if (resp->status != nlm_lck_blocked)
                        break;
-               if (timeout >= 0)
-                       continue;
-               /* We were interrupted. Send a CANCEL request to the server
-                * and exit
-                */
-               status = (int)timeout;
-               goto out_unblock;
        }
 
-       if (resp->status == NLM_LCK_GRANTED) {
-               fl->fl_u.nfs_fl.state = host->h_state;
-               fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
+       /* if we were interrupted while blocking, then cancel the lock request
+        * and exit
+        */
+       if (resp->status == nlm_lck_blocked) {
+               if (!req->a_args.block)
+                       goto out_unlock;
+               if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
+                       goto out_unblock;
+       }
+
+       if (resp->status == nlm_granted) {
+               down_read(&host->h_rwsem);
+               /* Check whether or not the server has rebooted */
+               if (fl->fl_u.nfs_fl.state != host->h_state) {
+                       up_read(&host->h_rwsem);
+                       goto again;
+               }
+               /* Ensure the resulting lock will get added to granted list */
                fl->fl_flags |= FL_SLEEP;
-               do_vfs_lock(fl);
+               if (do_vfs_lock(fl) < 0)
+                       printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
+               up_read(&host->h_rwsem);
+               fl->fl_flags = fl_flags;
+               status = 0;
        }
-       status = nlm_stat_to_errno(resp->status);
+       if (status < 0)
+               goto out_unlock;
+       /*
+        * EAGAIN doesn't make sense for sleeping locks, and in some
+        * cases NLM_LCK_DENIED is returned for a permanent error.  So
+        * turn it into an ENOLCK.
+        */
+       if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
+               status = -ENOLCK;
+       else
+               status = nlm_stat_to_errno(resp->status);
 out_unblock:
-       nlmclnt_finish_block(req);
-       /* Cancel the blocked request if it is still pending */
-       if (resp->status == NLM_LCK_BLOCKED)
-               nlmclnt_cancel(host, req->a_args.block, fl);
+       nlmclnt_finish_block(block);
 out:
-       nlmclnt_release_lockargs(req);
+       nlm_release_call(req);
+       return status;
+out_unlock:
+       /* Fatal error: ensure that we remove the lock altogether */
+       dprintk("lockd: lock attempt ended in fatal error.\n"
+               "       Attempting to unlock.\n");
+       nlmclnt_finish_block(block);
+       fl_type = fl->fl_type;
+       fl->fl_type = F_UNLCK;
+       down_read(&host->h_rwsem);
+       do_vfs_lock(fl);
+       up_read(&host->h_rwsem);
+       fl->fl_type = fl_type;
+       fl->fl_flags = fl_flags;
+       nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        return status;
 }
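The rewritten nlmclnt_lock() adds what its comment calls reboot protection: it records host->h_state before each NLMPROC_LOCK call and, once the server reports the lock granted, re-checks that value under host->h_rwsem; if a reclaimer thread has bumped it because the server rebooted, the whole request is retried from the top rather than trusting a reply from the old server instance. A sketch of that generation-check pattern in userspace C, with a pthread_rwlock_t standing in for h_rwsem and invented names throughout (build with -pthread):

/*
 * Generation check: snapshot a counter before issuing a request, and if
 * it has moved by the time the reply arrives, redo the request.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
static int server_generation;		/* like host->h_state */

static int issue_lock_rpc(void)
{
	return 0;			/* pretend the server granted the lock */
}

static int lock_with_reboot_check(void)
{
	int snapshot, status;

again:
	pthread_rwlock_rdlock(&state_lock);
	snapshot = server_generation;	/* fl->fl_u.nfs_fl.state = host->h_state */
	pthread_rwlock_unlock(&state_lock);

	status = issue_lock_rpc();
	if (status < 0)
		return status;

	pthread_rwlock_rdlock(&state_lock);
	if (snapshot != server_generation) {
		/* server rebooted while we were waiting: start over */
		pthread_rwlock_unlock(&state_lock);
		goto again;
	}
	/* safe to record the lock locally while holding the read side */
	pthread_rwlock_unlock(&state_lock);
	return 0;
}

int main(void)
{
	printf("lock status: %d\n", lock_with_reboot_check());
	return 0;
}
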
 
@@ -587,13 +629,13 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
        nlmclnt_setlockargs(req, fl);
        req->a_args.reclaim = 1;
 
-       if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
-        && req->a_res.status == NLM_LCK_GRANTED)
+       status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
+       if (status >= 0 && req->a_res.status == nlm_granted)
                return 0;
 
        printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
                                "(errno %d, status %d)\n", fl->fl_pid,
-                               status, req->a_res.status);
+                               status, ntohl(req->a_res.status));
 
        /*
         * FIXME: This is a serious failure. We can
@@ -616,52 +658,48 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
 static int
 nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 {
+       struct nlm_host *host = req->a_host;
        struct nlm_res  *resp = &req->a_res;
-       int             status;
-
-       /* Clean the GRANTED flag now so the lock doesn't get
-        * reclaimed while we're stuck in the unlock call. */
-       fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;
+       int status;
+       unsigned char fl_flags = fl->fl_flags;
 
        /*
         * Note: the server is supposed to either grant us the unlock
         * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
         * case, we want to unlock.
         */
-       do_vfs_lock(fl);
-
-       if (req->a_flags & RPC_TASK_ASYNC) {
-               status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
-                                       &nlmclnt_unlock_ops);
-               /* Hrmf... Do the unlock early since locks_remove_posix()
-                * really expects us to free the lock synchronously */
-               if (status < 0) {
-                       nlmclnt_release_lockargs(req);
-                       kfree(req);
-               }
-               return status;
+       fl->fl_flags |= FL_EXISTS;
+       down_read(&host->h_rwsem);
+       status = do_vfs_lock(fl);
+       up_read(&host->h_rwsem);
+       fl->fl_flags = fl_flags;
+       if (status == -ENOENT) {
+               status = 0;
+               goto out;
        }
 
-       status = nlmclnt_call(req, NLMPROC_UNLOCK);
-       nlmclnt_release_lockargs(req);
+       atomic_inc(&req->a_count);
+       status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
+                       NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
        if (status < 0)
-               return status;
+               goto out;
 
-       if (resp->status == NLM_LCK_GRANTED)
-               return 0;
+       if (resp->status == nlm_granted)
+               goto out;
 
-       if (resp->status != NLM_LCK_DENIED_NOLOCKS)
+       if (resp->status != nlm_lck_denied_nolocks)
                printk("lockd: unexpected unlock status: %d\n", resp->status);
-
        /* What to do now? I'm out of my depth... */
-
-       return -ENOLCK;
+       status = -ENOLCK;
+out:
+       nlm_release_call(req);
+       return status;
 }
 
 static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 {
        struct nlm_rqst *req = data;
-       int             status = req->a_res.status;
+       u32 status = ntohl(req->a_res.status);
 
        if (RPC_ASSASSINATED(task))
                goto die;
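A change that recurs throughout this diff is the switch to wire-order status handling: a_res.status is treated as a __be32, compared against the byte-swapped nlm_granted/nlm_lck_denied/... constants, and run through ntohl() (as in the u32 status = ntohl(req->a_res.status) line above) before being printed or switched against the host-order NLM_LCK_* values. A small standalone demonstration of why mixing the two representations breaks on little-endian hosts, with htonl()/ntohl() from <arpa/inet.h> standing in for the kernel's byte-order helpers and an illustrative status value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LCK_DENIED 1	/* illustrative host-order protocol constant */

int main(void)
{
	uint32_t wire_status = htonl(DEMO_LCK_DENIED);	/* as held in a_res.status */

	printf("wire value 0x%08x, host constant 0x%08x\n",
	       wire_status, (uint32_t)DEMO_LCK_DENIED);
	printf("compare raw:         %s\n",
	       wire_status == DEMO_LCK_DENIED ? "equal" : "NOT equal");
	printf("compare via ntohl(): %s\n",
	       ntohl(wire_status) == DEMO_LCK_DENIED ? "equal" : "NOT equal");
	return 0;
}
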
@@ -677,18 +715,18 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
        if (status != NLM_LCK_GRANTED)
                printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
 die:
-       nlm_release_host(req->a_host);
-       nlmclnt_release_lockargs(req);
-       kfree(req);
        return;
  retry_rebind:
+       lock_kernel();
        nlm_rebind_host(req->a_host);
+       unlock_kernel();
  retry_unlock:
        rpc_restart_call(task);
 }
 
 static const struct rpc_call_ops nlmclnt_unlock_ops = {
        .rpc_call_done = nlmclnt_unlock_callback,
+       .rpc_release = nlmclnt_rpc_release,
 };
 
 /*
@@ -699,43 +737,32 @@ static const struct rpc_call_ops nlmclnt_unlock_ops = {
 static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
 {
        struct nlm_rqst *req;
-       unsigned long   flags;
-       sigset_t        oldset;
-       int             status;
+       int status;
 
-       /* Block all signals while setting up call */
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       oldset = current->blocked;
-       sigfillset(&current->blocked);
-       recalc_sigpending();
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+       dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
+               "       Attempting to cancel lock.\n");
 
-       req = nlmclnt_alloc_call();
+       req = nlm_alloc_call(nlm_get_host(host));
        if (!req)
                return -ENOMEM;
-       req->a_host  = host;
        req->a_flags = RPC_TASK_ASYNC;
 
        nlmclnt_setlockargs(req, fl);
        req->a_args.block = block;
 
-       status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
-       if (status < 0) {
-               nlmclnt_release_lockargs(req);
-               kfree(req);
-       }
-
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       current->blocked = oldset;
-       recalc_sigpending();
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
+       atomic_inc(&req->a_count);
+       status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
+                       NLMPROC_CANCEL, &nlmclnt_cancel_ops);
+       if (status == 0 && req->a_res.status == nlm_lck_denied)
+               status = -ENOLCK;
+       nlm_release_call(req);
        return status;
 }
 
 static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
 {
        struct nlm_rqst *req = data;
+       u32 status = ntohl(req->a_res.status);
 
        if (RPC_ASSASSINATED(task))
                goto die;
@@ -746,12 +773,13 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
                goto retry_cancel;
        }
 
-       dprintk("lockd: cancel status %d (task %d)\n",
-                       req->a_res.status, task->tk_pid);
+       dprintk("lockd: cancel status %u (task %u)\n",
+                       status, task->tk_pid);
 
-       switch (req->a_res.status) {
+       switch (status) {
        case NLM_LCK_GRANTED:
        case NLM_LCK_DENIED_GRACE_PERIOD:
+       case NLM_LCK_DENIED:
                /* Everything's good */
                break;
        case NLM_LCK_DENIED_NOLOCKS:
@@ -759,35 +787,35 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
                goto retry_cancel;
        default:
                printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
-                       req->a_res.status);
+                       status);
        }
 
 die:
-       nlm_release_host(req->a_host);
-       nlmclnt_release_lockargs(req);
-       kfree(req);
        return;
 
 retry_cancel:
        /* Don't ever retry more than 3 times */
        if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
                goto die;
+       lock_kernel();
        nlm_rebind_host(req->a_host);
+       unlock_kernel();
        rpc_restart_call(task);
        rpc_delay(task, 30 * HZ);
 }
 
 static const struct rpc_call_ops nlmclnt_cancel_ops = {
        .rpc_call_done = nlmclnt_cancel_callback,
+       .rpc_release = nlmclnt_rpc_release,
 };
 
 /*
  * Convert an NLM status code to a generic kernel errno
  */
 static int
-nlm_stat_to_errno(u32 status)
+nlm_stat_to_errno(__be32 status)
 {
-       switch(status) {
+       switch(ntohl(status)) {
        case NLM_LCK_GRANTED:
                return 0;
        case NLM_LCK_DENIED: