SUNRPC: One more clean up for rpc_create_client_dir()
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6288af0..8f459ab 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -16,7 +16,6 @@
 #include <linux/slab.h>
 #include <linux/mempool.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 
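With the lock_kernel()/unlock_kernel() calls removed in the hunks below, sched.c no longer touches the Big Kernel Lock, so <linux/smp_lock.h> can be dropped. As a reminder of what that header supplied (paraphrased, not a verbatim copy of the header of this era):

	/* Big Kernel Lock entry points declared by <linux/smp_lock.h> (paraphrased) */
	extern void lock_kernel(void);		/* acquire the BKL */
	extern void unlock_kernel(void);	/* release the BKL */
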
@@ -293,11 +292,6 @@ static void rpc_make_runnable(struct rpc_task *task)
        rpc_clear_queued(task);
        if (rpc_test_and_set_running(task))
                return;
-       /* We might have raced */
-       if (RPC_IS_QUEUED(task)) {
-               rpc_clear_running(task);
-               return;
-       }
        if (RPC_IS_ASYNC(task)) {
                int status;
 
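The deleted re-check papered over a race with __rpc_execute(): after setting RPC_TASK_RUNNING, the task could already have been re-queued, so the old code cleared RUNNING again and bailed out. The final hunk of this patch closes that window under queue->lock instead, which is why the re-check can go. For reference, the runstate helpers used here are thin wrappers around atomic bitops on tk_runstate; the definitions below are paraphrased from include/linux/sunrpc/sched.h of this era, so treat them as a sketch rather than the authoritative header:

	#define rpc_test_and_set_running(t) \
			test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
	#define rpc_clear_running(t) \
		do { \
			smp_mb__before_clear_bit(); \
			clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
			smp_mb__after_clear_bit(); \
		} while (0)
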
@@ -574,11 +568,9 @@ EXPORT_SYMBOL_GPL(rpc_delay);
 /*
  * Helper to call task->tk_ops->rpc_call_prepare
  */
-static void rpc_prepare_task(struct rpc_task *task)
+void rpc_prepare_task(struct rpc_task *task)
 {
-       lock_kernel();
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
-       unlock_kernel();
 }
 
 /*
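Two changes here: rpc_call_prepare no longer runs under the BKL, and rpc_prepare_task loses its static qualifier so code outside sched.c can reuse it (presumably as a stock .rpc_call_prepare implementation; the matching declaration is not part of this hunk). The practical consequence for callers is that prepare callbacks must provide their own serialization. A hedged illustration, with hypothetical demo_* names and assuming rpc_call_start() is available in this tree:

	struct demo_state {
		spinlock_t	lock;
		unsigned int	prepared;
	};

	static void demo_prepare(struct rpc_task *task, void *calldata)
	{
		struct demo_state *st = calldata;

		spin_lock(&st->lock);		/* caller-owned lock, no BKL here any more */
		st->prepared++;
		spin_unlock(&st->lock);
		rpc_call_start(task);		/* hand the task to the usual call state machine */
	}
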
@@ -588,9 +580,7 @@ void rpc_exit_task(struct rpc_task *task)
 {
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
-               lock_kernel();
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
-               unlock_kernel();
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
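Same BKL removal for rpc_call_done. The surrounding logic is unchanged: if the done callback sets tk_action again, the task restarts (after the WARN_ON for assassinated tasks and the slot/buffer release). A hypothetical done callback that uses that restart path, in the style of the NFSv3 "jukebox" retry; demo_done_retry is illustrative, while rpc_restart_call() and rpc_delay() are existing SUNRPC helpers:

	static void demo_done_retry(struct rpc_task *task, void *calldata)
	{
		if (task->tk_status == -EJUKEBOX) {
			rpc_restart_call(task);		/* sets tk_action, so __rpc_execute loops again */
			rpc_delay(task, 2 * HZ);	/* back off before the retry */
		}
	}
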
@@ -602,11 +592,8 @@ EXPORT_SYMBOL_GPL(rpc_exit_task);
 
 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 {
-       if (ops->rpc_release != NULL) {
-               lock_kernel();
+       if (ops->rpc_release != NULL)
                ops->rpc_release(calldata);
-               unlock_kernel();
-       }
 }
 
 /*
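rpc_release is the last of the three rpc_call_ops callbacks to lose its BKL wrapper; the braces go with it since only one statement remains. Putting the three callbacks together, a minimal and purely hypothetical rpc_call_ops definition that a SUNRPC user might pass to rpc_run_task() looks roughly like this, reusing the demo_* helpers sketched above:

	static void demo_release(void *calldata)
	{
		kfree(calldata);		/* runs without the BKL from now on */
	}

	static const struct rpc_call_ops demo_call_ops = {
		.rpc_call_prepare	= demo_prepare,
		.rpc_call_done		= demo_done_retry,
		.rpc_release		= demo_release,
	};
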
@@ -614,7 +601,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
  */
 static void __rpc_execute(struct rpc_task *task)
 {
-       int             status = 0;
+       struct rpc_wait_queue *queue;
+       int task_is_async = RPC_IS_ASYNC(task);
+       int status = 0;
 
        dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
                        task->tk_pid, task->tk_flags);
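The async flag is latched into a local before the state-machine loop starts because RPC_IS_ASYNC() dereferences the task, and (per the comment added below) the task may no longer be safely dereferenced once RPC_TASK_RUNNING has been cleared. Paraphrased from include/linux/sunrpc/sched.h of this era:

	#define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
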
@@ -654,15 +643,25 @@ static void __rpc_execute(struct rpc_task *task)
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
-               rpc_clear_running(task);
-               if (RPC_IS_ASYNC(task)) {
-                       /* Careful! we may have raced... */
-                       if (RPC_IS_QUEUED(task))
-                               return;
-                       if (rpc_test_and_set_running(task))
-                               return;
+               /*
+                * The queue->lock protects against races with
+                * rpc_make_runnable().
+                *
+                * Note that once we clear RPC_TASK_RUNNING on an asynchronous
+                * rpc_task, rpc_make_runnable() can assign it to a
+                * different workqueue. We therefore cannot assume that the
+                * rpc_task pointer may still be dereferenced.
+                */
+               queue = task->tk_waitqueue;
+               spin_lock_bh(&queue->lock);
+               if (!RPC_IS_QUEUED(task)) {
+                       spin_unlock_bh(&queue->lock);
                        continue;
                }
+               rpc_clear_running(task);
+               spin_unlock_bh(&queue->lock);
+               if (task_is_async)
+                       return;
 
                /* sync task: sleep here */
                dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
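
This is the heart of the patch: the QUEUED/RUNNING handoff now happens under the lock of task->tk_waitqueue. The code re-checks RPC_IS_QUEUED() under queue->lock, clears RPC_TASK_RUNNING only if the task really is still queued, and then, for an async task, returns immediately without touching the task again, since a waker may already have handed it off to rpciod. This pairs with the waker side, which always calls rpc_make_runnable() with the same queue->lock held; the function below is paraphrased from the wake-up path elsewhere in this file (not part of this diff):

	void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
	{
		spin_lock_bh(&queue->lock);
		rpc_wake_up_task_queue_locked(queue, task);	/* ends in rpc_make_runnable() */
		spin_unlock_bh(&queue->lock);
	}

Synchronous tasks instead fall through to the sleep below and wait for RPC_TASK_RUNNING to be set again.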