* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
- *
+ *
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID 0xf00baa
-static int rpc_task_id;
#endif
/*
* RPC slabs and memory pools
*/
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
-static kmem_cache_t *rpc_task_slabp __read_mostly;
-static kmem_cache_t *rpc_buffer_slabp __read_mostly;
+static struct kmem_cache *rpc_task_slabp __read_mostly;
+static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void __rpc_default_timer(struct rpc_task *task);
-static void rpciod_killall(void);
-static void rpc_async_schedule(void *);
+static void rpc_async_schedule(struct work_struct *);
+static void rpc_release_task(struct rpc_task *task);
/*
* RPC tasks sit here while waiting for conditions to improve.
*/
-static RPC_WAITQ(delay_queue, "delayq");
-
-/*
- * All RPC tasks are linked into this list
- */
-static LIST_HEAD(all_tasks);
+static struct rpc_wait_queue delay_queue;
/*
* rpciod-related stuff
*/
-static DEFINE_MUTEX(rpciod_mutex);
-static unsigned int rpciod_users;
struct workqueue_struct *rpciod_workqueue;
/*
- * Spinlock for other critical sections of code.
- */
-static DEFINE_SPINLOCK(rpc_sched_lock);
-
-/*
* Disable the timer for a given RPC task. Should be called with
* queue->lock and bh_disabled in order to avoid races within
* rpc_run_timer().
*/
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
- dprintk("RPC: %4d disabling timer\n", task->tk_pid);
+ dprintk("RPC: %5u disabling timer\n", task->tk_pid);
task->tk_timeout_fn = NULL;
task->tk_timeout = 0;
}
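/*
 * Illustrative only (not part of this patch): the locking discipline the
 * comment above demands.  A hypothetical caller takes queue->lock with
 * bottom halves disabled before touching the task's timer state, which
 * is what excludes a concurrent rpc_run_timer().
 */
static void example_disable_timer(struct rpc_wait_queue *queue,
				  struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	__rpc_disable_timer(task);
	spin_unlock_bh(&queue->lock);
}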
callback = task->tk_timeout_fn;
task->tk_timeout_fn = NULL;
if (callback && RPC_IS_QUEUED(task)) {
- dprintk("RPC: %4d running timer\n", task->tk_pid);
+ dprintk("RPC: %5u running timer\n", task->tk_pid);
callback(task);
}
smp_mb__before_clear_bit();
if (!task->tk_timeout)
return;
- dprintk("RPC: %4d setting alarm for %lu ms\n",
+ dprintk("RPC: %5u setting alarm for %lu ms\n",
task->tk_pid, task->tk_timeout * 1000 / HZ);
if (RPC_IS_QUEUED(task))
return;
if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
del_singleshot_timer_sync(&task->tk_timer);
- dprintk("RPC: %4d deleting timer\n", task->tk_pid);
+ dprintk("RPC: %5u deleting timer\n", task->tk_pid);
}
}
if (unlikely(task->tk_priority > queue->maxpriority))
q = &queue->tasks[queue->maxpriority];
list_for_each_entry(t, q, u.tk_wait.list) {
- if (t->tk_cookie == task->tk_cookie) {
+ if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
queue->qlen++;
rpc_set_queued(task);
- dprintk("RPC: %4d added to queue %p \"%s\"\n",
- task->tk_pid, queue, rpc_qname(queue));
+ dprintk("RPC: %5u added to queue %p \"%s\"\n",
+ task->tk_pid, queue, rpc_qname(queue));
}
else
list_del(&task->u.tk_wait.list);
queue->qlen--;
- dprintk("RPC: %4d removed from queue %p \"%s\"\n",
- task->tk_pid, queue, rpc_qname(queue));
+ dprintk("RPC: %5u removed from queue %p \"%s\"\n",
+ task->tk_pid, queue, rpc_qname(queue));
}
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
queue->priority = priority;
queue->count = 1 << (priority * 2);
}
-static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
+static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
- queue->cookie = cookie;
+ queue->owner = pid;
queue->nr = RPC_BATCH_COUNT;
}
static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
- rpc_set_waitqueue_cookie(queue, 0);
+ rpc_set_waitqueue_owner(queue, 0);
}
-static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
+static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
int i;
spin_lock_init(&queue->lock);
for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
INIT_LIST_HEAD(&queue->tasks[i]);
- queue->maxpriority = maxprio;
+ queue->maxpriority = nr_queues - 1;
rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
queue->name = qname;
#endif
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
- __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
+ __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
- __rpc_init_priority_wait_queue(queue, qname, 0);
+ __rpc_init_priority_wait_queue(queue, qname, 1);
}
-EXPORT_SYMBOL(rpc_init_wait_queue);
+EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
-static int rpc_wait_bit_interruptible(void *word)
+static int rpc_wait_bit_killable(void *word)
{
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
}
+#ifdef RPC_DEBUG
+static void rpc_task_set_debuginfo(struct rpc_task *task)
+{
+ static atomic_t rpc_pid;
+
+ task->tk_magic = RPC_TASK_MAGIC_ID;
+ task->tk_pid = atomic_inc_return(&rpc_pid);
+}
+#else
+static inline void rpc_task_set_debuginfo(struct rpc_task *task)
+{
+}
+#endif
+
static void rpc_set_active(struct rpc_task *task)
{
+ struct rpc_clnt *clnt;
if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
return;
- spin_lock(&rpc_sched_lock);
-#ifdef RPC_DEBUG
- task->tk_magic = RPC_TASK_MAGIC_ID;
- task->tk_pid = rpc_task_id++;
-#endif
+ rpc_task_set_debuginfo(task);
/* Add to global list of all tasks */
- list_add_tail(&task->tk_task, &all_tasks);
- spin_unlock(&rpc_sched_lock);
+ clnt = task->tk_client;
+ if (clnt != NULL) {
+ spin_lock(&clnt->cl_lock);
+ list_add_tail(&task->tk_task, &clnt->cl_tasks);
+ spin_unlock(&clnt->cl_lock);
+ }
}
/*
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
if (action == NULL)
- action = rpc_wait_bit_interruptible;
+ action = rpc_wait_bit_killable;
return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
- action, TASK_INTERRUPTIBLE);
+ action, TASK_KILLABLE);
}
-EXPORT_SYMBOL(__rpc_wait_for_completion_task);
+EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
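/*
 * Illustrative only: a minimal caller of the wait above.  Passing a NULL
 * action selects rpc_wait_bit_killable, so with TASK_KILLABLE only a
 * fatal signal can interrupt the wait ("my_wait_for_task" is made up).
 */
static int my_wait_for_task(struct rpc_task *task)
{
	int ret = __rpc_wait_for_completion_task(task, NULL);

	if (ret < 0)			/* -ERESTARTSYS: fatal signal */
		return ret;
	return task->tk_status;		/* task ran to completion */
}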
/*
* Make an RPC task runnable.
*
- * Note: If the task is ASYNC, this must be called with
+ * Note: If the task is ASYNC, this must be called with
* the spinlock held to protect the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
if (RPC_IS_ASYNC(task)) {
int status;
- INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
status = queue_work(task->tk_workqueue, &task->u.tk_work);
if (status < 0) {
printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action, rpc_action timer)
{
- dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
- rpc_qname(q), jiffies);
+ dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
+ task->tk_pid, rpc_qname(q), jiffies);
if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
__rpc_sleep_on(q, task, action, timer);
spin_unlock_bh(&q->lock);
}
+EXPORT_SYMBOL_GPL(rpc_sleep_on);
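/*
 * Illustrative only: how a tk_action state function might use
 * rpc_sleep_on().  Queue, predicate and state names are made up; the
 * queue would be set up once with rpc_init_wait_queue(&my_waitq, "myq"),
 * and the event side would call rpc_wake_up(&my_waitq) when the
 * condition becomes true.
 */
static struct rpc_wait_queue my_waitq;

static int my_condition_ready(void)
{
	return 1;			/* stand-in predicate */
}

static void my_state(struct rpc_task *task)
{
	if (!my_condition_ready()) {
		task->tk_timeout = 5 * HZ;	/* NULL timer => __rpc_default_timer */
		task->tk_action = my_state;	/* re-enter this state on wakeup */
		rpc_sleep_on(&my_waitq, task, NULL, NULL);
		return;
	}
	task->tk_action = rpc_exit_task;	/* condition met: wind up the task */
}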
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
* @task: task to be woken up
*/
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
- dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);
+ dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
+ task->tk_pid, jiffies);
#ifdef RPC_DEBUG
BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
rpc_make_runnable(task);
- dprintk("RPC: __rpc_wake_up_task done\n");
+ dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
static void
__rpc_default_timer(struct rpc_task *task)
{
- dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
+ dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
task->tk_status = -ETIMEDOUT;
rpc_wake_up_task(task);
}
}
rcu_read_unlock_bh();
}
+EXPORT_SYMBOL_GPL(rpc_wake_up_task);
/*
* Wake up the next task on a priority queue.
*/
static struct rpc_task * rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
struct list_head *q;
struct rpc_task *task;
/*
- * Service a batch of tasks from a single cookie.
+ * Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
- if (queue->cookie == task->tk_cookie) {
+ if (queue->owner == task->tk_owner) {
if (--queue->nr)
goto out;
list_move_tail(&task->u.tk_wait.list, q);
/*
* Check if we need to switch queues.
*/
if (--queue->count)
- goto new_cookie;
+ goto new_owner;
}
/*
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
-new_cookie:
- rpc_set_waitqueue_cookie(queue, task->tk_cookie);
+new_owner:
+ rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
__rpc_wake_up_task(task);
return task;
}

struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
struct rpc_task *task = NULL;
- dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
+ dprintk("RPC: wake_up_next(%p \"%s\")\n",
+ queue, rpc_qname(queue));
rcu_read_lock_bh();
spin_lock(&queue->lock);
if (RPC_IS_PRIORITY(queue))
return task;
}
+EXPORT_SYMBOL_GPL(rpc_wake_up_next);
/**
* rpc_wake_up - wake up all rpc_tasks
spin_unlock(&queue->lock);
rcu_read_unlock_bh();
}
+EXPORT_SYMBOL_GPL(rpc_wake_up);
/**
* rpc_wake_up_status - wake up all rpc_tasks and set their status value.
spin_unlock(&queue->lock);
rcu_read_unlock_bh();
}
+EXPORT_SYMBOL_GPL(rpc_wake_up_status);
static void __rpc_atrun(struct rpc_task *task)
{
rpc_wake_up_task(task);
}

void rpc_delay(struct rpc_task *task, unsigned long delay)
{
task->tk_timeout = delay;
rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}
+EXPORT_SYMBOL_GPL(rpc_delay);
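/*
 * Illustrative only: the usual rpc_delay() pattern in a state function.
 * On a transient error the task parks on delay_queue above and re-runs
 * this state a quarter second later ("my_retry_state" is made up).
 */
static void my_retry_state(struct rpc_task *task)
{
	if (task->tk_status == -EAGAIN) {
		task->tk_status = 0;			/* clear transient error */
		task->tk_action = my_retry_state;	/* retry after the delay */
		rpc_delay(task, HZ >> 2);
		return;
	}
	task->tk_action = rpc_exit_task;
}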
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
}
}
}
-EXPORT_SYMBOL(rpc_exit_task);
+EXPORT_SYMBOL_GPL(rpc_exit_task);
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
-static int __rpc_execute(struct rpc_task *task)
+static void __rpc_execute(struct rpc_task *task)
{
int status = 0;
- dprintk("RPC: %4d rpc_execute flgs %x\n",
- task->tk_pid, task->tk_flags);
+ dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
+ task->tk_pid, task->tk_flags);
BUG_ON(RPC_IS_QUEUED(task));
if (RPC_DO_CALLBACK(task)) {
/* Define a callback save pointer */
void (*save_callback)(struct rpc_task *);
-
- /*
+
+ /*
* If a callback exists, save it, reset it,
* call it.
* The save is needed to stop from resetting
if (RPC_IS_ASYNC(task)) {
/* Careful! we may have raced... */
if (RPC_IS_QUEUED(task))
- return 0;
+ return;
if (rpc_test_and_set_running(task))
- return 0;
+ return;
continue;
}
/* sync task: sleep here */
- dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
- /* Note: Caller should be using rpc_clnt_sigmask() */
+ dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
status = out_of_line_wait_on_bit(&task->tk_runstate,
- RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
- TASK_INTERRUPTIBLE);
+ RPC_TASK_QUEUED, rpc_wait_bit_killable,
+ TASK_KILLABLE);
if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
- dprintk("RPC: %4d got signal\n", task->tk_pid);
+ dprintk("RPC: %5u got signal\n", task->tk_pid);
task->tk_flags |= RPC_TASK_KILLED;
rpc_exit(task, -ERESTARTSYS);
rpc_wake_up_task(task);
}
rpc_set_running(task);
- dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
+ dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
}
- dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
+ dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
+ task->tk_status);
/* Release all resources associated with the task */
rpc_release_task(task);
- return status;
}
/*
* released. In particular note that tk_release() will have
* been called, so your task memory may have been freed.
*/
-int
-rpc_execute(struct rpc_task *task)
+void rpc_execute(struct rpc_task *task)
{
rpc_set_active(task);
rpc_set_running(task);
- return __rpc_execute(task);
+ __rpc_execute(task);
}
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
{
- __rpc_execute((struct rpc_task *)arg);
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
+struct rpc_buffer {
+ size_t len;
+ char data[];
+};
+
/**
* rpc_malloc - allocate an RPC buffer
* @task: RPC task that will use this buffer
* @size: requested byte size
*
- * We try to ensure that some NFS reads and writes can always proceed
- * by using a mempool when allocating 'small' buffers.
+ * To prevent rpciod from hanging, this allocator never sleeps,
+ * returning NULL if the request cannot be serviced immediately.
+ * The caller can arrange to sleep in a way that is safe for rpciod.
+ *
+ * Most requests are 'small' (under 2KiB) and can be serviced from a
+ * mempool, ensuring that NFS reads and writes can always proceed,
+ * and that there is good locality of reference for these buffers.
+ *
* In order to avoid memory starvation triggering more writebacks of
- * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
+ * NFS requests, we avoid using GFP_KERNEL.
*/
-void * rpc_malloc(struct rpc_task *task, size_t size)
+void *rpc_malloc(struct rpc_task *task, size_t size)
{
- struct rpc_rqst *req = task->tk_rqstp;
- gfp_t gfp;
+ struct rpc_buffer *buf;
+ gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
- if (task->tk_flags & RPC_TASK_SWAPPER)
- gfp = GFP_ATOMIC;
+ size += sizeof(struct rpc_buffer);
+ if (size <= RPC_BUFFER_MAXSIZE)
+ buf = mempool_alloc(rpc_buffer_mempool, gfp);
else
- gfp = GFP_NOFS;
-
- if (size > RPC_BUFFER_MAXSIZE) {
- req->rq_buffer = kmalloc(size, gfp);
- if (req->rq_buffer)
- req->rq_bufsize = size;
- } else {
- req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
- if (req->rq_buffer)
- req->rq_bufsize = RPC_BUFFER_MAXSIZE;
- }
- return req->rq_buffer;
+ buf = kmalloc(size, gfp);
+
+ if (!buf)
+ return NULL;
+
+ buf->len = size;
+ dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
+ task->tk_pid, size, buf);
+ return &buf->data;
}
+EXPORT_SYMBOL_GPL(rpc_malloc);
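/*
 * Illustrative only: a caller built on rpc_malloc().  Because the
 * allocator never sleeps, failure is handled by delaying the task
 * rather than blocking rpciod ("my_setup_buffer" is made up).  The
 * hidden struct rpc_buffer header is also why sizeof(struct rpc_buffer)
 * is added to the request size before the RPC_BUFFER_MAXSIZE check.
 */
static int my_setup_buffer(struct rpc_task *task, size_t size)
{
	void *buf = rpc_malloc(task, size);

	if (buf == NULL) {
		rpc_delay(task, HZ >> 4);	/* rpciod-safe wait for memory */
		return -ENOMEM;
	}
	task->tk_rqstp->rq_buffer = buf;	/* released later via rpc_free() */
	return 0;
}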
/**
* rpc_free - free buffer allocated via rpc_malloc
- * @task: RPC task with a buffer to be freed
+ * @buffer: buffer to free
*
*/
-void rpc_free(struct rpc_task *task)
+void rpc_free(void *buffer)
{
- struct rpc_rqst *req = task->tk_rqstp;
+ size_t size;
+ struct rpc_buffer *buf;
- if (req->rq_buffer) {
- if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
- mempool_free(req->rq_buffer, rpc_buffer_mempool);
- else
- kfree(req->rq_buffer);
- req->rq_buffer = NULL;
- req->rq_bufsize = 0;
- }
+ if (!buffer)
+ return;
+
+ buf = container_of(buffer, struct rpc_buffer, data);
+ size = buf->len;
+
+ dprintk("RPC: freeing buffer of size %zu at %p\n",
+ size, buf);
+
+ if (size <= RPC_BUFFER_MAXSIZE)
+ mempool_free(buf, rpc_buffer_mempool);
+ else
+ kfree(buf);
}
+EXPORT_SYMBOL_GPL(rpc_free);
/*
* Creation and deletion of RPC task structures
*/
-void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
+static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
memset(task, 0, sizeof(*task));
- init_timer(&task->tk_timer);
- task->tk_timer.data = (unsigned long) task;
- task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
+ setup_timer(&task->tk_timer, (void (*)(unsigned long))rpc_run_timer,
+ (unsigned long)task);
atomic_set(&task->tk_count, 1);
- task->tk_client = clnt;
- task->tk_flags = flags;
- task->tk_ops = tk_ops;
- if (tk_ops->rpc_call_prepare != NULL)
- task->tk_action = rpc_prepare_task;
- task->tk_calldata = calldata;
+ task->tk_flags = task_setup_data->flags;
+ task->tk_ops = task_setup_data->callback_ops;
+ task->tk_calldata = task_setup_data->callback_data;
+ INIT_LIST_HEAD(&task->tk_task);
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
- task->tk_priority = RPC_PRIORITY_NORMAL;
- task->tk_cookie = (unsigned long)current;
+ task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
+ task->tk_owner = current->tgid;
/* Initialize workqueue for async tasks */
task->tk_workqueue = rpciod_workqueue;
- if (clnt) {
- atomic_inc(&clnt->cl_users);
- if (clnt->cl_softrtry)
+ task->tk_client = task_setup_data->rpc_client;
+ if (task->tk_client != NULL) {
+ kref_get(&task->tk_client->cl_kref);
+ if (task->tk_client->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
- if (!clnt->cl_intr)
- task->tk_flags |= RPC_TASK_NOINTR;
}
- BUG_ON(task->tk_ops == NULL);
+ if (task->tk_ops->rpc_call_prepare != NULL)
+ task->tk_action = rpc_prepare_task;
+
+ if (task_setup_data->rpc_message != NULL) {
+ memcpy(&task->tk_msg, task_setup_data->rpc_message, sizeof(task->tk_msg));
+ /* Bind the user cred */
+ if (task->tk_msg.rpc_cred != NULL)
+ rpcauth_holdcred(task);
+ else
+ rpcauth_bindcred(task);
+ if (task->tk_action == NULL)
+ rpc_call_start(task);
+ }
/* starting timestamp */
task->tk_start = jiffies;
- dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
- current->pid);
+ dprintk("RPC: new task initialized, procpid %u\n",
+ task_pid_nr(current));
}
static struct rpc_task *
rpc_alloc_task(void)
{
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
static void rpc_free_task(struct rcu_head *rcu)
{
struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
- dprintk("RPC: %4d freeing task\n", task->tk_pid);
+ dprintk("RPC: %5u freeing task\n", task->tk_pid);
mempool_free(task, rpc_task_mempool);
}
/*
- * Create a new task for the specified client. We have to
- * clean up after an allocation failure, as the client may
- * have specified "oneshot".
+ * Create a new task for the specified client.
*/
-struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
+struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
- struct rpc_task *task;
+ struct rpc_task *task = setup_data->task;
+ unsigned short flags = 0;
- task = rpc_alloc_task();
- if (!task)
- goto cleanup;
+ if (task == NULL) {
+ task = rpc_alloc_task();
+ if (task == NULL)
+ goto out;
+ flags = RPC_TASK_DYNAMIC;
+ }
- rpc_init_task(task, clnt, flags, tk_ops, calldata);
+ rpc_init_task(task, setup_data);
- dprintk("RPC: %4d allocated task\n", task->tk_pid);
- task->tk_flags |= RPC_TASK_DYNAMIC;
+ task->tk_flags |= flags;
+ dprintk("RPC: allocated task %p\n", task);
out:
return task;
-
-cleanup:
- /* Check whether to release the client */
- if (clnt) {
- printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
- atomic_read(&clnt->cl_users), clnt->cl_oneshot);
- atomic_inc(&clnt->cl_users); /* pretend we were used ... */
- rpc_release_client(clnt);
- }
- goto out;
}
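/*
 * Illustrative only: driving the new rpc_task_setup interface with
 * made-up callback ops.  With no setup_data->task, the task comes from
 * rpc_task_mempool and is marked RPC_TASK_DYNAMIC, so its initial
 * reference is dropped by rpc_release_task() once the async task ends.
 */
static void my_call_done(struct rpc_task *task, void *calldata)
{
	/* inspect task->tk_status here */
}

static const struct rpc_call_ops my_call_ops = {
	.rpc_call_done	= my_call_done,
};

static int my_async_call(struct rpc_clnt *clnt, const struct rpc_message *msg)
{
	struct rpc_task_setup task_setup_data = {
		.rpc_client	= clnt,
		.rpc_message	= msg,
		.callback_ops	= &my_call_ops,
		.flags		= RPC_TASK_ASYNC,
	};
	struct rpc_task *task = rpc_new_task(&task_setup_data);

	if (task == NULL)
		return -ENOMEM;
	rpc_execute(task);	/* fire and forget: rpciod runs the FSM */
	return 0;
}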
call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
rpc_release_calldata(tk_ops, calldata);
}
-EXPORT_SYMBOL(rpc_put_task);
+EXPORT_SYMBOL_GPL(rpc_put_task);
-void rpc_release_task(struct rpc_task *task)
+static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
- dprintk("RPC: %4d release task\n", task->tk_pid);
-
- /* Remove from global task list */
- spin_lock(&rpc_sched_lock);
- list_del(&task->tk_task);
- spin_unlock(&rpc_sched_lock);
-
+ dprintk("RPC: %5u release task\n", task->tk_pid);
+
+ if (!list_empty(&task->tk_task)) {
+ struct rpc_clnt *clnt = task->tk_client;
+ /* Remove from client task list */
+ spin_lock(&clnt->cl_lock);
+ list_del(&task->tk_task);
+ spin_unlock(&clnt->cl_lock);
+ }
BUG_ON (RPC_IS_QUEUED(task));
/* Synchronously delete any running timer */
rpc_put_task(task);
}
-/**
- * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
- * @clnt: pointer to RPC client
- * @flags: RPC flags
- * @ops: RPC call ops
- * @data: user call data
- */
-struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
- const struct rpc_call_ops *ops,
- void *data)
-{
- struct rpc_task *task;
- task = rpc_new_task(clnt, flags, ops, data);
- if (task == NULL) {
- rpc_release_calldata(ops, data);
- return ERR_PTR(-ENOMEM);
- }
- atomic_inc(&task->tk_count);
- rpc_execute(task);
- return task;
-}
-EXPORT_SYMBOL(rpc_run_task);
-
/*
* Kill all tasks for the given client.
* XXX: kill their descendants as well?
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
struct rpc_task *rovr;
- struct list_head *le;
- dprintk("RPC: killing all tasks for client %p\n", clnt);
+ if (list_empty(&clnt->cl_tasks))
+ return;
+ dprintk("RPC: killing all tasks for client %p\n", clnt);
/*
* Spin lock all_tasks to prevent changes...
*/
- spin_lock(&rpc_sched_lock);
- alltask_for_each(rovr, le, &all_tasks) {
+ spin_lock(&clnt->cl_lock);
+ list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
if (! RPC_IS_ACTIVATED(rovr))
continue;
- if (!clnt || rovr->tk_client == clnt) {
+ if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
rovr->tk_flags |= RPC_TASK_KILLED;
rpc_exit(rovr, -EIO);
rpc_wake_up_task(rovr);
}
}
- spin_unlock(&rpc_sched_lock);
+ spin_unlock(&clnt->cl_lock);
}
+EXPORT_SYMBOL_GPL(rpc_killall_tasks);
-static DECLARE_MUTEX_LOCKED(rpciod_running);
-
-static void rpciod_killall(void)
+int rpciod_up(void)
{
- unsigned long flags;
-
- while (!list_empty(&all_tasks)) {
- clear_thread_flag(TIF_SIGPENDING);
- rpc_killall_tasks(NULL);
- flush_workqueue(rpciod_workqueue);
- if (!list_empty(&all_tasks)) {
- dprintk("rpciod_killall: waiting for tasks to exit\n");
- yield();
- }
- }
+ return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
+}
- spin_lock_irqsave(&current->sighand->siglock, flags);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+void rpciod_down(void)
+{
+ module_put(THIS_MODULE);
}
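/*
 * Illustrative only: with rpciod now started at module load, callers
 * just pin the module around their RPC activity.  Real users such as
 * rpc_new_client() hold the reference for the client's lifetime; this
 * made-up helper brackets a single synchronous task.
 */
static int my_use_rpciod(struct rpc_task *task)
{
	int error = rpciod_up();	/* try_module_get(THIS_MODULE) */

	if (error < 0)
		return error;
	rpc_execute(task);		/* synchronous: returns on completion */
	rpciod_down();			/* module_put(THIS_MODULE) */
	return 0;
}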
/*
- * Start up the rpciod process if it's not already running.
+ * Start up the rpciod workqueue.
*/
-int
-rpciod_up(void)
+static int rpciod_start(void)
{
struct workqueue_struct *wq;
- int error = 0;
- mutex_lock(&rpciod_mutex);
- dprintk("rpciod_up: users %d\n", rpciod_users);
- rpciod_users++;
- if (rpciod_workqueue)
- goto out;
- /*
- * If there's no pid, we should be the first user.
- */
- if (rpciod_users > 1)
- printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
/*
* Create the rpciod thread and wait for it to start.
*/
- error = -ENOMEM;
+ dprintk("RPC: creating workqueue rpciod\n");
wq = create_workqueue("rpciod");
- if (wq == NULL) {
- printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
- rpciod_users--;
- goto out;
- }
rpciod_workqueue = wq;
- error = 0;
-out:
- mutex_unlock(&rpciod_mutex);
- return error;
+ return rpciod_workqueue != NULL;
}
-void
-rpciod_down(void)
+static void rpciod_stop(void)
{
- mutex_lock(&rpciod_mutex);
- dprintk("rpciod_down sema %d\n", rpciod_users);
- if (rpciod_users) {
- if (--rpciod_users)
- goto out;
- } else
- printk(KERN_WARNING "rpciod_down: no users??\n");
+ struct workqueue_struct *wq = NULL;
- if (!rpciod_workqueue) {
- dprintk("rpciod_down: Nothing to do!\n");
- goto out;
- }
- rpciod_killall();
+ if (rpciod_workqueue == NULL)
+ return;
+ dprintk("RPC: destroying workqueue rpciod\n");
- destroy_workqueue(rpciod_workqueue);
+ wq = rpciod_workqueue;
rpciod_workqueue = NULL;
- out:
- mutex_unlock(&rpciod_mutex);
+ destroy_workqueue(wq);
}
-#ifdef RPC_DEBUG
-void rpc_show_tasks(void)
-{
- struct list_head *le;
- struct rpc_task *t;
-
- spin_lock(&rpc_sched_lock);
- if (list_empty(&all_tasks)) {
- spin_unlock(&rpc_sched_lock);
- return;
- }
- printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
- "-rpcwait -action- ---ops--\n");
- alltask_for_each(t, le, &all_tasks) {
- const char *rpc_waitq = "none";
-
- if (RPC_IS_QUEUED(t))
- rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
-
- printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
- t->tk_pid,
- (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
- t->tk_flags, t->tk_status,
- t->tk_client,
- (t->tk_client ? t->tk_client->cl_prog : 0),
- t->tk_rqstp, t->tk_timeout,
- rpc_waitq,
- t->tk_action, t->tk_ops);
- }
- spin_unlock(&rpc_sched_lock);
-}
-#endif
-
void
rpc_destroy_mempool(void)
{
+ rpciod_stop();
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
+ if (!rpciod_start())
+ goto err_nomem;
+ /*
+ * The following is not strictly a mempool initialisation,
+ * but there is no harm in doing it here
+ */
+ rpc_init_wait_queue(&delay_queue, "delayq");
return 0;
err_nomem:
rpc_destroy_mempool();