/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY         RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID       0xf00baa
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE      (2048)
#define RPC_BUFFER_POOLSIZE     (8)
#define RPC_TASK_POOLSIZE       (8)
static struct kmem_cache        *rpc_task_slabp __read_mostly;
static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
static mempool_t        *rpc_task_mempool __read_mostly;
static mempool_t        *rpc_buffer_mempool __read_mostly;

static void                     __rpc_default_timer(struct rpc_task *task);
static void                     rpc_async_schedule(struct work_struct *);
static void                     rpc_release_task(struct rpc_task *task);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * rpciod-related stuff
 */
static DEFINE_MUTEX(rpciod_mutex);
static atomic_t rpciod_users = ATOMIC_INIT(0);
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with the
 * queue->lock held and bottom halves disabled in order to avoid races
 * within rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
        dprintk("RPC: %5u disabling timer\n", task->tk_pid);
        task->tk_timeout_fn = NULL;
        task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
        void (*callback)(struct rpc_task *);

        callback = task->tk_timeout_fn;
        task->tk_timeout_fn = NULL;
        if (callback && RPC_IS_QUEUED(task)) {
                dprintk("RPC: %5u running timer\n", task->tk_pid);
                callback(task);
        }
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        smp_mb__after_clear_bit();
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %5u setting alarm for %lu ms\n",
                        task->tk_pid, task->tk_timeout * 1000 / HZ);

        if (timer)
                task->tk_timeout_fn = timer;
        else
                task->tk_timeout_fn = __rpc_default_timer;
        set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task))
                return;
        if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
                del_singleshot_timer_sync(&task->tk_timer);
                dprintk("RPC: %5u deleting timer\n", task->tk_pid);
        }
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        struct list_head *q;
        struct rpc_task *t;

        INIT_LIST_HEAD(&task->u.tk_wait.links);
        q = &queue->tasks[task->tk_priority];
        if (unlikely(task->tk_priority > queue->maxpriority))
                q = &queue->tasks[queue->maxpriority];
        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_cookie == task->tk_cookie) {
                        list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
                        return;
                }
        }
        list_add_tail(&task->u.tk_wait.list, q);
}
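/*
 * Illustrative sketch (added for clarity, not in the original file):
 * with the grouping above, a task that shares a tk_cookie with an
 * already-queued task hangs off that task's .links list rather than
 * the queue proper.  After adding tasks A1, B, A2 (where A1 and A2
 * share a cookie) to one priority level, the lists look roughly like:
 *
 *      queue->tasks[prio] --> A1 --> B
 *                             |
 *                             +--.links--> A2
 *
 * so __rpc_wake_up_next_priority() can batch-service A1 and A2
 * before moving on to B.
 */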

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        BUG_ON(RPC_IS_QUEUED(task));

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->u.tk_wait.rpc_waitq = queue;
        queue->qlen++;
        rpc_set_queued(task);

        dprintk("RPC: %5u added to queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
        list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        queue = task->u.tk_wait.rpc_waitq;

        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        else
                list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %5u removed from queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        queue->priority = priority;
        queue->count = 1 << (priority * 2);
}
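/*
 * Worked example (added for clarity, not in the original file): the
 * batch count grows geometrically with the priority level, since
 * 1 << (priority * 2) == 4^priority:
 *
 *      priority 0 -> count 1,  priority 1 -> count 4,  priority 2 -> count 16
 *
 * so each step up in priority is allowed four times as many cookie
 * batches before __rpc_wake_up_next_priority() rotates to the next
 * level.
 */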

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
        queue->cookie = cookie;
        queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = maxprio;
        rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
        queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);
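/*
 * Illustrative sketch (not from this file): a typical caller embeds an
 * rpc_wait_queue in its own state and initializes it once at setup
 * time, e.g.:
 *
 *      struct my_xprt {
 *              struct rpc_wait_queue pending;
 *      };
 *
 *      static void my_xprt_init(struct my_xprt *x)
 *      {
 *              rpc_init_wait_queue(&x->pending, "my_pending");
 *      }
 *
 * "my_xprt" and "my_pending" are made-up names for illustration; the
 * qname string only matters for RPC_DEBUG output.
 */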

static int rpc_wait_bit_interruptible(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
}

#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
        static atomic_t rpc_pid;

        task->tk_magic = RPC_TASK_MAGIC_ID;
        task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
        struct rpc_clnt *clnt;
        if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
                return;
        rpc_task_set_debuginfo(task);
        /* Add to global list of all tasks */
        clnt = task->tk_client;
        if (clnt != NULL) {
                spin_lock(&clnt->cl_lock);
                list_add_tail(&task->tk_task, &clnt->cl_tasks);
                spin_unlock(&clnt->cl_lock);
        }
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        smp_mb__after_clear_bit();
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
        if (action == NULL)
                action = rpc_wait_bit_interruptible;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
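/*
 * Illustrative sketch (not from this file): callers normally reach
 * this through the rpc_wait_for_completion_task() wrapper declared in
 * include/linux/sunrpc/sched.h, which passes a NULL action to get the
 * default interruptible wait:
 *
 *      struct rpc_task *task = rpc_run_task(clnt, RPC_TASK_ASYNC,
 *                                           &my_ops, data);
 *      if (!IS_ERR(task)) {
 *              rpc_wait_for_completion_task(task);
 *              rpc_put_task(task);
 *      }
 *
 * "my_ops" and "data" are placeholders supplied by the caller.
 */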

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
        BUG_ON(task->tk_timeout_fn);
        rpc_clear_queued(task);
        if (rpc_test_and_set_running(task))
                return;
        /* We might have raced */
        if (RPC_IS_QUEUED(task)) {
                rpc_clear_running(task);
                return;
        }
        if (RPC_IS_ASYNC(task)) {
                int status;

                INIT_WORK(&task->u.tk_work, rpc_async_schedule);
                status = queue_work(task->tk_workqueue, &task->u.tk_work);
                if (status < 0) {
                        printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
                        task->tk_status = status;
                        return;
                }
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                        rpc_action action, rpc_action timer)
{
        dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
                        task->tk_pid, rpc_qname(q), jiffies);

        if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
                return;
        }

        __rpc_add_wait_queue(q, task);

        BUG_ON(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action, rpc_action timer)
{
        /* Mark the task as being activated if so needed */
        rpc_set_active(task);

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on(q, task, action, timer);
        spin_unlock_bh(&q->lock);
}
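/*
 * Illustrative sketch (not from this file): a state-machine action
 * typically parks the task on a wait queue with a timeout and a resume
 * callback, and some other context later wakes it up:
 *
 *      static void my_action(struct rpc_task *task)
 *      {
 *              task->tk_timeout = 5 * HZ;
 *              rpc_sleep_on(&some_queue, task, my_resume, NULL);
 *      }
 *
 * and elsewhere, once the awaited event occurs:
 *
 *      rpc_wake_up_task(task);
 *
 * "my_action", "my_resume" and "some_queue" are made-up names.  Passing
 * a NULL timer selects __rpc_default_timer(), which wakes the task up
 * with tk_status set to -ETIMEDOUT.
 */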

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
        dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
                        task->tk_pid, jiffies);

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        __rpc_disable_timer(task);
        __rpc_remove_wait_queue(task);

        rpc_make_runnable(task);

        dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task))
                        __rpc_do_wake_up_task(task);
                rpc_finish_wakeup(task);
        }
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
        dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid);
        task->tk_status = -ETIMEDOUT;
        rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
        rcu_read_lock_bh();
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task)) {
                        struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

                        /* Note: we're already in a bh-safe context */
                        spin_lock(&queue->lock);
                        __rpc_do_wake_up_task(task);
                        spin_unlock(&queue->lock);
                }
                rpc_finish_wakeup(task);
        }
        rcu_read_unlock_bh();
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single cookie.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->cookie == task->tk_cookie) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                if (--queue->count)
                        goto new_cookie;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
        rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
        __rpc_wake_up_task(task);
        return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        struct rpc_task *task = NULL;

        dprintk("RPC:       wake_up_next(%p \"%s\")\n",
                        queue, rpc_qname(queue));
        rcu_read_lock_bh();
        spin_lock(&queue->lock);
        if (RPC_IS_PRIORITY(queue))
                task = __rpc_wake_up_next_priority(queue);
        else {
                task_for_first(task, &queue->tasks[0])
                        __rpc_wake_up_task(task);
        }
        spin_unlock(&queue->lock);
        rcu_read_unlock_bh();

        return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        rcu_read_lock_bh();
        spin_lock(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list)
                        __rpc_wake_up_task(task);
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock(&queue->lock);
        rcu_read_unlock_bh();
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        rcu_read_lock_bh();
        spin_lock(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
                        task->tk_status = status;
                        __rpc_wake_up_task(task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock(&queue->lock);
        rcu_read_unlock_bh();
}

static void __rpc_atrun(struct rpc_task *task)
{
        rpc_wake_up_task(task);
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}
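/*
 * Illustrative sketch (not from this file): rpc_delay() is the usual
 * building block for retry backoff inside a tk_action handler,
 * roughly like:
 *
 *      static void my_retry(struct rpc_task *task)
 *      {
 *              if (task->tk_status == -EAGAIN) {
 *                      task->tk_status = 0;
 *                      task->tk_action = my_retry;
 *                      rpc_delay(task, 3 * HZ);
 *              }
 *      }
 *
 * "my_retry" is a made-up name, and real callers cap the number of
 * retries.  The delay is in jiffies, so 3 * HZ resumes the task after
 * roughly three seconds via the __rpc_atrun() timer above.
 */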

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
        lock_kernel();
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
        unlock_kernel();
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                lock_kernel();
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                unlock_kernel();
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                }
        }
}
EXPORT_SYMBOL(rpc_exit_task);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
        if (ops->rpc_release != NULL) {
                lock_kernel();
                ops->rpc_release(calldata);
                unlock_kernel();
        }
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
        int             status = 0;

        dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
                        task->tk_pid, task->tk_flags);

        BUG_ON(RPC_IS_QUEUED(task));

        for (;;) {
                /*
                 * Garbage collection of pending timers...
                 */
                rpc_delete_timer(task);

                /*
                 * Execute any pending callback.
                 */
                if (RPC_DO_CALLBACK(task)) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        /*
                         * If a callback exists, save it, reset it and
                         * call it.  Saving it first prevents us from
                         * clobbering a fresh callback that the handler
                         * itself may set.
                         * - Dave
                         */
                        save_callback = task->tk_callback;
                        task->tk_callback = NULL;
                        save_callback(task);
                }

                /*
                 * Perform the next FSM step.
                 * tk_action may be NULL when the task has been killed
                 * by someone else.
                 */
                if (!RPC_IS_QUEUED(task)) {
                        if (task->tk_action == NULL)
                                break;
                        task->tk_action(task);
                }

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
                rpc_clear_running(task);
                if (RPC_IS_ASYNC(task)) {
                        /* Careful! we may have raced... */
                        if (RPC_IS_QUEUED(task))
                                return;
                        if (rpc_test_and_set_running(task))
                                return;
                        continue;
                }

                /* sync task: sleep here */
                dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
                /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %5u got signal\n", task->tk_pid);
                        task->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(task, -ERESTARTSYS);
                        rpc_wake_up_task(task);
                }
                rpc_set_running(task);
                dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
                        task->tk_status);
        /* Release all resources associated with the task */
        rpc_release_task(task);
}
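/*
 * Illustrative sketch (not from this file): each tk_action step either
 * queues the next step and returns, or sleeps on a wait queue.  A
 * two-step state machine in the style consumed by the loop above
 * might look like:
 *
 *      static void my_step_two(struct rpc_task *task)
 *      {
 *              task->tk_action = rpc_exit_task;
 *      }
 *
 *      static void my_step_one(struct rpc_task *task)
 *      {
 *              task->tk_action = my_step_two;
 *              rpc_sleep_on(&some_queue, task, NULL, NULL);
 *      }
 *
 * "my_step_one", "my_step_two" and "some_queue" are made-up names.
 * When my_step_one() sleeps, __rpc_execute() parks the task (async) or
 * waits in the loop (sync); waking the task resumes it at my_step_two().
 */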

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *       released. In particular note that rpc_release_task() will have
 *       been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_set_running(task);
        __rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
        __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

struct rpc_buffer {
        size_t  len;
        char    data[];
};

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL if the request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
        struct rpc_buffer *buf;
        gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;

        size += sizeof(struct rpc_buffer);
        if (size <= RPC_BUFFER_MAXSIZE)
                buf = mempool_alloc(rpc_buffer_mempool, gfp);
        else
                buf = kmalloc(size, gfp);

        if (!buf)
                return NULL;

        buf->len = size;
        dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
                        task->tk_pid, size, buf);
        return &buf->data;
}

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
        size_t size;
        struct rpc_buffer *buf;

        if (!buffer)
                return;

        buf = container_of(buffer, struct rpc_buffer, data);
        size = buf->len;

        dprintk("RPC:       freeing buffer of size %zu at %p\n",
                        size, buf);

        if (size <= RPC_BUFFER_MAXSIZE)
                mempool_free(buf, rpc_buffer_mempool);
        else
                kfree(buf);
}
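/*
 * Illustrative sketch (not from this file): rpc_malloc() hands back a
 * pointer to the data[] payload, not to the struct rpc_buffer header,
 * so rpc_free() recovers the header with container_of().  A caller
 * pairing the two looks roughly like:
 *
 *      void *p = rpc_malloc(task, 1024);
 *      if (p != NULL) {
 *              ... use up to 1024 bytes at p ...
 *              rpc_free(p);
 *      }
 *
 * Note the NULL check: because the allocator never sleeps, callers
 * must be prepared for it to fail and retry in an rpciod-safe way.
 */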

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        memset(task, 0, sizeof(*task));
        init_timer(&task->tk_timer);
        task->tk_timer.data     = (unsigned long) task;
        task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
        atomic_set(&task->tk_count, 1);
        task->tk_client = clnt;
        task->tk_flags  = flags;
        task->tk_ops = tk_ops;
        if (tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
        task->tk_calldata = calldata;
        INIT_LIST_HEAD(&task->tk_task);

        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;

        task->tk_priority = RPC_PRIORITY_NORMAL;
        task->tk_cookie = (unsigned long)current;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = rpciod_workqueue;

        if (clnt) {
                kref_get(&clnt->cl_kref);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                if (!clnt->cl_intr)
                        task->tk_flags |= RPC_TASK_NOINTR;
        }

        BUG_ON(task->tk_ops == NULL);

        /* starting timestamp */
        task->tk_start = jiffies;

        dprintk("RPC:       new task initialized, procpid %u\n",
                                current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rcu_head *rcu)
{
        struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
        dprintk("RPC: %5u freeing task\n", task->tk_pid);
        mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        struct rpc_task *task;

        task = rpc_alloc_task();
        if (!task)
                goto out;

        rpc_init_task(task, clnt, flags, tk_ops, calldata);

        dprintk("RPC:       allocated task %p\n", task);
        task->tk_flags |= RPC_TASK_DYNAMIC;
out:
        return task;
}

void rpc_put_task(struct rpc_task *task)
{
        const struct rpc_call_ops *tk_ops = task->tk_ops;
        void *calldata = task->tk_calldata;

        if (!atomic_dec_and_test(&task->tk_count))
                return;
        /* Release resources */
        if (task->tk_rqstp)
                xprt_release(task);
        if (task->tk_msg.rpc_cred)
                rpcauth_unbindcred(task);
        if (task->tk_client) {
                rpc_release_client(task->tk_client);
                task->tk_client = NULL;
        }
        if (task->tk_flags & RPC_TASK_DYNAMIC)
                call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
        rpc_release_calldata(tk_ops, calldata);
}
EXPORT_SYMBOL(rpc_put_task);

static void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        dprintk("RPC: %5u release task\n", task->tk_pid);

        if (!list_empty(&task->tk_task)) {
                struct rpc_clnt *clnt = task->tk_client;
                /* Remove from client task list */
                spin_lock(&clnt->cl_lock);
                list_del(&task->tk_task);
                spin_unlock(&clnt->cl_lock);
        }
        BUG_ON(RPC_IS_QUEUED(task));

        /* Synchronously delete any running timer */
        rpc_delete_timer(task);

#ifdef RPC_DEBUG
        task->tk_magic = 0;
#endif
        /* Wake up anyone who is waiting for task completion */
        rpc_mark_complete_task(task);

        rpc_put_task(task);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
                                        const struct rpc_call_ops *ops,
                                        void *data)
{
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
                rpc_release_calldata(ops, data);
                return ERR_PTR(-ENOMEM);
        }
        atomic_inc(&task->tk_count);
        rpc_execute(task);
        return task;
}
EXPORT_SYMBOL(rpc_run_task);
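/*
 * Illustrative sketch (not from this file): the usual calling pattern
 * is to define a struct rpc_call_ops and hand it to rpc_run_task().
 * Note the reference counting above: rpc_run_task() takes an extra
 * reference on the caller's behalf, so the caller must drop it with
 * rpc_put_task() once it is done with the task pointer:
 *
 *      static void my_done(struct rpc_task *task, void *calldata)
 *      {
 *              ... inspect task->tk_status, update calldata ...
 *      }
 *
 *      static const struct rpc_call_ops my_ops = {
 *              .rpc_call_done = my_done,
 *      };
 *
 *      task = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, my_data);
 *      if (IS_ERR(task))
 *              return PTR_ERR(task);
 *      rpc_put_task(task);
 *
 * "my_done", "my_ops" and "my_data" are made-up names for the sketch.
 */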

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
        struct rpc_task *rovr;

        if (list_empty(&clnt->cl_tasks))
                return;
        dprintk("RPC:       killing all tasks for client %p\n", clnt);
        /*
         * Spin lock all_tasks to prevent changes...
         */
        spin_lock(&clnt->cl_lock);
        list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
                if (!RPC_IS_ACTIVATED(rovr))
                        continue;
                if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
                        rovr->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(rovr, -EIO);
                        rpc_wake_up_task(rovr);
                }
        }
        spin_unlock(&clnt->cl_lock);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
        struct workqueue_struct *wq;
        int error = 0;

        if (atomic_inc_not_zero(&rpciod_users))
                return 0;

        mutex_lock(&rpciod_mutex);

        /* Guard against races with rpciod_down() */
        if (rpciod_workqueue != NULL)
                goto out_ok;
        /*
         * Create the rpciod thread and wait for it to start.
         */
        dprintk("RPC:       creating workqueue rpciod\n");
        error = -ENOMEM;
        wq = create_workqueue("rpciod");
        if (wq == NULL)
                goto out;

        rpciod_workqueue = wq;
        error = 0;
out_ok:
        atomic_inc(&rpciod_users);
out:
        mutex_unlock(&rpciod_mutex);
        return error;
}

void
rpciod_down(void)
{
        if (!atomic_dec_and_test(&rpciod_users))
                return;

        mutex_lock(&rpciod_mutex);
        dprintk("RPC:       destroying workqueue rpciod\n");

        if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
                destroy_workqueue(rpciod_workqueue);
                rpciod_workqueue = NULL;
        }
        mutex_unlock(&rpciod_mutex);
}
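/*
 * Illustrative sketch (not from this file): rpciod_up()/rpciod_down()
 * form a refcounted pair, so every user brackets its lifetime with
 * them, roughly like:
 *
 *      error = rpciod_up();
 *      if (error < 0)
 *              return error;
 *      ... do work that schedules async tasks on rpciod_workqueue ...
 *      rpciod_down();
 *
 * The atomic_inc_not_zero()/atomic_dec_and_test() dance above means
 * the workqueue is only created for the first caller and destroyed by
 * the last, with rpciod_mutex serializing the create/destroy races.
 */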

void
rpc_destroy_mempool(void)
{
        if (rpc_buffer_mempool)
                mempool_destroy(rpc_buffer_mempool);
        if (rpc_task_mempool)
                mempool_destroy(rpc_task_mempool);
        if (rpc_task_slabp)
                kmem_cache_destroy(rpc_task_slabp);
        if (rpc_buffer_slabp)
                kmem_cache_destroy(rpc_buffer_slabp);
}

int
rpc_init_mempool(void)
{
        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                             sizeof(struct rpc_task),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
                                                    rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
                                                      rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}