SUNRPC: Fix a potential race in rpc_wake_up_task()
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

#include <linux/sunrpc/clnt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY         RPCDBG_SCHED
#define RPC_TASK_MAGIC_ID       0xf00baa
static int                      rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE      (2048)
#define RPC_BUFFER_POOLSIZE     (8)
#define RPC_TASK_POOLSIZE       (8)
static kmem_cache_t     *rpc_task_slabp __read_mostly;
static kmem_cache_t     *rpc_buffer_slabp __read_mostly;
static mempool_t        *rpc_task_mempool __read_mostly;
static mempool_t        *rpc_buffer_mempool __read_mostly;

static void                     __rpc_default_timer(struct rpc_task *task);
static void                     rpciod_killall(void);
static void                     rpc_async_schedule(void *);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DEFINE_MUTEX(rpciod_mutex);
static unsigned int             rpciod_users;
struct workqueue_struct *rpciod_workqueue;

/*
 * Spinlock for other critical sections of code.
 */
static DEFINE_SPINLOCK(rpc_sched_lock);

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock held and bottom halves disabled in order to avoid races
 * within rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
        dprintk("RPC: %4d disabling timer\n", task->tk_pid);
        task->tk_timeout_fn = NULL;
        task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void rpc_run_timer(struct rpc_task *task)
{
        void (*callback)(struct rpc_task *);

        callback = task->tk_timeout_fn;
        task->tk_timeout_fn = NULL;
        if (callback && RPC_IS_QUEUED(task)) {
                dprintk("RPC: %4d running timer\n", task->tk_pid);
                callback(task);
        }
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        smp_mb__after_clear_bit();
}
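
/*
 * The barriers around clearing RPC_TASK_HAS_TIMER above ensure that the
 * timer callback's stores are visible before the bit clear is, so that
 * the test_and_clear_bit() in rpc_delete_timer() only skips the
 * del_singleshot_timer_sync() call once the timer run has truly finished.
 */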

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %4d setting alarm for %lu ms\n",
                        task->tk_pid, task->tk_timeout * 1000 / HZ);

        if (timer)
                task->tk_timeout_fn = timer;
        else
                task->tk_timeout_fn = __rpc_default_timer;
        set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
        mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}

/*
 * Delete any timer for the current task. Because we use
 * del_singleshot_timer_sync(), this function should never be called
 * while holding queue->lock.
 */
static void
rpc_delete_timer(struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task))
                return;
        if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
                del_singleshot_timer_sync(&task->tk_timer);
                dprintk("RPC: %4d deleting timer\n", task->tk_pid);
        }
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        struct list_head *q;
        struct rpc_task *t;

        INIT_LIST_HEAD(&task->u.tk_wait.links);
        q = &queue->tasks[task->tk_priority];
        if (unlikely(task->tk_priority > queue->maxpriority))
                q = &queue->tasks[queue->maxpriority];
        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_cookie == task->tk_cookie) {
                        list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
                        return;
                }
        }
        list_add_tail(&task->u.tk_wait.list, q);
}
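
/*
 * In other words: tasks sharing a tk_cookie are chained off the first
 * such task via u.tk_wait.links instead of being queued separately, so
 * __rpc_wake_up_next_priority() can service a whole batch belonging to
 * one cookie before moving on.
 */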

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        BUG_ON(RPC_IS_QUEUED(task));

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->u.tk_wait.rpc_waitq = queue;
        queue->qlen++;
        rpc_set_queued(task);

        dprintk("RPC: %4d added to queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
        list_del(&task->u.tk_wait.list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        queue = task->u.tk_wait.rpc_waitq;

        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        else
                list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %4d removed from queue %p \"%s\"\n",
                                task->tk_pid, queue, rpc_qname(queue));
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        queue->priority = priority;
        queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
        queue->cookie = cookie;
        queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_cookie(queue, 0);
}
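
/*
 * The count value 1 << (priority * 2) gives priority level N a budget of
 * 4^N wakeups before __rpc_wake_up_next_priority() rotates to another
 * list, so higher priorities are favoured without starving the rest.
 */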

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = maxprio;
        rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
        queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);

static int rpc_wait_bit_interruptible(void *word)
{
        if (signal_pending(current))
                return -ERESTARTSYS;
        schedule();
        return 0;
}

static void rpc_set_active(struct rpc_task *task)
{
        if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
                return;
        spin_lock(&rpc_sched_lock);
#ifdef RPC_DEBUG
        task->tk_magic = RPC_TASK_MAGIC_ID;
        task->tk_pid = rpc_task_id++;
#endif
        /* Add to global list of all tasks */
        list_add_tail(&task->tk_task, &all_tasks);
        spin_unlock(&rpc_sched_lock);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 */
static void rpc_mark_complete_task(struct rpc_task *task)
{
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        smp_mb__after_clear_bit();
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
}

/*
 * Allow callers to wait for completion of an RPC call
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
        if (action == NULL)
                action = rpc_wait_bit_interruptible;
        return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__rpc_wait_for_completion_task);
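
/*
 * This waits on the same RPC_TASK_ACTIVE bit that rpc_mark_complete_task()
 * clears: the wake_up_bit() there pairs with the wait_on_bit() here, so
 * waiters sleep until rpc_release_task() has marked the task complete.
 */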

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
        BUG_ON(task->tk_timeout_fn);
        rpc_clear_queued(task);
        if (rpc_test_and_set_running(task))
                return;
        /* We might have raced */
        if (RPC_IS_QUEUED(task)) {
                rpc_clear_running(task);
                return;
        }
        if (RPC_IS_ASYNC(task)) {
                int status;

                INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
                status = queue_work(task->tk_workqueue, &task->u.tk_work);
                if (status < 0) {
                        printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
                        task->tk_status = status;
                        return;
                }
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
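
/*
 * Note the ordering in rpc_make_runnable(): the QUEUED bit is cleared
 * before RUNNING is set, and QUEUED is then re-tested, so a task that
 * was requeued in that window is not left marked both queued and
 * running.
 */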

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                        rpc_action action, rpc_action timer)
{
        dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
                                rpc_qname(q), jiffies);

        if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
                return;
        }

        __rpc_add_wait_queue(q, task);

        BUG_ON(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(task, timer);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action, rpc_action timer)
{
        /* Mark the task as being activated if so needed */
        rpc_set_active(task);

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on(q, task, action, timer);
        spin_unlock_bh(&q->lock);
}

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_task *task)
{
        dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);

#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        __rpc_disable_timer(task);
        __rpc_remove_wait_queue(task);

        rpc_make_runnable(task);

        dprintk("RPC:      __rpc_wake_up_task done\n");
}

/*
 * Wake up the specified task
 */
static void __rpc_wake_up_task(struct rpc_task *task)
{
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task))
                        __rpc_do_wake_up_task(task);
                rpc_finish_wakeup(task);
        }
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
        dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
        task->tk_status = -ETIMEDOUT;
        rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void rpc_wake_up_task(struct rpc_task *task)
{
        rcu_read_lock_bh();
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task)) {
                        struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

                        /* Note: we're already in a bh-safe context */
                        spin_lock(&queue->lock);
                        __rpc_do_wake_up_task(task);
                        spin_unlock(&queue->lock);
                }
                rpc_finish_wakeup(task);
        }
        rcu_read_unlock_bh();
}
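
/*
 * The rcu_read_lock_bh() above guards against the race named in the
 * changelog: once __rpc_do_wake_up_task() has made the task runnable,
 * the woken task may run and drop its last reference at any moment.
 * Because RPC_TASK_DYNAMIC tasks are freed through call_rcu_bh() (see
 * rpc_free_task() below), the bh-RCU read side keeps the task memory
 * valid until rpc_finish_wakeup() has completed.
 */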

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single cookie.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->cookie == task->tk_cookie) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                if (--queue->count)
                        goto new_cookie;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
        rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
        __rpc_wake_up_task(task);
        return task;
}
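
/*
 * Bookkeeping summary for the function above: queue->nr counts the
 * wakeups left in the current cookie's batch (RPC_BATCH_COUNT per
 * cookie), queue->count counts the wakeups left before service drops to
 * a lower-priority list, and once every list is found empty the queue is
 * reset back to its maximum priority.
 */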

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        struct rpc_task *task = NULL;

        dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
        rcu_read_lock_bh();
        spin_lock(&queue->lock);
        if (RPC_IS_PRIORITY(queue))
                task = __rpc_wake_up_next_priority(queue);
        else {
                task_for_first(task, &queue->tasks[0])
                        __rpc_wake_up_task(task);
        }
        spin_unlock(&queue->lock);
        rcu_read_unlock_bh();

        return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        rcu_read_lock_bh();
        spin_lock(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list)
                        __rpc_wake_up_task(task);
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock(&queue->lock);
        rcu_read_unlock_bh();
}

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct rpc_task *task, *next;
        struct list_head *head;

        rcu_read_lock_bh();
        spin_lock(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
                        task->tk_status = status;
                        __rpc_wake_up_task(task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock(&queue->lock);
        rcu_read_unlock_bh();
}

static void __rpc_atrun(struct rpc_task *task)
{
        rpc_wake_up_task(task);
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
static void rpc_prepare_task(struct rpc_task *task)
{
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                }
        }
}
EXPORT_SYMBOL(rpc_exit_task);

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int __rpc_execute(struct rpc_task *task)
{
        int             status = 0;

        dprintk("RPC: %4d rpc_execute flgs %x\n",
                                task->tk_pid, task->tk_flags);

        BUG_ON(RPC_IS_QUEUED(task));

        for (;;) {
                /*
                 * Garbage collection of pending timers...
                 */
                rpc_delete_timer(task);

                /*
                 * Execute any pending callback.
                 */
                if (RPC_DO_CALLBACK(task)) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        /*
                         * If a callback exists, save it, reset it,
                         * call it.
                         * The save is needed to stop from resetting
                         * another callback set within the callback handler
                         * - Dave
                         */
                        save_callback = task->tk_callback;
                        task->tk_callback = NULL;
                        lock_kernel();
                        save_callback(task);
                        unlock_kernel();
                }

                /*
                 * Perform the next FSM step.
                 * tk_action may be NULL when the task has been killed
                 * by someone else.
                 */
                if (!RPC_IS_QUEUED(task)) {
                        if (task->tk_action == NULL)
                                break;
                        lock_kernel();
                        task->tk_action(task);
                        unlock_kernel();
                }

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
                rpc_clear_running(task);
                if (RPC_IS_ASYNC(task)) {
                        /* Careful! we may have raced... */
                        if (RPC_IS_QUEUED(task))
                                return 0;
                        if (rpc_test_and_set_running(task))
                                return 0;
                        continue;
                }

                /* sync task: sleep here */
                dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
                /* Note: Caller should be using rpc_clnt_sigmask() */
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %4d got signal\n", task->tk_pid);
                        task->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(task, -ERESTARTSYS);
                        rpc_wake_up_task(task);
                }
                rpc_set_running(task);
                dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
        /* Release all resources associated with the task */
        rpc_release_task(task);
        return status;
}
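
/*
 * The loop above is coordinated by two bits in tk_runstate:
 * RPC_TASK_RUNNING, owned by whichever context is driving the state
 * machine, and RPC_TASK_QUEUED, set while the task sits on a wait queue.
 * An async task simply returns once it is queued (rpc_make_runnable()
 * will resubmit it to the workqueue); a sync task sleeps right here on
 * the QUEUED bit instead.
 */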

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *       released. In particular note that rpc_release_task() will have
 *       been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
        rpc_set_active(task);
        rpc_set_running(task);
        return __rpc_execute(task);
}

static void rpc_async_schedule(void *arg)
{
        __rpc_execute((struct rpc_task *)arg);
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void * rpc_malloc(struct rpc_task *task, size_t size)
{
        struct rpc_rqst *req = task->tk_rqstp;
        gfp_t   gfp;

        if (task->tk_flags & RPC_TASK_SWAPPER)
                gfp = GFP_ATOMIC;
        else
                gfp = GFP_NOFS;

        if (size > RPC_BUFFER_MAXSIZE) {
                req->rq_buffer = kmalloc(size, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = size;
        } else {
                req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
                if (req->rq_buffer)
                        req->rq_bufsize = RPC_BUFFER_MAXSIZE;
        }
        return req->rq_buffer;
}
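
/*
 * Note the convention shared with rpc_free(): buffers carved from the
 * mempool are recorded with rq_bufsize == RPC_BUFFER_MAXSIZE, which is
 * how rpc_free() later chooses between mempool_free() and kfree().
 */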

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @task: RPC task with a buffer to be freed
 *
 */
void rpc_free(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_buffer) {
                if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
                        mempool_free(req->rq_buffer, rpc_buffer_mempool);
                else
                        kfree(req->rq_buffer);
                req->rq_buffer = NULL;
                req->rq_bufsize = 0;
        }
}

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        memset(task, 0, sizeof(*task));
        init_timer(&task->tk_timer);
        task->tk_timer.data     = (unsigned long) task;
        task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
        atomic_set(&task->tk_count, 1);
        task->tk_client = clnt;
        task->tk_flags  = flags;
        task->tk_ops = tk_ops;
        if (tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
        task->tk_calldata = calldata;

        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;

        task->tk_priority = RPC_PRIORITY_NORMAL;
        task->tk_cookie = (unsigned long)current;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = rpciod_workqueue;

        if (clnt) {
                atomic_inc(&clnt->cl_users);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
                if (!clnt->cl_intr)
                        task->tk_flags |= RPC_TASK_NOINTR;
        }

        BUG_ON(task->tk_ops == NULL);

        /* starting timestamp */
        task->tk_start = jiffies;

        dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
                                current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void rpc_free_task(struct rcu_head *rcu)
{
        struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
        dprintk("RPC: %4d freeing task\n", task->tk_pid);
        mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
        struct rpc_task *task;

        task = rpc_alloc_task();
        if (!task)
                goto cleanup;

        rpc_init_task(task, clnt, flags, tk_ops, calldata);

        dprintk("RPC: %4d allocated task\n", task->tk_pid);
        task->tk_flags |= RPC_TASK_DYNAMIC;
out:
        return task;

cleanup:
        /* Check whether to release the client */
        if (clnt) {
                printk(KERN_WARNING "rpc_new_task: failed, users=%d, oneshot=%d\n",
                        atomic_read(&clnt->cl_users), clnt->cl_oneshot);
                atomic_inc(&clnt->cl_users); /* pretend we were used ... */
                rpc_release_client(clnt);
        }
        goto out;
}

void rpc_put_task(struct rpc_task *task)
{
        const struct rpc_call_ops *tk_ops = task->tk_ops;
        void *calldata = task->tk_calldata;

        if (!atomic_dec_and_test(&task->tk_count))
                return;
        /* Release resources */
        if (task->tk_rqstp)
                xprt_release(task);
        if (task->tk_msg.rpc_cred)
                rpcauth_unbindcred(task);
        if (task->tk_client) {
                rpc_release_client(task->tk_client);
                task->tk_client = NULL;
        }
        if (task->tk_flags & RPC_TASK_DYNAMIC)
                call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
        if (tk_ops->rpc_release)
                tk_ops->rpc_release(calldata);
}
EXPORT_SYMBOL(rpc_put_task);
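
/*
 * Ordering note: tk_ops and tk_calldata are sampled before the final
 * reference is dropped, because the rpc_release callback must still run
 * after the task itself may have been handed to call_rcu_bh() for
 * freeing.
 */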

void rpc_release_task(struct rpc_task *task)
{
#ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
        dprintk("RPC: %4d release task\n", task->tk_pid);

        /* Remove from global task list */
        spin_lock(&rpc_sched_lock);
        list_del(&task->tk_task);
        spin_unlock(&rpc_sched_lock);

        BUG_ON(RPC_IS_QUEUED(task));

        /* Synchronously delete any running timer */
        rpc_delete_timer(task);

#ifdef RPC_DEBUG
        task->tk_magic = 0;
#endif
        /* Wake up anyone who is waiting for task completion */
        rpc_mark_complete_task(task);

        rpc_put_task(task);
}

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @clnt: pointer to RPC client
 * @flags: RPC flags
 * @ops: RPC call ops
 * @data: user call data
 */
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
                                        const struct rpc_call_ops *ops,
                                        void *data)
{
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
                if (ops->rpc_release != NULL)
                        ops->rpc_release(data);
                return ERR_PTR(-ENOMEM);
        }
        atomic_inc(&task->tk_count);
        rpc_execute(task);
        return task;
}
EXPORT_SYMBOL(rpc_run_task);
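
/*
 * Example caller (illustrative sketch only, not part of this file; the
 * callback and data names are hypothetical):
 *
 *      static void my_call_done(struct rpc_task *task, void *calldata)
 *      {
 *              ... check task->tk_status, release calldata resources ...
 *      }
 *
 *      static const struct rpc_call_ops my_ops = {
 *              .rpc_call_done = my_call_done,
 *      };
 *
 *      task = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, my_data);
 *      if (!IS_ERR(task))
 *              rpc_put_task(task);
 *
 * rpc_run_task() bumps tk_count before rpc_execute(), so the returned
 * task pointer is owned by the caller, who must drop that reference with
 * rpc_put_task() when done inspecting the task.
 */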

/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void rpc_killall_tasks(struct rpc_clnt *clnt)
{
        struct rpc_task *rovr;
        struct list_head *le;

        dprintk("RPC:      killing all tasks for client %p\n", clnt);

        /*
         * Spin lock all_tasks to prevent changes...
         */
        spin_lock(&rpc_sched_lock);
        alltask_for_each(rovr, le, &all_tasks) {
                if (!RPC_IS_ACTIVATED(rovr))
                        continue;
                if (!clnt || rovr->tk_client == clnt) {
                        rovr->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(rovr, -EIO);
                        rpc_wake_up_task(rovr);
                }
        }
        spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static void rpciod_killall(void)
{
        unsigned long flags;

        while (!list_empty(&all_tasks)) {
                clear_thread_flag(TIF_SIGPENDING);
                rpc_killall_tasks(NULL);
                flush_workqueue(rpciod_workqueue);
                if (!list_empty(&all_tasks)) {
                        dprintk("rpciod_killall: waiting for tasks to exit\n");
                        yield();
                }
        }

        spin_lock_irqsave(&current->sighand->siglock, flags);
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
        struct workqueue_struct *wq;
        int error = 0;

        mutex_lock(&rpciod_mutex);
        dprintk("rpciod_up: users %d\n", rpciod_users);
        rpciod_users++;
        if (rpciod_workqueue)
                goto out;
        /*
         * If there's no workqueue, we should be the first user.
         */
        if (rpciod_users > 1)
                printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
        /*
         * Create the rpciod workqueue.
         */
        error = -ENOMEM;
        wq = create_workqueue("rpciod");
        if (wq == NULL) {
                printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
                rpciod_users--;
                goto out;
        }
        rpciod_workqueue = wq;
        error = 0;
out:
        mutex_unlock(&rpciod_mutex);
        return error;
}

void
rpciod_down(void)
{
        mutex_lock(&rpciod_mutex);
        dprintk("rpciod_down: users %d\n", rpciod_users);
        if (rpciod_users) {
                if (--rpciod_users)
                        goto out;
        } else
                printk(KERN_WARNING "rpciod_down: no users??\n");

        if (!rpciod_workqueue) {
                dprintk("rpciod_down: Nothing to do!\n");
                goto out;
        }
        rpciod_killall();

        destroy_workqueue(rpciod_workqueue);
        rpciod_workqueue = NULL;
out:
        mutex_unlock(&rpciod_mutex);
}

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
        struct list_head *le;
        struct rpc_task *t;

        spin_lock(&rpc_sched_lock);
        if (list_empty(&all_tasks)) {
                spin_unlock(&rpc_sched_lock);
                return;
        }
        printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
                "-rpcwait -action- ---ops--\n");
        alltask_for_each(t, le, &all_tasks) {
                const char *rpc_waitq = "none";

                if (RPC_IS_QUEUED(t))
                        rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);

                printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
                        t->tk_pid,
                        (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
                        t->tk_flags, t->tk_status,
                        t->tk_client,
                        (t->tk_client ? t->tk_client->cl_prog : 0),
                        t->tk_rqstp, t->tk_timeout,
                        rpc_waitq,
                        t->tk_action, t->tk_ops);
        }
        spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
        if (rpc_buffer_mempool)
                mempool_destroy(rpc_buffer_mempool);
        if (rpc_task_mempool)
                mempool_destroy(rpc_task_mempool);
        if (rpc_task_slabp)
                kmem_cache_destroy(rpc_task_slabp);
        if (rpc_buffer_slabp)
                kmem_cache_destroy(rpc_buffer_slabp);
}

int
rpc_init_mempool(void)
{
        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                             sizeof(struct rpc_task),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL, NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
                                                    rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
                                                      rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}
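
/*
 * The mempools above reserve a minimum of RPC_TASK_POOLSIZE task
 * structures and RPC_BUFFER_POOLSIZE buffers, so rpc_alloc_task() and
 * rpc_malloc() can keep making progress even when the slab allocator is
 * under memory pressure (hence also the GFP_NOFS in rpc_malloc()).
 */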