[PATCH] v9fs: handle kthread_create failure, minor bugfixes
/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"

#define ERREQFLUSH      1
#define SCHED_TIMEOUT   10
#define MAXPOLLWADDR    2

enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending = 2,           /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending = 8,           /* can write */
};

struct v9fs_mux_poll_task;

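/*
 * struct v9fs_req - a single in-flight 9P request
 * @tag: numeric tag identifying the request on the wire
 * @tcall: the T-message sent to the server
 * @rcall: the R-message received in response (NULL until it arrives)
 * @err: error code for the request, or ERREQFLUSH if it was flushed
 * @cb: callback invoked when the response arrives
 * @cba: opaque argument passed to @cb
 * @req_list: links the request into the mux's unsent or in-flight list
 */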
struct v9fs_req {
        int tag;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        int err;
        v9fs_mux_req_callback cb;
        void *cba;
        struct list_head req_list;
};

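/*
 * struct v9fs_mux_data - per-session multiplexer state
 *
 * Tracks the transport, the tag pool, the unsent and in-flight request
 * lists, the partially read/written buffers, and the poll/workqueue
 * bookkeeping bits (Rpending, Wpending, Rworksched, Wworksched).
 */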
struct v9fs_mux_data {
        spinlock_t lock;
        struct list_head mux_list;
        struct v9fs_mux_poll_task *poll_task;
        int msize;
        unsigned char *extended;
        struct v9fs_transport *trans;
        struct v9fs_idpool tidpool;
        int err;
        wait_queue_head_t equeue;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct v9fs_fcall *rcall;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

struct v9fs_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;
        int muxnum;
};

struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
        struct v9fs_req *req;
        int err;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
};

static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
                          poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DECLARE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

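/**
 * v9fs_mux_global_init - clear the poll-task table and create the
 *      workqueue shared by all mux sessions. Called once at module load.
 */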
int v9fs_mux_global_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
                v9fs_mux_poll_tasks[i].task = NULL;

        v9fs_mux_wq = create_workqueue("v9fs");
        if (!v9fs_mux_wq)
                return -ENOMEM;

        return 0;
}

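/**
 * v9fs_mux_global_exit - tear down the shared workqueue. Called once at
 *      module unload.
 */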
void v9fs_mux_global_exit(void)
{
        destroy_workqueue(v9fs_mux_wq);
}

/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation returns the number of mounts divided by
 * the current number of poll tasks, rounded up (or one if there are no
 * poll tasks yet), capped at the size of the poll-task table.
 */
inline int v9fs_mux_calc_poll_procs(int muxnum)
{
        int n;

        if (v9fs_mux_poll_task_num)
                n = muxnum / v9fs_mux_poll_task_num +
                    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
        else
                n = 1;

        if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
                n = ARRAY_SIZE(v9fs_mux_poll_tasks);

        return n;
}

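/**
 * v9fs_mux_poll_start - attach a mux to a poll task
 *
 * Picks the least loaded poll task, creating a new one first if the
 * existing tasks are considered full. A kthread_create() failure is not
 * fatal as long as some poll task already exists; an error is returned
 * only when no poll task can be found or created.
 */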
static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
        int i, n;
        struct v9fs_mux_poll_task *vpt, *vptlast;
        struct task_struct *pproc;

        dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
                v9fs_mux_poll_task_num);
        down(&v9fs_mux_task_lock);

        n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
        if (n > v9fs_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                        if (v9fs_mux_poll_tasks[i].task == NULL) {
                                vpt = &v9fs_mux_poll_tasks[i];
                                dprintk(DEBUG_MUX, "create proc %p\n", vpt);
                                pproc = kthread_create(v9fs_poll_proc, vpt,
                                                   "v9fs-poll");

                                if (!IS_ERR(pproc)) {
                                        vpt->task = pproc;
                                        INIT_LIST_HEAD(&vpt->mux_list);
                                        vpt->muxnum = 0;
                                        v9fs_mux_poll_task_num++;
                                        wake_up_process(vpt->task);
                                }
                                break;
                        }
                }

                if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
                        dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
        }

        if (!v9fs_mux_poll_task_num) {
                /* kthread_create failed and no poll task exists: give up */
                up(&v9fs_mux_task_lock);
                return -ENOMEM;
        }

        n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
            ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

        vptlast = NULL;
        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                vpt = &v9fs_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        vptlast = vpt;
                        if (vpt->muxnum < n) {
                                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);
                                vpt->muxnum++;
                                m->poll_task = vpt;
                                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, v9fs_pollwait);
                                break;
                        }
                }
        }

        if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
                if (vptlast == NULL) {
                        up(&v9fs_mux_task_lock);
                        return -ENOMEM;
                }

                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);
                vptlast->muxnum++;
                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, v9fs_pollwait);
        }

        v9fs_mux_num++;
        up(&v9fs_mux_task_lock);

        return 0;
}

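/**
 * v9fs_mux_poll_stop - detach a mux from its poll task, removing the mux
 *      from all poll wait queues. Kills the poll task if this was the
 *      last mux attached to it.
 */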
static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
        int i;
        struct v9fs_mux_poll_task *vpt;

        down(&v9fs_mux_task_lock);
        vpt = m->poll_task;
        list_del(&m->mux_list);
        for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;
                }
        }
        vpt->muxnum--;
        if (!vpt->muxnum) {
                dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
                send_sig(SIGKILL, vpt->task, 1);
                vpt->task = NULL;
                v9fs_mux_poll_task_num--;
        }
        v9fs_mux_num--;
        up(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans: transport structure
 * @msize: maximum message size
 * @extended: pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
                                    unsigned char *extended)
{
        int i, n;
        struct v9fs_mux_data *m, *mtmp;

        dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->msize = msize;
        m->extended = extended;
        m->trans = trans;
        idr_init(&m->tidpool.pool);
        init_MUTEX(&m->tidpool.lock);
        m->err = 0;
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->rcall = NULL;
        m->rpos = 0;
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
        INIT_WORK(&m->rq, v9fs_read_work, m);
        INIT_WORK(&m->wq, v9fs_write_work, m);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
        n = v9fs_mux_poll_start(m);
        if (n) {
                kfree(m);
                return ERR_PTR(n);
        }

        n = trans->poll(trans, &m->pt);
        if (n & POLLIN) {
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        v9fs_mux_poll_stop(m);
                        /* return the ERR_PTR stored by v9fs_pollwait */
                        mtmp = (void *)m->poll_waddr[i];
                        kfree(m);
                        m = mtmp;
                        break;
                }
        }

        return m;
}

/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
        dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        v9fs_mux_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
                        m);
                wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
                dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));
        }

        v9fs_mux_poll_stop(m);
        m->trans = NULL;

        kfree(m);
}

/**
 * v9fs_pollwait - called by a file's poll operation to add the v9fs-poll
 *      task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
              poll_table * p)
{
        int i;
        struct v9fs_mux_data *m;

        m = container_of(p, struct v9fs_mux_data, pt);
        for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)
                        break;

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        m->poll_waddr[i] = wait_address;

        if (!wait_address) {
                dprintk(DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);
                return;
        }

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * v9fs_poll_mux - polls a mux and schedules read or write works if necessary
 */
static inline void v9fs_poll_mux(struct v9fs_mux_data *m)
{
        int n;

        if (m->err < 0)
                return;

        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                v9fs_mux_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                }
        }
}

/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 *      the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
        struct v9fs_mux_data *m, *mtmp;
        struct v9fs_mux_poll_task *vpt;

        vpt = a;
        dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
                        v9fs_poll_mux(m);
                }

                dprintk(DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);
        }

        __set_current_state(TASK_RUNNING);
        dprintk(DEBUG_MUX, "finish\n");
        return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;

        m = a;

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->lock);
                req = list_entry(m->unsent_req_list.next, struct v9fs_req,
                                 req_list);
                list_move_tail(&req->req_list, &m->req_list);
                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                dump_data(m->wbuf, m->wsize);
                spin_unlock(&m->lock);
        }

        dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLOUT) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

      error:
        v9fs_mux_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

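/**
 * process_request - post-process a completed request: map a 9P Rerror
 *      reply to a negative errno, sanity-check that the reply type
 *      matches the request, then either hand the reply to the caller's
 *      callback or free it. Releases the tag and wakes up anyone waiting
 *      on the session's event queue.
 */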
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        int ecode, tag;
        struct v9fs_str *ename;

        tag = req->tag;
        if (req->rcall->id == RERROR && !req->err) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

                if (*m->extended)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = v9fs_errstr2errno(ename->str, ename->len);

                        if (!req->err) {        /* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                        }

                        if (!req->err)
                                req->err = -ESERVERFAULT;
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }

        if (req->cb && req->err != ERREQFLUSH) {
                dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
                        req->tcall, req->rcall);

                (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
                req->cb = NULL;
        } else
                kfree(req->rcall);

        v9fs_mux_put_tag(m, tag);

        wake_up(&m->equeue);
        kfree(req);
}

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(void *a)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr, *rreq;
        struct v9fs_fcall *rcall;
        char *rbuf;

        m = a;

        if (m->err < 0)
                return;

        rcall = NULL;
        dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall =
                    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->msize) {
                        dprintk(DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                dump_data(m->rbuf, n);
                err = v9fs_deserialize_fcall(m->rbuf, n, m->rcall,
                                             *m->extended);
                if (err < 0)
                        goto error;

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                                           GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
                        rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                req->rcall = rcall;
                                list_del(&req->req_list);
                                spin_unlock(&m->lock);
                                process_request(m, req);
                                break;
                        }

                }

                if (!req) {
                        spin_unlock(&m->lock);
                        if (err >= 0 && rcall->id != RFLUSH)
                                dprintk(DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
                                        m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLIN) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

      error:
        v9fs_mux_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Returning from the function is not
 * a guarantee that the request was sent successfully. On failure an
 * ERR_PTR is returned; the error can be retrieved with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                                          struct v9fs_fcall *tc,
                                          v9fs_mux_req_callback cb, void *cba)
{
        int n;
        struct v9fs_req *req;

        dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == TVERSION)
                n = V9FS_NOTAG;
        else
                n = v9fs_mux_get_tag(m);

        if (n < 0) {
                kfree(req);
                return ERR_PTR(-ENOMEM);
        }

        v9fs_set_tag(tc, n);

        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = m->trans->poll(m->trans, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(v9fs_mux_wq, &m->wq);

        return req;
}

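/**
 * v9fs_mux_flush_cb - callback for the Tflush request sent by
 *      v9fs_mux_flush_request. Finds the original (flushed) request by
 *      its old tag, completes it through its callback if one is set, and
 *      releases the tag and both flush fcalls.
 */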
static inline void
v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc,
                  int err)
{
        v9fs_mux_req_callback cb;
        int tag;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr;

        m = a;
        dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
                rc, err, tc->params.tflush.oldtag);

        spin_lock(&m->lock);
        cb = NULL;
        tag = tc->params.tflush.oldtag;
        list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
                if (req->tag == tag) {
                        list_del(&req->req_list);
                        if (req->cb) {
                                cb = req->cb;
                                req->cb = NULL;
                                spin_unlock(&m->lock);
                                (*cb) (req->cba, req->tcall, req->rcall,
                                       req->err);
                        }
                        kfree(req);
                        wake_up(&m->equeue);
                        break;
                }
        }

        if (!cb)
                spin_unlock(&m->lock);

        v9fs_mux_put_tag(m, tag);
        kfree(tc);
        kfree(rc);
}

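/**
 * v9fs_mux_flush_request - ask the server to flush a request that the
 *      client no longer waits for (e.g. the waiting process was
 *      interrupted by a signal).
 */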
static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_fcall *fc;

        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}

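/**
 * v9fs_mux_rpc_cb - callback used by the synchronous v9fs_mux_rpc path;
 *      stores the reply and error code in the v9fs_mux_rpc container and
 *      wakes up the sleeping caller. Flushed requests are ignored.
 */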
static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
        struct v9fs_mux_rpc *r;

        if (err == ERREQFLUSH) {
                dprintk(DEBUG_MUX, "err req flush\n");
                return;
        }

        r = a;
        dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
                tc, rc, err);
        r->rcall = rc;
        r->err = err;
        wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 *      The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
{
        int err;
        unsigned long flags;
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;

        r.err = 0;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);

        if (rc)
                *rc = NULL;

        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        r.req = req;
        dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
                req->tag, &r, req);
        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;

        if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
                spin_lock(&m->lock);
                req->tcall = NULL;
                req->err = ERREQFLUSH;
                spin_unlock(&m->lock);

                clear_thread_flag(TIF_SIGPENDING);
                v9fs_mux_flush_request(m, req);
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (!err) {
                if (r.rcall)
                        dprintk(DEBUG_MUX, "got response id %d tag %d\n",
                                r.rcall->id, r.rcall->tag);

                if (rc)
                        *rc = r.rcall;
                else
                        kfree(r.rcall);
        } else {
                kfree(r.rcall);
                dprintk(DEBUG_MUX, "got error %d\n", err);
                if (err > 0)
                        err = -EIO;
        }

        return err;
}
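
/*
 * A typical synchronous caller (a sketch, not code from this file; it
 * assumes the Tversion fcall constructor from conv.c and mirrors how
 * session setup issues its first RPC):
 *
 *	struct v9fs_fcall *tc, *rc = NULL;
 *	int err;
 *
 *	tc = v9fs_create_tversion(msize, "9P2000.u");
 *	if (!IS_ERR(tc)) {
 *		err = v9fs_mux_rpc(m, tc, &rc);
 *		kfree(tc);
 *		kfree(rc);	/- caller owns the reply when rc is non-NULL
 *	}
 */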

/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @a: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
                   v9fs_mux_req_callback cb, void *a)
{
        int err;
        struct v9fs_req *req;

        req = v9fs_send_request(m, tc, cb, a);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
        return 0;
}

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                if (req->cb)
                        (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
                else
                        kfree(req->rcall);

                kfree(req);
        }

        wake_up(&m->equeue);
}

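/**
 * v9fs_mux_get_tag - allocate a tag for a new request from the session's
 *      tag pool; falls back to V9FS_NOTAG if the pool is exhausted.
 */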
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
        int tag;

        tag = v9fs_get_idpool(&m->tidpool);
        if (tag < 0)
                return V9FS_NOTAG;
        else
                return (u16) tag;
}

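/**
 * v9fs_mux_put_tag - return a tag to the session's tag pool; V9FS_NOTAG
 *      is never pooled and is ignored.
 */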
static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
        if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tidpool))
                v9fs_put_idpool(tag, &m->tidpool);
}