[PATCH] v9fs: rename tids to tags to be consistent with Plan 9 documentation
fs/9p/mux.c
/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include <linux/mutex.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"

#define ERREQFLUSH      1
#define SCHED_TIMEOUT   10
#define MAXPOLLWADDR    2

enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending = 2,           /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending = 8,           /* can write */
};

struct v9fs_mux_poll_task;

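/*
 * v9fs_req tracks one outstanding 9P request on a connection: the tag
 * assigned to it, the T-message being sent, the R-message received in
 * reply, an error code, and the callback (with its argument) that is run
 * once the reply arrives or the request fails.
 */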
struct v9fs_req {
        int tag;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        int err;
        v9fs_mux_req_callback cb;
        void *cba;
        struct list_head req_list;
};

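/*
 * v9fs_mux_data is the per-connection multiplexer state: the transport,
 * negotiated message size, tag pool, lists of sent and not-yet-sent
 * requests, partial read/write buffers, the poll wait queues this mux is
 * registered on, and the read/write work items queued on the v9fs
 * workqueue.
 */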
struct v9fs_mux_data {
        spinlock_t lock;
        struct list_head mux_list;
        struct v9fs_mux_poll_task *poll_task;
        int msize;
        unsigned char *extended;
        struct v9fs_transport *trans;
        struct v9fs_idpool tagpool;
        int err;
        wait_queue_head_t equeue;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct v9fs_fcall *rcall;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

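/*
 * Each v9fs_mux_poll_task is one "v9fs-poll" kernel thread together with
 * the list of muxes it polls and a count of how many it currently serves.
 */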
struct v9fs_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;
        int muxnum;
};

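/*
 * v9fs_mux_rpc is the on-stack completion context used by the synchronous
 * v9fs_mux_rpc() path: the callback stores the reply (or error) here and
 * wakes the waiting caller through wqueue.
 */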
struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
        struct v9fs_req *req;
        int err;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
};

static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
                          poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DEFINE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

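/*
 * Module-wide setup: clear the table of poll tasks and create the "v9fs"
 * workqueue that the read and write work items run on.  Intended to be
 * called once when the module is loaded; v9fs_mux_global_exit() undoes it
 * on unload.
 */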
int v9fs_mux_global_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
                v9fs_mux_poll_tasks[i].task = NULL;

        v9fs_mux_wq = create_workqueue("v9fs");
        if (!v9fs_mux_wq)
                return -ENOMEM;

        return 0;
}

void v9fs_mux_global_exit(void)
{
        destroy_workqueue(v9fs_mux_wq);
}

/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation divides the number of mounts by the number
 * of existing poll tasks, rounding up, and caps the result at the size of
 * the v9fs_mux_poll_tasks table.
 */
static int v9fs_mux_calc_poll_procs(int muxnum)
{
        int n;

        if (v9fs_mux_poll_task_num)
                n = muxnum / v9fs_mux_poll_task_num +
                    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
        else
                n = 1;

        if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
                n = ARRAY_SIZE(v9fs_mux_poll_tasks);

        return n;
}

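/*
 * v9fs_mux_poll_start attaches a new mux to one of the poll tasks.  If the
 * desired number of poll tasks has grown, it first tries to start another
 * "v9fs-poll" kthread in a free slot; it then places the mux on a task
 * that is below its quota, or on the last live task if all are full.
 */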
static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
        int i, n;
        struct v9fs_mux_poll_task *vpt, *vptlast;
        struct task_struct *pproc;

        dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
                v9fs_mux_poll_task_num);
        mutex_lock(&v9fs_mux_task_lock);

        n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
        if (n > v9fs_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                        if (v9fs_mux_poll_tasks[i].task == NULL) {
                                vpt = &v9fs_mux_poll_tasks[i];
                                dprintk(DEBUG_MUX, "create proc %p\n", vpt);
                                pproc = kthread_create(v9fs_poll_proc, vpt,
                                                   "v9fs-poll");

                                if (!IS_ERR(pproc)) {
                                        vpt->task = pproc;
                                        INIT_LIST_HEAD(&vpt->mux_list);
                                        vpt->muxnum = 0;
                                        v9fs_mux_poll_task_num++;
                                        wake_up_process(vpt->task);
                                }
                                break;
                        }
                }

                if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
                        dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
        }

        n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
            ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

        vptlast = NULL;
        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                vpt = &v9fs_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        vptlast = vpt;
                        if (vpt->muxnum < n) {
                                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);
                                vpt->muxnum++;
                                m->poll_task = vpt;
                                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, v9fs_pollwait);
                                break;
                        }
                }
        }

        if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
                if (vptlast == NULL) {
                        /* don't leave the task mutex held on the error path */
                        mutex_unlock(&v9fs_mux_task_lock);
                        return -ENOMEM;
                }

                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);
                vptlast->muxnum++;
                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, v9fs_pollwait);
        }

        v9fs_mux_num++;
        mutex_unlock(&v9fs_mux_task_lock);

        return 0;
}

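/*
 * v9fs_mux_poll_stop detaches a mux from its poll task, removes it from any
 * wait queues it registered on, and kills the poll kthread once it has no
 * muxes left to serve.
 */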
static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
        int i;
        struct v9fs_mux_poll_task *vpt;

        mutex_lock(&v9fs_mux_task_lock);
        vpt = m->poll_task;
        list_del(&m->mux_list);
        for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;
                }
        }
        vpt->muxnum--;
        if (!vpt->muxnum) {
                dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
                send_sig(SIGKILL, vpt->task, 1);
                vpt->task = NULL;
                v9fs_mux_poll_task_num--;
        }
        v9fs_mux_num--;
        mutex_unlock(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
                                    unsigned char *extended)
{
        int i, n;
        struct v9fs_mux_data *m, *mtmp;

        dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->msize = msize;
        m->extended = extended;
        m->trans = trans;
        idr_init(&m->tagpool.pool);
        init_MUTEX(&m->tagpool.lock);
        m->err = 0;
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->rcall = NULL;
        m->rpos = 0;
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
        INIT_WORK(&m->rq, v9fs_read_work, m);
        INIT_WORK(&m->wq, v9fs_write_work, m);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
        n = v9fs_mux_poll_start(m);
        if (n) {
                kfree(m);
                return ERR_PTR(n);
        }

        n = trans->poll(trans, &m->pt);
        if (n & POLLIN) {
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        v9fs_mux_poll_stop(m);
                        mtmp = (void *)m->poll_waddr[i];   /* the error code */
                        kfree(m);
                        m = mtmp;
                        break;
                }
        }

        return m;
}

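/*
 * A rough sketch of how a caller is expected to use v9fs_mux_init() and
 * v9fs_mux_destroy().  The names below are illustrative only; the real call
 * site lives in the session setup code, not in this file:
 *
 *	mux = v9fs_mux_init(trans, msize, &extended);
 *	if (IS_ERR(mux))
 *		return PTR_ERR(mux);
 *	...
 *	v9fs_mux_destroy(mux);
 */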
/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
        dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        v9fs_mux_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
                        m);
                wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
                dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));
        }

        v9fs_mux_poll_stop(m);
        m->trans = NULL;

        kfree(m);
}

/**
 * v9fs_pollwait - called by a file's poll operation to add the v9fs-poll
 *      task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
              poll_table * p)
{
        int i;
        struct v9fs_mux_data *m;

        m = container_of(p, struct v9fs_mux_data, pt);
        for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)
                        break;

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        m->poll_waddr[i] = wait_address;

        if (!wait_address) {
                dprintk(DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);
                return;
        }

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * v9fs_poll_mux - polls a mux and schedules read or write work if necessary
 */
static void v9fs_poll_mux(struct v9fs_mux_data *m)
{
        int n;

        if (m->err < 0)
                return;

        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                v9fs_mux_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                }
        }
}

/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 *      the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
        struct v9fs_mux_data *m, *mtmp;
        struct v9fs_mux_poll_task *vpt;

        vpt = a;
        dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
                        v9fs_poll_mux(m);
                }

                dprintk(DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);
        }

        __set_current_state(TASK_RUNNING);
        dprintk(DEBUG_MUX, "finish\n");
        return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;

        m = a;

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->lock);
again:
                req = list_entry(m->unsent_req_list.next, struct v9fs_req,
                               req_list);
                list_move_tail(&req->req_list, &m->req_list);
                if (req->err == ERREQFLUSH)
                        goto again;

                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                dump_data(m->wbuf, m->wsize);
                spin_unlock(&m->lock);
        }

        dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLOUT) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

      error:
        v9fs_mux_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

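/*
 * process_request handles a completed request: it maps an Rerror reply to a
 * negative errno (the numeric errno when the extended protocol flag is set,
 * otherwise a lookup of the error string), sanity-checks that the reply
 * type matches the request, runs the completion callback (or frees the
 * reply when there is none), releases the tag, and wakes anyone waiting on
 * the mux.
 */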
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        int ecode, tag;
        struct v9fs_str *ename;

        tag = req->tag;
        if (!req->err && req->rcall->id == RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

                if (*m->extended)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = v9fs_errstr2errno(ename->str, ename->len);

                        if (!req->err) {        /* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                        }

                        if (!req->err)
                                req->err = -ESERVERFAULT;
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }

        if (req->err == ERREQFLUSH)
                return;

        if (req->cb) {
                dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
                        req->tcall, req->rcall);

                (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
                req->cb = NULL;
        } else
                kfree(req->rcall);

        v9fs_mux_put_tag(m, tag);

        wake_up(&m->equeue);
        kfree(req);
}

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(void *a)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr, *rreq;
        struct v9fs_fcall *rcall;
        char *rbuf;

        m = a;

        if (m->err < 0)
                return;

        rcall = NULL;
        dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall =
                    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->msize) {
                        dprintk(DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                dump_data(m->rbuf, n);
                err =
                    v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
                if (err < 0) {
                        goto error;
                }

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                                           GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
                        rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                req->rcall = rcall;
                                list_del(&req->req_list);
                                spin_unlock(&m->lock);
                                process_request(m, req);
                                break;
                        }

                }

                if (!req) {
                        spin_unlock(&m->lock);
                        if (err >= 0 && rcall->id != RFLUSH)
                                dprintk(DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
                                        m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLIN) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

      error:
        v9fs_mux_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. A successful return is not a guarantee
 * that the request was sent successfully. On failure it returns an error
 * pointer that can be decoded with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                                          struct v9fs_fcall *tc,
                                          v9fs_mux_req_callback cb, void *cba)
{
        int n;
        struct v9fs_req *req;

        dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == TVERSION)
                n = V9FS_NOTAG;
        else
                n = v9fs_mux_get_tag(m);

        if (n < 0) {
                kfree(req);
                return ERR_PTR(-ENOMEM);
        }

        v9fs_set_tag(tc, n);

        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = m->trans->poll(m->trans, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(v9fs_mux_wq, &m->wq);

        return req;
}

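/*
 * Completion callback for the Tflush generated by v9fs_mux_flush_request():
 * it removes the request that carried the flushed tag, runs its callback
 * with whatever state it had, releases the tag, and frees both the Tflush
 * and its reply.
 */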
static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc,
                              struct v9fs_fcall *rc, int err)
{
        v9fs_mux_req_callback cb;
        int tag;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr;

        m = a;
        dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
                rc, err, tc->params.tflush.oldtag);

        spin_lock(&m->lock);
        cb = NULL;
        tag = tc->params.tflush.oldtag;
        list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
                if (req->tag == tag) {
                        list_del(&req->req_list);
                        if (req->cb) {
                                cb = req->cb;
                                req->cb = NULL;
                                spin_unlock(&m->lock);
                                (*cb) (req->cba, req->tcall, req->rcall,
                                       req->err);
                        }
                        kfree(req);
                        wake_up(&m->equeue);
                        break;
                }
        }

        if (!cb)
                spin_unlock(&m->lock);

        v9fs_mux_put_tag(m, tag);
        kfree(tc);
        kfree(rc);
}

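/*
 * v9fs_mux_flush_request asks the server to abort an outstanding request by
 * sending a Tflush carrying its tag; the bookkeeping is finished later in
 * v9fs_mux_flush_cb().
 */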
static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_fcall *fc;

        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}

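/*
 * Completion callback for the synchronous RPC path: it records the reply
 * and error in the caller's v9fs_mux_rpc context and wakes the sleeping
 * caller, unless the request was flushed, in which case the reply is
 * simply freed.
 */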
static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
        struct v9fs_mux_rpc *r;

        if (err == ERREQFLUSH) {
                kfree(rc);
                dprintk(DEBUG_MUX, "err req flush\n");
                return;
        }

        r = a;
        dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
                tc, rc, err);
        r->rcall = rc;
        r->err = err;
        wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 *      The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
{
        int err;
        unsigned long flags;
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;

        r.err = 0;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);

        if (rc)
                *rc = NULL;

        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return PTR_ERR(req);
        }

        r.req = req;
        dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
                req->tag, &r, req);
        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;

        if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
                spin_lock(&m->lock);
                req->tcall = NULL;
                req->err = ERREQFLUSH;
                spin_unlock(&m->lock);

                clear_thread_flag(TIF_SIGPENDING);
                v9fs_mux_flush_request(m, req);
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (!err) {
                if (r.rcall)
                        dprintk(DEBUG_MUX, "got response id %d tag %d\n",
                                r.rcall->id, r.rcall->tag);

                if (rc)
                        *rc = r.rcall;
                else
                        kfree(r.rcall);
        } else {
                kfree(r.rcall);
                dprintk(DEBUG_MUX, "got error %d\n", err);
                if (err > 0)
                        err = -EIO;
        }

        return err;
}

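/*
 * A minimal sketch of the synchronous calling convention (illustrative
 * only, not a call site in this file):
 *
 *	struct v9fs_fcall *rc = NULL;
 *	int err = v9fs_mux_rpc(m, tc, &rc);
 *
 *	if (err < 0)
 *		return err;
 *	use rc, then kfree(rc) when done
 */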
#if 0
/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @cba: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
                   v9fs_mux_req_callback cb, void *a)
{
        int err;
        struct v9fs_req *req;

        req = v9fs_send_request(m, tc, cb, a);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return PTR_ERR(req);
        }

        dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
        return 0;
}
#endif  /*  0  */

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                if (req->cb)
                        (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
                else
                        kfree(req->rcall);

                kfree(req);
        }

        wake_up(&m->equeue);
}

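/*
 * Tag helpers: v9fs_mux_get_tag() allocates a tag from the per-mux idpool
 * (falling back to V9FS_NOTAG when the pool is exhausted) and
 * v9fs_mux_put_tag() returns it once the request is finished.
 */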
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
        int tag;

        tag = v9fs_get_idpool(&m->tagpool);
        if (tag < 0)
                return V9FS_NOTAG;
        else
                return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
        if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tagpool))
                v9fs_put_idpool(tag, &m->tagpool);
}