/*
 * linux/fs/9p/trans_fd.c
 *
 * Fd transport layer.  Includes deprecated socket layer.
 *
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define ERREQFLUSH      1
#define MAXPOLLWADDR    2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */

struct p9_fd_opts {
        int rfd;
        int wfd;
        u16 port;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
        struct file *rd;
        struct file *wr;
        struct p9_conn *conn;
};

/*
  * Option Parsing (code inspired by NFS code)
  *  - a little lazy - parse all fd-transport options
  */

enum {
        /* Options that take integer arguments */
        Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};

static const match_table_t tokens = {
        {Opt_port, "port=%u"},
        {Opt_rfdno, "rfdno=%u"},
        {Opt_wfdno, "wfdno=%u"},
        {Opt_err, NULL},
};
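
/*
 * Illustrative mount option strings handled by this transport family
 * (sketch, not an exhaustive list):
 *
 *      trans=tcp,port=564              connect over TCP (564 is P9_PORT)
 *      trans=unix                      connect to a named unix socket
 *      trans=fd,rfdno=3,wfdno=4        use already-open file descriptors
 *
 * Only port=, rfdno= and wfdno= are parsed here; trans= itself is
 * handled by the 9p client core when it picks a transport module.
 */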

enum {
        Rworksched = 1,         /* read work scheduled or running */
        Rpending = 2,           /* can read */
        Wworksched = 4,         /* write work scheduled or running */
        Wpending = 8,           /* can write */
};

enum {
        None,
        Flushing,
        Flushed,
};

/**
 * struct p9_req - fd mux encoding of an rpc transaction
 * @lock: protects req_list
 * @tag: numeric tag for rpc transaction
 * @tcall: request &p9_fcall structure
 * @rcall: response &p9_fcall structure
 * @err: error state
 * @flush: flag to indicate RPC has been flushed
 * @req_list: list link for higher level objects to chain requests
 * @m: connection this request was issued on
 * @wqueue: wait queue that client is blocked on for this rpc
 *
 */

struct p9_req {
        spinlock_t lock;
        int tag;
        struct p9_fcall *tcall;
        struct p9_fcall *rcall;
        int err;
        int flush;
        struct list_head req_list;
        struct p9_conn *m;
        wait_queue_head_t wqueue;
};

struct p9_poll_wait {
        struct p9_conn *conn;
        wait_queue_t wait;
        wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @lock: protects the request lists and the current write state
 * @mux_list: list link retained from the old mux code (only referenced in
 *            debug output here)
 * @client: reference to client instance for this connection
 * @tagpool: id accounting for transactions
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rcall: current response &p9_fcall structure
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: list link used by the poll worker to queue this
 *                     connection for servicing
 * @poll_wait: wait queue entries registered with the transport's fds
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: scheduling flags (Rworksched/Rpending/Wworksched/Wpending)
 *
 */

struct p9_conn {
        spinlock_t lock; /* protects req_list, unsent_req_list and write state */
        struct list_head mux_list;
        struct p9_client *client;
        struct p9_idpool *tagpool;
        int err;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct p9_fcall *rcall;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        struct list_head poll_pending_link;
        struct p9_poll_wait poll_wait[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;

static u16 p9_mux_get_tag(struct p9_conn *m)
{
        int tag;

        tag = p9_idpool_get(m->tagpool);
        if (tag < 0)
                return P9_NOTAG;
        else
                return (u16) tag;
}

static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
{
        if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
                p9_idpool_put(tag, m->tagpool);
}

static void p9_mux_poll_stop(struct p9_conn *m)
{
        unsigned long flags;
        int i;

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                struct p9_poll_wait *pwait = &m->poll_wait[i];

                if (pwait->wait_addr) {
                        remove_wait_queue(pwait->wait_addr, &pwait->wait);
                        pwait->wait_addr = NULL;
                }
        }

        spin_lock_irqsave(&p9_poll_lock, flags);
        list_del_init(&m->poll_pending_link);
        spin_unlock_irqrestore(&p9_poll_lock, flags);
}

static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
        p9_mux_put_tag(m, req->tag);
        kfree(req);
}

static void p9_conn_rpc_cb(struct p9_req *req);

static void p9_mux_flush_cb(struct p9_req *freq)
{
        int tag;
        struct p9_conn *m = freq->m;
        struct p9_req *req, *rreq, *rptr;

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
                freq->tcall, freq->rcall, freq->err,
                freq->tcall->params.tflush.oldtag);

        spin_lock(&m->lock);
        tag = freq->tcall->params.tflush.oldtag;
        req = NULL;
        list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                if (rreq->tag == tag) {
                        req = rreq;
                        list_del(&req->req_list);
                        break;
                }
        }
        spin_unlock(&m->lock);

        if (req) {
                spin_lock(&req->lock);
                req->flush = Flushed;
                spin_unlock(&req->lock);

                p9_conn_rpc_cb(req);
        }

        kfree(freq->tcall);
        kfree(freq->rcall);
        p9_mux_free_request(m, freq);
}

static void p9_conn_rpc_cb(struct p9_req *req)
{
        P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);

        if (req->tcall->id == P9_TFLUSH) { /* flush callback */
                P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
                p9_mux_flush_cb(req);
        } else {                        /* normal wakeup path */
                P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
                if (req->flush != None && !req->err)
                        req->err = -ERESTARTSYS;

                wake_up(&req->wqueue);
        }
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

void p9_conn_cancel(struct p9_conn *m, int err)
{
        struct p9_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                p9_conn_rpc_cb(req);
        }
}

static void process_request(struct p9_conn *m, struct p9_req *req)
{
        int ecode;
        struct p9_str *ename;

        if (!req->err && req->rcall->id == P9_RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
                                                                ename->str);

                if (m->client->dotu)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = p9_errstr2errno(ename->str, ename->len);

                        /* string match failed */
                        if (!req->err) {
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                                req->err = -ESERVERFAULT;
                        }
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "fcall mismatch: expected %d, got %d\n",
                                req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }
}

static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
        int ret, n;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status == Connected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!ts->rd->f_op || !ts->rd->f_op->poll)
                return -EIO;

        if (!ts->wr->f_op || !ts->wr->f_op->poll)
                return -EIO;

        ret = ts->rd->f_op->poll(ts->rd, pt);
        if (ret < 0)
                return ret;

        if (ts->rd != ts->wr) {
                n = ts->wr->f_op->poll(ts->wr, pt);
                if (n < 0)
                        return n;
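                /*
                 * Merge the two masks: POLLIN readiness comes only from
                 * the read fd, POLLOUT only from the write fd, and error
                 * conditions from either.
                 */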
                ret = (ret & ~POLLOUT) | (n & ~POLLIN);
        }

        return ret;
}

/**
 * p9_fd_read - read from a file descriptor
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
        int ret;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status != Disconnected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!(ts->rd->f_flags & O_NONBLOCK))
                P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

        ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
                client->status = Disconnected;
        return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        struct p9_req *req, *rptr, *rreq;
        struct p9_fcall *rcall;
        char *rbuf;

        m = container_of(work, struct p9_conn, rq);

        if (m->err < 0)
                return;

        rcall = NULL;
        P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall =
                    kmalloc(sizeof(struct p9_fcall) + m->client->msize,
                                                                GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = p9_fd_read(m->client, m->rbuf + m->rpos,
                                                m->client->msize - m->rpos);
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
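        /*
         * Every 9P message starts with a 4-byte little-endian size field
         * that covers the whole message; keep deserializing as long as
         * complete frames are sitting in the read buffer.
         */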
        while (m->rpos > 4) {
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->client->msize) {
                        P9_DPRINTK(P9_DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                err =
                    p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
                if (err < 0)
                        goto error;

#ifdef CONFIG_NET_9P_DEBUG
                if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
                        char buf[150];

                        p9_printfcall(buf, sizeof(buf), m->rcall,
                                m->client->dotu);
                        printk(KERN_NOTICE ">>> %p %s\n", m, buf);
                }
#endif

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        m->rcall = kmalloc(sizeof(struct p9_fcall) +
                                                m->client->msize, GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
                                                        rcall->id, rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                if (req->flush != Flushing)
                                        list_del(&req->req_list);
                                break;
                        }
                }
                spin_unlock(&m->lock);

                if (req) {
                        req->rcall = rcall;
                        process_request(m, req);

                        if (req->flush != Flushing)
                                p9_conn_rpc_cb(req);
                } else {
                        if (err >= 0 && rcall->id != P9_RFLUSH)
                                P9_DPRINTK(P9_DEBUG_ERROR,
                                  "unexpected response mux %p id %d tag %d\n",
                                  m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = p9_fd_poll(m->client, NULL);

                if (n & POLLIN) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
                        queue_work(p9_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

error:
        p9_conn_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a file descriptor
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
        int ret;
        mm_segment_t oldfs;
        struct p9_trans_fd *ts = NULL;

        if (client && client->status != Disconnected)
                ts = client->trans;

        if (!ts)
                return -EREMOTEIO;

        if (!(ts->wr->f_flags & O_NONBLOCK))
                P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

        oldfs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
        set_fs(oldfs);

        if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
                client->status = Disconnected;
        return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
        int n, err;
        struct p9_conn *m;
        struct p9_req *req;

        m = container_of(work, struct p9_conn, wq);

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                if (list_empty(&m->unsent_req_list)) {
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                spin_lock(&m->lock);
again:
                req = list_entry(m->unsent_req_list.next, struct p9_req,
                               req_list);
                list_move_tail(&req->req_list, &m->req_list);
                if (req->err == ERREQFLUSH)
                        goto again;

                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                spin_unlock(&m->lock);
        }

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
                                                                m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err < 0)
                goto error;
        else if (err == 0) {
                err = -EREMOTEIO;
                goto error;
        }

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = p9_fd_poll(m->client, NULL);

                if (n & POLLOUT) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
                        queue_work(p9_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

error:
        p9_conn_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct p9_poll_wait *pwait =
                container_of(wait, struct p9_poll_wait, wait);
        struct p9_conn *m = pwait->conn;
        unsigned long flags;
        DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);

        spin_lock_irqsave(&p9_poll_lock, flags);
        if (list_empty(&m->poll_pending_link))
                list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
        spin_unlock_irqrestore(&p9_poll_lock, flags);

        /* perform the default wake up operation */
        return default_wake_function(&dummy_wait, mode, sync, key);
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
        struct p9_conn *m = container_of(p, struct p9_conn, pt);
        struct p9_poll_wait *pwait = NULL;
        int i;

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                if (m->poll_wait[i].wait_addr == NULL) {
                        pwait = &m->poll_wait[i];
                        break;
                }
        }

        if (!pwait) {
                P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        if (!wait_address) {
                P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
                pwait->wait_addr = ERR_PTR(-EIO);
                return;
        }

        pwait->conn = m;
        pwait->wait_addr = wait_address;
        init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
        add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: the polling task is shared by all connections and is started
 * when the transport module is initialized.
 */

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
        int i, n;
        struct p9_conn *m;

        P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
        m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->client = client;
        m->tagpool = p9_idpool_create();
        if (IS_ERR(m->tagpool)) {
                kfree(m);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        INIT_WORK(&m->rq, p9_read_work);
        INIT_WORK(&m->wq, p9_write_work);
        INIT_LIST_HEAD(&m->poll_pending_link);
        init_poll_funcptr(&m->pt, p9_pollwait);

        n = p9_fd_poll(client, &m->pt);
        if (n & POLLIN) {
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
                if (IS_ERR(m->poll_wait[i].wait_addr)) {
                        /* save the error before freeing m (avoids a use after free) */
                        void *err_ptr = (void *)m->poll_wait[i].wait_addr;

                        p9_mux_poll_stop(m);
                        kfree(m);
                        return err_ptr;
                }
        }

        return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
        int n;

        if (m->err < 0)
                return;

        n = p9_fd_poll(m->client, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                p9_conn_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
                        queue_work(p9_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
                        queue_work(p9_mux_wq, &m->wq);
                }
        }
}

/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
 *
 */

static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
{
        int n;
        struct p9_req *req;

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == P9_TVERSION)
                n = P9_NOTAG;
        else
                n = p9_mux_get_tag(m);

        if (n < 0) {
                kfree(req);
                return ERR_PTR(-ENOMEM);
        }

        p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
        if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
                char buf[150];

                p9_printfcall(buf, sizeof(buf), tc, m->client->dotu);
                printk(KERN_NOTICE "<<< %p %s\n", m, buf);
        }
#endif

        spin_lock_init(&req->lock);
        req->m = m;
        init_waitqueue_head(&req->wqueue);
        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->flush = None;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = p9_fd_poll(m->client, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(p9_mux_wq, &m->wq);

        return req;
}

static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
        struct p9_fcall *fc;
        struct p9_req *rreq, *rptr;

        P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        /* if a response was received for a request, do nothing */
        spin_lock(&req->lock);
        if (req->rcall || req->err) {
                spin_unlock(&req->lock);
                P9_DPRINTK(P9_DEBUG_MUX,
                        "mux %p req %p response already received\n", m, req);
                return 0;
        }

        req->flush = Flushing;
        spin_unlock(&req->lock);

        spin_lock(&m->lock);
        /* if the request is not sent yet, just remove it from the list */
        list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
                if (rreq->tag == req->tag) {
                        P9_DPRINTK(P9_DEBUG_MUX,
                           "mux %p req %p request is not sent yet\n", m, req);
                        list_del(&rreq->req_list);
                        req->flush = Flushed;
                        spin_unlock(&m->lock);
                        p9_conn_rpc_cb(req);
                        return 0;
                }
        }
        spin_unlock(&m->lock);

        clear_thread_flag(TIF_SIGPENDING);
        fc = p9_create_tflush(req->tag);
        p9_send_request(m, fc);
        return 1;
}

/**
 * p9_fd_rpc - sends a 9P request and waits until a response is available.
 *      The function can be interrupted.
 * @client: client instance
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 *
 */

int
p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
{
        struct p9_trans_fd *p = client->trans;
        struct p9_conn *m = p->conn;
        int err, sigpending;
        unsigned long flags;
        struct p9_req *req;

        if (rc)
                *rc = NULL;

        sigpending = 0;
        if (signal_pending(current)) {
                sigpending = 1;
                clear_thread_flag(TIF_SIGPENDING);
        }

        req = p9_send_request(m, tc);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
                return err;
        }

        err = wait_event_interruptible(req->wqueue, req->rcall != NULL ||
                                                                req->err < 0);
        if (req->err < 0)
                err = req->err;

        if (err == -ERESTARTSYS && client->status == Connected
                                                        && m->err == 0) {
                if (p9_mux_flush_request(m, req)) {
                        /* wait until we get response of the flush message */
                        do {
                                clear_thread_flag(TIF_SIGPENDING);
                                err = wait_event_interruptible(req->wqueue,
                                        req->rcall || req->err);
                        } while (!req->rcall && !req->err &&
                                        err == -ERESTARTSYS &&
                                        client->status == Connected && !m->err);

                        err = -ERESTARTSYS;
                }
                sigpending = 1;
        }

        if (sigpending) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (rc)
                *rc = req->rcall;
        else
                kfree(req->rcall);

        p9_mux_free_request(m, req);
        if (err > 0)
                err = -EIO;

        return err;
}
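
/*
 * Illustrative call pattern (sketch only; in practice the 9p client core
 * is the only caller and builds the request fcall itself):
 *
 *      struct p9_fcall *rc = NULL;
 *      int err;
 *
 *      err = p9_fd_rpc(client, tc, &rc);    (tc is a previously built request)
 *      if (err < 0)
 *              handle the error;
 *      else
 *              consume the reply in rc;
 *      kfree(rc);                           (the reply buffer belongs to the caller)
 */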

/**
 * parse_opts - parse transport mount options into a p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
        char *p;
        substring_t args[MAX_OPT_ARGS];
        int option;
        char *options, *tmp_options;

        opts->port = P9_PORT;
        opts->rfd = ~0;
        opts->wfd = ~0;

        if (!params)
                return 0;

        tmp_options = kstrdup(params, GFP_KERNEL);
        if (!tmp_options) {
                P9_DPRINTK(P9_DEBUG_ERROR,
                                "failed to allocate copy of option string\n");
                return -ENOMEM;
        }
        options = tmp_options;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                int r;
                if (!*p)
                        continue;
                token = match_token(p, tokens, args);
                /* skip options this transport does not recognize */
                if (token == Opt_err)
                        continue;
                r = match_int(&args[0], &option);
                if (r < 0) {
                        P9_DPRINTK(P9_DEBUG_ERROR,
                         "integer field, but no integer?\n");
                        continue;
                }
                switch (token) {
                case Opt_port:
                        opts->port = option;
                        break;
                case Opt_rfdno:
                        opts->rfd = option;
                        break;
                case Opt_wfdno:
                        opts->wfd = option;
                        break;
                default:
                        continue;
                }
        }

        /* strsep() consumed 'options'; free the original copy */
        kfree(tmp_options);
        return 0;
}
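
/*
 * Example (illustrative): parsing "port=17007,rfdno=3,wfdno=4" leaves
 * opts->port == 17007, opts->rfd == 3 and opts->wfd == 4.  Options this
 * transport does not recognize are skipped silently, so strings shared
 * with other layers (e.g. "trans=fd,rfdno=3,wfdno=4") parse cleanly.
 */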

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
        struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
                                           GFP_KERNEL);
        if (!ts)
                return -ENOMEM;

        ts->rd = fget(rfd);
        ts->wr = fget(wfd);
        if (!ts->rd || !ts->wr) {
                if (ts->rd)
                        fput(ts->rd);
                if (ts->wr)
                        fput(ts->wr);
                kfree(ts);
                return -EIO;
        }

        client->trans = ts;
        client->status = Connected;

        return 0;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
        int fd, ret;

        csocket->sk->sk_allocation = GFP_NOIO;
        fd = sock_map_fd(csocket, 0);
        if (fd < 0) {
                P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
                return fd;
        }

        ret = p9_fd_open(client, fd, fd);
        if (ret < 0) {
                P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
                sockfd_put(csocket);
                return ret;
        }

        ((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;

        return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
        P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);

        p9_mux_poll_stop(m);
        cancel_work_sync(&m->rq);
        cancel_work_sync(&m->wq);

        p9_conn_cancel(m, -ECONNRESET);

        m->client = NULL;
        p9_idpool_destroy(m->tagpool);
        kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
        struct p9_trans_fd *ts;

        if (!client)
                return;

        ts = client->trans;
        if (!ts)
                return;

        client->status = Disconnected;

        p9_conn_destroy(ts->conn);

        if (ts->rd)
                fput(ts->rd);
        if (ts->wr)
                fput(ts->wr);

        kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
        int rc, count, in[4];

        rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
        if (rc != 4)
                return -EINVAL;
        for (count = 0; count < 4; count++) {
                if (in[count] > 255)
                        return -EINVAL;
        }
        return 0;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct socket *csocket;
        struct sockaddr_in sin_server;
        struct p9_fd_opts opts;
        struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

        err = parse_opts(args, &opts);
        if (err < 0)
                return err;

        if (valid_ipaddr4(addr) < 0)
                return -EINVAL;

        csocket = NULL;

        sin_server.sin_family = AF_INET;
        sin_server.sin_addr.s_addr = in_aton(addr);
        sin_server.sin_port = htons(opts.port);
        sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);

        if (!csocket) {
                P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
                err = -EIO;
                goto error;
        }

        err = csocket->ops->connect(csocket,
                                    (struct sockaddr *)&sin_server,
                                    sizeof(struct sockaddr_in), 0);
        if (err < 0) {
                P9_EPRINTK(KERN_ERR,
                        "p9_trans_tcp: problem connecting socket to %s\n",
                        addr);
                goto error;
        }

        err = p9_socket_open(client, csocket);
        if (err < 0)
                goto error;

        p = (struct p9_trans_fd *) client->trans;
        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                err = PTR_ERR(p->conn);
                p->conn = NULL;
                goto error;
        }

        return 0;

error:
        if (csocket)
                sock_release(csocket);

        kfree(p);

        return err;
}
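
/*
 * Typical use (illustrative): a mount such as
 *
 *      mount -t 9p 192.168.1.2 /mnt/9 -o trans=tcp,port=564
 *
 * reaches this function with addr == "192.168.1.2" and the mount option
 * string in args.
 */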

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct socket *csocket;
        struct sockaddr_un sun_server;
        struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

        csocket = NULL;

        if (strlen(addr) >= UNIX_PATH_MAX) {
                P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
                        addr);
                err = -ENAMETOOLONG;
                goto error;
        }

        sun_server.sun_family = PF_UNIX;
        strcpy(sun_server.sun_path, addr);
        sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
        if (!csocket) {
                P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
                err = -EIO;
                goto error;
        }
        err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
                        sizeof(struct sockaddr_un) - 1, 0);
        if (err < 0) {
                P9_EPRINTK(KERN_ERR,
                        "p9_trans_unix: problem connecting socket: %s: %d\n",
                        addr, err);
                goto error;
        }

        err = p9_socket_open(client, csocket);
        if (err < 0)
                goto error;

        p = (struct p9_trans_fd *) client->trans;
        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                err = PTR_ERR(p->conn);
                p->conn = NULL;
                goto error;
        }

        return 0;

error:
        if (csocket)
                sock_release(csocket);

        kfree(p);
        return err;
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
        int err;
        struct p9_fd_opts opts;
        struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

        parse_opts(args, &opts);

        if (opts.rfd == ~0 || opts.wfd == ~0) {
                printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
                return -ENOPROTOOPT;
        }

        err = p9_fd_open(client, opts.rfd, opts.wfd);
        if (err < 0)
                goto error;

        p = (struct p9_trans_fd *) client->trans;
        p->conn = p9_conn_create(client);
        if (IS_ERR(p->conn)) {
                err = PTR_ERR(p->conn);
                p->conn = NULL;
                goto error;
        }

        return 0;

error:
        kfree(p);
        return err;
}

static struct p9_trans_module p9_tcp_trans = {
        .name = "tcp",
        .maxsize = MAX_SOCK_BUF,
        .def = 1,
        .create = p9_fd_create_tcp,
        .close = p9_fd_close,
        .rpc = p9_fd_rpc,
        .owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
        .name = "unix",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create_unix,
        .close = p9_fd_close,
        .rpc = p9_fd_rpc,
        .owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
        .name = "fd",
        .maxsize = MAX_SOCK_BUF,
        .def = 0,
        .create = p9_fd_create,
        .close = p9_fd_close,
        .rpc = p9_fd_rpc,
        .owner = THIS_MODULE,
};

/**
 * p9_poll_proc - poll worker thread
 * @a: thread state and arguments
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static int p9_poll_proc(void *a)
{
        unsigned long flags;

        P9_DPRINTK(P9_DEBUG_MUX, "start %p\n", current);
 repeat:
        spin_lock_irqsave(&p9_poll_lock, flags);
        while (!list_empty(&p9_poll_pending_list)) {
                struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
                                                        struct p9_conn,
                                                        poll_pending_link);
                list_del_init(&conn->poll_pending_link);
                spin_unlock_irqrestore(&p9_poll_lock, flags);

                p9_poll_mux(conn);

                spin_lock_irqsave(&p9_poll_lock, flags);
        }
        spin_unlock_irqrestore(&p9_poll_lock, flags);

        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&p9_poll_pending_list)) {
                P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!kthread_should_stop())
                goto repeat;

        P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
        return 0;
}

int p9_trans_fd_init(void)
{
        p9_mux_wq = create_workqueue("v9fs");
        if (!p9_mux_wq) {
                printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
                return -ENOMEM;
        }

        p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
        if (IS_ERR(p9_poll_task)) {
                destroy_workqueue(p9_mux_wq);
                printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
                return PTR_ERR(p9_poll_task);
        }

        v9fs_register_trans(&p9_tcp_trans);
        v9fs_register_trans(&p9_unix_trans);
        v9fs_register_trans(&p9_fd_trans);

        return 0;
}

void p9_trans_fd_exit(void)
{
        kthread_stop(p9_poll_task);
        v9fs_unregister_trans(&p9_tcp_trans);
        v9fs_unregister_trans(&p9_unix_trans);
        v9fs_unregister_trans(&p9_fd_trans);

        destroy_workqueue(p9_mux_wq);
}
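
/*
 * Note: there is intentionally no module_init()/module_exit() here.  In
 * this kernel the fd/tcp/unix transports are built into the 9pnet core,
 * which is expected to call p9_trans_fd_init()/p9_trans_fd_exit() from
 * its own init and exit paths (see net/9p/mod.c).
 */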