caif: Bugfix - missing spin_unlock
net/caif/caif_socket.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:      Sjur Brendeland sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
#include <asm/atomic.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);

#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)

/*
 * CAIF state re-uses the TCP socket states.
 * The caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
        CAIF_CONNECTED          = TCP_ESTABLISHED,
        CAIF_CONNECTING         = TCP_SYN_SENT,
        CAIF_DISCONNECTED       = TCP_CLOSE
};

#define TX_FLOW_ON_BIT  1
#define RX_FLOW_ON_BIT  2

static struct dentry *debugfsdir;

#ifdef CONFIG_DEBUG_FS
struct debug_fs_counter {
        atomic_t caif_nr_socks;
        atomic_t num_connect_req;
        atomic_t num_connect_resp;
        atomic_t num_connect_fail_resp;
        atomic_t num_disconnect;
        atomic_t num_remote_shutdown_ind;
        atomic_t num_tx_flow_off_ind;
        atomic_t num_tx_flow_on_ind;
        atomic_t num_rx_flow_off;
        atomic_t num_rx_flow_on;
};
static struct debug_fs_counter cnt;
#define dbfs_atomic_inc(v) atomic_inc(v)
#define dbfs_atomic_dec(v) atomic_dec(v)
#else
#define dbfs_atomic_inc(v)
#define dbfs_atomic_dec(v)
#endif

struct caifsock {
        struct sock sk; /* must be first member */
        struct cflayer layer;
        char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
        u32 flow_state;
        struct caif_connect_request conn_req;
        struct mutex readlock;
        struct dentry *debugfs_socket_dir;
};
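
/*
 * Since sk must be the first member, container_of(sk, struct caifsock, sk)
 * amounts to a type-safe downcast from a struct sock pointer. A minimal
 * sketch of the pattern used throughout this file (illustrative helper
 * only, not part of the original code):
 *
 *      static inline struct caifsock *caif_sk(struct sock *sk)
 *      {
 *              return container_of(sk, struct caifsock, sk);
 *      }
 */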

static int rx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(RX_FLOW_ON_BIT,
                        (void *) &cf_sk->flow_state);
}

static int tx_flow_is_on(struct caifsock *cf_sk)
{
        return test_bit(TX_FLOW_ON_BIT,
                        (void *) &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(RX_FLOW_ON_BIT,
                  (void *) &cf_sk->flow_state);
}

static void set_rx_flow_on(struct caifsock *cf_sk)
{
        set_bit(RX_FLOW_ON_BIT,
                (void *) &cf_sk->flow_state);
}

static void set_tx_flow_off(struct caifsock *cf_sk)
{
        clear_bit(TX_FLOW_ON_BIT,
                  (void *) &cf_sk->flow_state);
}

static void set_tx_flow_on(struct caifsock *cf_sk)
{
        set_bit(TX_FLOW_ON_BIT,
                (void *) &cf_sk->flow_state);
}

static void caif_read_lock(struct sock *sk)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_lock(&cf_sk->readlock);
}

static void caif_read_unlock(struct sock *sk)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        mutex_unlock(&cf_sk->readlock);
}

static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
        /* A quarter of the full buffer is used as the low water mark */
        return cf_sk->sk.sk_rcvbuf / 4;
}
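
/*
 * Worked example (approximate, since sk_rmem_alloc accounts for skb
 * truesize rather than payload bytes): with the default CAIF_DEF_RCVBUF
 * of CAIF_MAX_PAYLOAD_SIZE * 100, the low water mark corresponds to
 * roughly 25 maximum-sized payloads still queued.
 */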

static void caif_flow_ctrl(struct sock *sk, int mode)
{
        struct caifsock *cf_sk;
        cf_sk = container_of(sk, struct caifsock, sk);
        if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
                cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}

/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so that packets are
 * not dropped; CAIF sends flow-off instead.
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;
        int skb_len;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
                trace_printk("CAIF: %s():"
                        " sending flow OFF (queue len = %d %d)\n",
                        __func__,
                        atomic_read(&cf_sk->sk.sk_rmem_alloc),
                        sk_rcvbuf_lowwater(cf_sk));
                set_rx_flow_off(cf_sk);
                dbfs_atomic_inc(&cnt.num_rx_flow_off);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }

        err = sk_filter(sk, skb);
        if (err)
                return err;
        if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
                trace_printk("CAIF: %s():"
                        " sending flow OFF due to rmem_schedule\n",
                        __func__);
                dbfs_atomic_inc(&cnt.num_rx_flow_off);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
        }
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
        /* Cache the SKB length before we tack it onto the receive
         * queue. Once it is added it no longer belongs to us and
         * may be freed by other threads of control pulling packets
         * from the queue.
         */
        skb_len = skb->len;
        spin_lock_irqsave(&list->lock, flags);
        if (!sock_flag(sk, SOCK_DEAD))
                __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, skb_len);
        else
                kfree_skb(skb);
        return 0;
}

/* Packet Receive Callback function called from CAIF Stack */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
        struct caifsock *cf_sk;
        struct sk_buff *skb;

        cf_sk = container_of(layr, struct caifsock, layer);
        skb = cfpkt_tonative(pkt);

        if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
                cfpkt_destroy(pkt);
                return 0;
        }
        caif_queue_rcv_skb(&cf_sk->sk, skb);
        return 0;
}

/* Packet Control Callback function called from CAIF */
static void caif_ctrl_cb(struct cflayer *layr,
                                enum caif_ctrlcmd flow,
                                int phyid)
{
        struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
        switch (flow) {
        case CAIF_CTRLCMD_FLOW_ON_IND:
                /* OK from modem to start sending again */
                dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_FLOW_OFF_IND:
                /* Modem asks us to shut up */
                dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
                set_tx_flow_off(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_INIT_RSP:
                /* We're now connected */
                dbfs_atomic_inc(&cnt.num_connect_resp);
                cf_sk->sk.sk_state = CAIF_CONNECTED;
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_DEINIT_RSP:
                /* We're now disconnected */
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                cfcnfg_release_adap_layer(&cf_sk->layer);
                break;

        case CAIF_CTRLCMD_INIT_FAIL_RSP:
                /* Connect request failed */
                dbfs_atomic_inc(&cnt.num_connect_fail_resp);
                cf_sk->sk.sk_err = ECONNREFUSED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                /*
                 * Socket "standards" seem to require POLLOUT to
                 * be set on connect failure.
                 */
                set_tx_flow_on(cf_sk);
                cf_sk->sk.sk_state_change(&cf_sk->sk);
                break;

        case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
                /* Modem has closed this connection, or device is down. */
                dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
                cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
                cf_sk->sk.sk_err = ECONNRESET;
                set_rx_flow_on(cf_sk);
                cf_sk->sk.sk_error_report(&cf_sk->sk);
                break;

        default:
                pr_debug("CAIF: %s(): Unexpected flow command %d\n",
                                __func__, flow);
        }
}

static void caif_check_flow_release(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        if (rx_flow_is_on(cf_sk))
                return;

        if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
                dbfs_atomic_inc(&cnt.num_rx_flow_on);
                set_rx_flow_on(cf_sk);
                caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
        }
}
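
/*
 * caif_check_flow_release() and caif_queue_rcv_skb() together implement a
 * simple hysteresis: receive flow is turned off when queued receive memory
 * would reach sk_rcvbuf, and turned back on once the queue drains below a
 * quarter of sk_rcvbuf.
 */
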
/*
 * Copied from sock.c:sock_queue_rcv_skb(), with an added check that the
 * user buffer has sufficient size.
 */
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
                                struct msghdr *m, size_t buf_len, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int ret = 0;
        int len;

        if (unlikely(!buf_len))
                return -EINVAL;

        skb = skb_recv_datagram(sk, flags, 0, &ret);
        if (!skb)
                goto read_error;

        len = skb->len;

        if (skb->len > buf_len && !(flags & MSG_PEEK)) {
                len = buf_len;
                /*
                 * Push skb back on receive queue if buffer too small.
                 * This has a built-in race where multi-threaded receive
                 * may get packets in the wrong order, but multiple reads do
                 * not really guarantee ordered delivery anyway.
                 * Let's optimize for speed without taking locks.
                 */
                skb_queue_head(&sk->sk_receive_queue, skb);
                ret = -EMSGSIZE;
                goto read_error;
        }

        ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
        if (ret)
                goto read_error;

        skb_free_datagram(sk, skb);

        caif_check_flow_release(sk);

        return len;

read_error:
        return ret;
}
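
/*
 * Userspace sketch (the buffer size is hypothetical): on a seqpacket
 * socket, a recv() with a buffer smaller than the queued packet fails
 * with EMSGSIZE instead of silently truncating, so the caller can grow
 * the buffer and retry:
 *
 *      char buf[128];
 *      ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *      if (n < 0 && errno == EMSGSIZE)
 *              ... retry with a larger buffer ...
 */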

/* Copied from unix_stream_wait_data, identical except for lock call. */
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
        DEFINE_WAIT(wait);
        lock_sock(sk);

        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                        sk->sk_err ||
                        sk->sk_state != CAIF_CONNECTED ||
                        sock_flag(sk, SOCK_DEAD) ||
                        (sk->sk_shutdown & RCV_SHUTDOWN) ||
                        signal_pending(current) ||
                        !timeo)
                        break;

                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }

        finish_wait(sk_sleep(sk), &wait);
        release_sock(sk);
        return timeo;
}

/*
 * Copied from unix_stream_recvmsg, but removed credit checks,
 * changed locking calls, changed address handling.
 */
static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                                struct msghdr *msg, size_t size,
                                int flags)
{
        struct sock *sk = sock->sk;
        int copied = 0;
        int target;
        int err = 0;
        long timeo;

        err = -EOPNOTSUPP;
        if (flags&MSG_OOB)
                goto out;

        msg->msg_namelen = 0;

        /*
         * Lock the socket to prevent queue disordering
         * while we sleep in memcpy_toiovec().
         */
        err = -EAGAIN;
        if (sk->sk_state == CAIF_CONNECTING)
                goto out;

        caif_read_lock(sk);
        target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

        do {
                int chunk;
                struct sk_buff *skb;

                lock_sock(sk);
                skb = skb_dequeue(&sk->sk_receive_queue);
                caif_check_flow_release(sk);

                if (skb == NULL) {
                        if (copied >= target)
                                goto unlock;
                        /*
                         *      POSIX 1003.1g mandates this order.
                         */
                        err = sock_error(sk);
                        if (err)
                                goto unlock;
                        err = -ECONNRESET;
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                goto unlock;

                        err = -EPIPE;
                        if (sk->sk_state != CAIF_CONNECTED)
                                goto unlock;
                        if (sock_flag(sk, SOCK_DEAD))
                                goto unlock;

                        release_sock(sk);

                        err = -EAGAIN;
                        if (!timeo)
                                break;

                        caif_read_unlock(sk);

                        timeo = caif_stream_data_wait(sk, timeo);

                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
                        caif_read_lock(sk);
                        continue;
unlock:
                        release_sock(sk);
                        break;
                }
                release_sock(sk);
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
                }
                copied += chunk;
                size -= chunk;

                /* Mark read part of skb as used */
                if (!(flags & MSG_PEEK)) {
                        skb_pull(skb, chunk);

                        /* put the skb back if we didn't use it up. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
                                break;
                        }
                        kfree_skb(skb);

                } else {
                        /*
                         * It is questionable, see note in unix_dgram_recvmsg.
                         */
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        break;
                }
        } while (size);
        caif_read_unlock(sk);

out:
        return copied ? : err;
}

/*
 * Copied from sock.c:sock_wait_for_wmem, but changed to wait for
 * CAIF flow-on and sock_writeable.
 */
static long caif_wait_for_flow_on(struct caifsock *cf_sk,
                                int wait_writeable, long timeo, int *err)
{
        struct sock *sk = &cf_sk->sk;
        DEFINE_WAIT(wait);
        for (;;) {
                *err = 0;
                if (tx_flow_is_on(cf_sk) &&
                        (!wait_writeable || sock_writeable(&cf_sk->sk)))
                        break;
                *err = -ETIMEDOUT;
                if (!timeo)
                        break;
                *err = -ERESTARTSYS;
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                *err = -ECONNRESET;
                if (sk->sk_shutdown & SHUTDOWN_MASK)
                        break;
                *err = -sk->sk_err;
                if (sk->sk_err)
                        break;
                *err = -EPIPE;
                if (cf_sk->sk.sk_state != CAIF_CONNECTED)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk_sleep(sk), &wait);
        return timeo;
}

/*
 * Transmit an SKB. The device may temporarily request re-transmission
 * by returning EAGAIN.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
                        int noblock, long timeo)
{
        struct cfpkt *pkt;
        int ret, loopcnt = 0;

        pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
        memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
        do {
                ret = -ETIMEDOUT;

                /* Slight paranoia, probably not needed. */
                if (unlikely(loopcnt++ > 1000)) {
                        pr_warning("CAIF: %s(): transmit retries failed,"
                                " error = %d\n", __func__, ret);
                        break;
                }

                if (cf_sk->layer.dn != NULL)
                        ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
                if (likely(ret >= 0))
                        break;
                /* if transmit returns -EAGAIN, then retry */
                if (noblock && ret == -EAGAIN)
                        break;
                timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
                if (signal_pending(current)) {
                        ret = sock_intr_errno(timeo);
                        break;
                }
                if (ret)
                        break;
                if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
                        sock_flag(&cf_sk->sk, SOCK_DEAD) ||
                        (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
                        ret = -EPIPE;
                        cf_sk->sk.sk_err = EPIPE;
                        break;
                }
        } while (ret == -EAGAIN);
        return ret;
}

/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
                        struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int buffer_size;
        int ret = 0;
        struct sk_buff *skb = NULL;
        int noblock;
        long timeo;
        caif_assert(cf_sk);
        ret = sock_error(sk);
        if (ret)
                goto err;

        ret = -EOPNOTSUPP;
        if (msg->msg_flags&MSG_OOB)
                goto err;

        ret = -EOPNOTSUPP;
        if (msg->msg_namelen)
                goto err;

        ret = -EINVAL;
        if (unlikely(msg->msg_iov->iov_base == NULL))
                goto err;
        noblock = msg->msg_flags & MSG_DONTWAIT;

        buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;

        ret = -EMSGSIZE;
        if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
                goto err;

        timeo = sock_sndtimeo(sk, noblock);
        timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &ret);

        ret = -EPIPE;
        if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
                sock_flag(sk, SOCK_DEAD) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                goto err;

        ret = -ENOMEM;
        skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
        if (!skb)
                goto err;
        skb_reserve(skb, CAIF_NEEDED_HEADROOM);

        ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (ret)
                goto err;
        ret = transmit_skb(skb, cf_sk, noblock, timeo);
        if (ret < 0)
                goto err;
        return len;
err:
        kfree_skb(skb);
        return ret;
}

/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * removed permission handling, added waiting for flow-on,
 * plus other minor adaptations.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
                                struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int err, size;
        struct sk_buff *skb;
        int sent = 0;
        long timeo;

        err = -EOPNOTSUPP;
        if (unlikely(msg->msg_flags&MSG_OOB))
                goto out_err;

        if (unlikely(msg->msg_namelen))
                goto out_err;

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

        if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
                goto pipe_err;

        while (sent < len) {

                size = len-sent;

                if (size > CAIF_MAX_PAYLOAD_SIZE)
                        size = CAIF_MAX_PAYLOAD_SIZE;

                /* If size is more than half of sndbuf, chop up message */
                if (size > ((sk->sk_sndbuf >> 1) - 64))
                        size = (sk->sk_sndbuf >> 1) - 64;

                if (size > SKB_MAX_ALLOC)
                        size = SKB_MAX_ALLOC;

                skb = sock_alloc_send_skb(sk,
                                        size + CAIF_NEEDED_HEADROOM
                                        + CAIF_NEEDED_TAILROOM,
                                        msg->msg_flags&MSG_DONTWAIT,
                                        &err);
                if (skb == NULL)
                        goto out_err;

                skb_reserve(skb, CAIF_NEEDED_HEADROOM);
                /*
                 *      If you pass two values to the sock_alloc_send_skb
                 *      it tries to grab the large buffer with GFP_NOFS
                 *      (which can fail easily), and if it fails grab the
                 *      fallback size buffer which is under a page and will
                 *      succeed. [Alan]
                 */
                size = min_t(int, size, skb_tailroom(skb));

                err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
                if (err) {
                        kfree_skb(skb);
                        goto out_err;
                }
                err = transmit_skb(skb, cf_sk,
                                msg->msg_flags&MSG_DONTWAIT, timeo);
                if (err < 0) {
                        kfree_skb(skb);
                        goto pipe_err;
                }
                sent += size;
        }

        return sent;

pipe_err:
        if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
                send_sig(SIGPIPE, current, 0);
        err = -EPIPE;
out_err:
        return sent ? : err;
}

static int setsockopt(struct socket *sock,
                        int lvl, int opt, char __user *ov, unsigned int ol)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int prio, linksel;
        struct ifreq ifreq;

        if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
                return -ENOPROTOOPT;

        switch (opt) {
        case CAIFSO_LINK_SELECT:
                if (ol < sizeof(int))
                        return -EINVAL;
                if (lvl != SOL_CAIF)
                        goto bad_sol;
                if (copy_from_user(&linksel, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.link_selector = linksel;
                release_sock(&cf_sk->sk);
                return 0;

        case SO_PRIORITY:
                if (lvl != SOL_SOCKET)
                        goto bad_sol;
                if (ol < sizeof(int))
                        return -EINVAL;
                if (copy_from_user(&prio, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.priority = prio;
                release_sock(&cf_sk->sk);
                return 0;

        case SO_BINDTODEVICE:
                if (lvl != SOL_SOCKET)
                        goto bad_sol;
                if (ol < sizeof(struct ifreq))
                        return -EINVAL;
                if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
                        return -EFAULT;
                lock_sock(&(cf_sk->sk));
                strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
                        sizeof(cf_sk->conn_req.link_name));
                cf_sk->conn_req.link_name
                        [sizeof(cf_sk->conn_req.link_name)-1] = 0;
                release_sock(&cf_sk->sk);
                return 0;

        case CAIFSO_REQ_PARAM:
                if (lvl != SOL_CAIF)
                        goto bad_sol;
                if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
                        return -ENOPROTOOPT;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.param.size = ol;
                if (ol > sizeof(cf_sk->conn_req.param.data) ||
                        copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
                        release_sock(&cf_sk->sk);
                        return -EINVAL;
                }
                release_sock(&cf_sk->sk);
                return 0;

        default:
                return -ENOPROTOOPT;
        }

        return 0;
bad_sol:
        return -ENOPROTOOPT;
}
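
/*
 * Userspace sketch (constants from the CAIF socket headers); this must be
 * done before connect(), since options are rejected once the socket
 * leaves SS_UNCONNECTED:
 *
 *      int sel = CAIF_LINK_HIGH_BANDW;
 *      setsockopt(fd, SOL_CAIF, CAIFSO_LINK_SELECT, &sel, sizeof(sel));
 */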

/*
 * caif_connect() - Connect a CAIF Socket
 * Copied and modified af_irda.c:irda_connect().
 *
 * Note: by consulting "errno", the user space caller may learn the cause
 * of the failure. Most of them are visible in the function, others may come
 * from subroutines called and are listed here:
 *  o -EAFNOSUPPORT: bad socket family or type.
 *  o -ESOCKTNOSUPPORT: bad socket type or protocol
 *  o -EINVAL: bad socket address, or CAIF link type
 *  o -ECONNREFUSED: remote end refused the connection.
 *  o -EINPROGRESS: connect request sent but timed out (or non-blocking)
 *  o -EISCONN: already connected.
 *  o -ETIMEDOUT: Connection timed out (send timeout)
 *  o -ENODEV: No link layer to send request
 *  o -ECONNRESET: Received Shutdown indication or lost link layer
 *  o -ENOMEM: Out of memory
 *
 *  State Strategy:
 *  o sk_state: holds the CAIF_* protocol state, it's updated by
 *      caif_ctrl_cb.
 *  o sock->state: holds the SS_* socket state and is updated by connect and
 *      disconnect.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
                        int addr_len, int flags)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        long timeo;
        int err;
        lock_sock(sk);

        err = -EAFNOSUPPORT;
        if (uaddr->sa_family != AF_CAIF)
                goto out;

        err = -ESOCKTNOSUPPORT;
        if (unlikely(!(sk->sk_type == SOCK_STREAM &&
                       cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
                       sk->sk_type != SOCK_SEQPACKET))
                goto out;
        switch (sock->state) {
        case SS_UNCONNECTED:
                /* Normal case, a fresh connect */
                caif_assert(sk->sk_state == CAIF_DISCONNECTED);
                break;
        case SS_CONNECTING:
                switch (sk->sk_state) {
                case CAIF_CONNECTED:
                        sock->state = SS_CONNECTED;
                        err = -EISCONN;
                        goto out;
                case CAIF_DISCONNECTED:
                        /* Reconnect allowed */
                        break;
                case CAIF_CONNECTING:
                        err = -EALREADY;
                        if (flags & O_NONBLOCK)
                                goto out;
                        goto wait_connect;
                }
                break;
        case SS_CONNECTED:
                caif_assert(sk->sk_state == CAIF_CONNECTED ||
                                sk->sk_state == CAIF_DISCONNECTED);
                if (sk->sk_shutdown & SHUTDOWN_MASK) {
                        /* Allow re-connect after SHUTDOWN_IND */
                        caif_disconnect_client(&cf_sk->layer);
                        break;
                }
                /* No reconnect on a seqpacket socket */
                err = -EISCONN;
                goto out;
        case SS_DISCONNECTING:
        case SS_FREE:
                caif_assert(0); /* Should never happen */
                break;
        }
        sk->sk_state = CAIF_DISCONNECTED;
        sock->state = SS_UNCONNECTED;
        sk_stream_kill_queues(&cf_sk->sk);

        err = -EINVAL;
        if (addr_len != sizeof(struct sockaddr_caif) ||
                !uaddr)
                goto out;

        memcpy(&cf_sk->conn_req.sockaddr, uaddr,
                sizeof(struct sockaddr_caif));

        /* Move to connecting socket, start sending Connect Requests */
        sock->state = SS_CONNECTING;
        sk->sk_state = CAIF_CONNECTING;

        dbfs_atomic_inc(&cnt.num_connect_req);
        cf_sk->layer.receive = caif_sktrecv_cb;
        err = caif_connect_client(&cf_sk->conn_req,
                                &cf_sk->layer);
        if (err < 0) {
                cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
                cf_sk->sk.sk_state = CAIF_DISCONNECTED;
                goto out;
        }

        err = -EINPROGRESS;
wait_connect:

        if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
                goto out;

        timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

        release_sock(sk);
        err = -ERESTARTSYS;
        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                        sk->sk_state != CAIF_CONNECTING,
                        timeo);
        lock_sock(sk);
        if (timeo < 0)
                goto out; /* -ERESTARTSYS */

        err = -ETIMEDOUT;
        if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
                goto out;
        if (sk->sk_state != CAIF_CONNECTED) {
                sock->state = SS_UNCONNECTED;
                err = sock_error(sk);
                if (!err)
                        err = -ECONNREFUSED;
                goto out;
        }
        sock->state = SS_CONNECTED;
        err = 0;
out:
        release_sock(sk);
        return err;
}
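
/*
 * Minimal userspace sketch of the connect flow documented above (field
 * names as in linux/caif/caif_socket.h; the connection_id value is
 * hypothetical):
 *
 *      int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DATAGRAM);
 *      struct sockaddr_caif addr = {
 *              .family = AF_CAIF,
 *              .u.dgm.connection_id = 1,
 *      };
 *      if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *              perror("connect");
 */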

/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 */
static int caif_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        int res = 0;

        if (!sk)
                return 0;

        set_tx_flow_off(cf_sk);

        /*
         * Ensure that packets are not queued after this point in time.
         * caif_queue_rcv_skb checks SOCK_DEAD while holding the queue lock,
         * which ensures that no packets are queued once the socket is dead.
         */
        spin_lock(&sk->sk_receive_queue.lock);
        sock_set_flag(sk, SOCK_DEAD);
        spin_unlock(&sk->sk_receive_queue.lock);
        sock->sk = NULL;

        dbfs_atomic_inc(&cnt.num_disconnect);

        if (cf_sk->debugfs_socket_dir != NULL)
                debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

        lock_sock(&(cf_sk->sk));
        sk->sk_state = CAIF_DISCONNECTED;
        sk->sk_shutdown = SHUTDOWN_MASK;

        if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
                cf_sk->sk.sk_socket->state == SS_CONNECTING)
                res = caif_disconnect_client(&cf_sk->layer);

        cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
        wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

        sock_orphan(sk);
        cf_sk->layer.dn = NULL;
        sk_stream_kill_queues(&cf_sk->sk);
        release_sock(sk);
        sock_put(sk);
        return res;
}

/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static unsigned int caif_poll(struct file *file,
                                struct socket *sock, poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        /* exceptional events? */
        if (sk->sk_err)
                mask |= POLLERR;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        /* readable? */
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        /*
         * We set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
        if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static const struct proto_ops caif_seqpacket_ops = {
        .family = PF_CAIF,
        .owner = THIS_MODULE,
        .release = caif_release,
        .bind = sock_no_bind,
        .connect = caif_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = caif_seqpkt_sendmsg,
        .recvmsg = caif_seqpkt_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static const struct proto_ops caif_stream_ops = {
        .family = PF_CAIF,
        .owner = THIS_MODULE,
        .release = caif_release,
        .bind = sock_no_bind,
        .connect = caif_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = caif_stream_sendmsg,
        .recvmsg = caif_stream_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

/* This function is called when a socket is finally destroyed. */
static void caif_sock_destructor(struct sock *sk)
{
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
        caif_assert(!atomic_read(&sk->sk_wmem_alloc));
        caif_assert(sk_unhashed(sk));
        caif_assert(!sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_info("Attempt to release alive CAIF socket: %p\n", sk);
                return;
        }
        sk_stream_kill_queues(&cf_sk->sk);
        dbfs_atomic_dec(&cnt.caif_nr_socks);
}

static int caif_create(struct net *net, struct socket *sock, int protocol,
                        int kern)
{
        struct sock *sk = NULL;
        struct caifsock *cf_sk = NULL;
        static struct proto prot = {.name = "PF_CAIF",
                .owner = THIS_MODULE,
                .obj_size = sizeof(struct caifsock),
        };

        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
                return -EPERM;
        /*
         * The sock->type specifies the socket type to use.
         * The CAIF socket is a packet stream in the sense
         * that it is packet based. CAIF trusts the reliability
         * of the link; no resending is implemented.
         */
        if (sock->type == SOCK_SEQPACKET)
                sock->ops = &caif_seqpacket_ops;
        else if (sock->type == SOCK_STREAM)
                sock->ops = &caif_stream_ops;
        else
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= CAIFPROTO_MAX)
                return -EPROTONOSUPPORT;
        /*
         * Set the socket state to unconnected. The socket state
         * is not really used by net/core or socket.c, but the
         * initialization makes sure that sock->state is not left
         * uninitialized.
         */
        sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
        if (!sk)
                return -ENOMEM;

        cf_sk = container_of(sk, struct caifsock, sk);

        /* Store the protocol */
        sk->sk_protocol = (unsigned char) protocol;

        /* Sendbuf dictates the number of outbound packets not yet sent */
        sk->sk_sndbuf = CAIF_DEF_SNDBUF;
        sk->sk_rcvbuf = CAIF_DEF_RCVBUF;

        /*
         * Lock in order to try to stop someone from opening the socket
         * too early.
         */
        lock_sock(&(cf_sk->sk));

        /* Initialize the nonzero default sock structure data. */
        sock_init_data(sock, sk);
        sk->sk_destruct = caif_sock_destructor;

        mutex_init(&cf_sk->readlock); /* single task reading lock */
        cf_sk->layer.ctrlcmd = caif_ctrl_cb;
        cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
        cf_sk->sk.sk_state = CAIF_DISCONNECTED;

        set_tx_flow_off(cf_sk);
        set_rx_flow_on(cf_sk);

        /* Set default options on configuration */
        cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
        cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
        cf_sk->conn_req.protocol = protocol;
        /* Increase the number of sockets created. */
        dbfs_atomic_inc(&cnt.caif_nr_socks);
#ifdef CONFIG_DEBUG_FS
        if (!IS_ERR(debugfsdir)) {
                /* Fill in some information concerning this socket. */
                snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
                                atomic_read(&cnt.caif_nr_socks));

                cf_sk->debugfs_socket_dir =
                        debugfs_create_dir(cf_sk->name, debugfsdir);
                debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->sk.sk_state);
                debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
                debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->sk.sk_rmem_alloc);
                debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->sk.sk_wmem_alloc);
                debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
                                cf_sk->debugfs_socket_dir,
                                (u32 *) &cf_sk->layer.id);
        }
#endif
        release_sock(&cf_sk->sk);
        return 0;
}

static struct net_proto_family caif_family_ops = {
        .family = PF_CAIF,
        .create = caif_create,
        .owner = THIS_MODULE,
};

static int af_caif_init(void)
{
        int err = sock_register(&caif_family_ops);
        if (err)
                return err;
        return 0;
}

static int __init caif_sktinit_module(void)
{
#ifdef CONFIG_DEBUG_FS
        debugfsdir = debugfs_create_dir("caif_sk", NULL);
        if (!IS_ERR(debugfsdir)) {
                debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.caif_nr_socks);
                debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_connect_req);
                debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_connect_resp);
                debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_connect_fail_resp);
                debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_disconnect);
                debugfs_create_u32("num_remote_shutdown_ind",
                                S_IRUSR | S_IWUSR, debugfsdir,
                                (u32 *) &cnt.num_remote_shutdown_ind);
                debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_tx_flow_off_ind);
                debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_tx_flow_on_ind);
                debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_rx_flow_off);
                debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
                                debugfsdir,
                                (u32 *) &cnt.num_rx_flow_on);
        }
#endif
        return af_caif_init();
}

static void __exit caif_sktexit_module(void)
{
        sock_unregister(PF_CAIF);
        if (debugfsdir != NULL)
                debugfs_remove_recursive(debugfsdir);
}

module_init(caif_sktinit_module);
module_exit(caif_sktexit_module);