[AF_IUCV]: remove static declarations from header file.
1 /*
2  *  linux/net/iucv/af_iucv.c
3  *
4  *  IUCV protocol stack for Linux on zSeries
5  *
6  *  Copyright 2006 IBM Corporation
7  *
8  *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
9  */
10
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/list.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/poll.h>
21 #include <net/sock.h>
22 #include <asm/ebcdic.h>
23 #include <asm/cpcmd.h>
24 #include <linux/kmod.h>
25
26 #include <net/iucv/iucv.h>
27 #include <net/iucv/af_iucv.h>
28
29 #define CONFIG_IUCV_SOCK_DEBUG 1
30
31 #define IPRMDATA 0x80
32 #define VERSION "1.0"
33
34 static char iucv_userid[80];
35
36 static struct proto_ops iucv_sock_ops;
37
38 static struct proto iucv_proto = {
39         .name           = "AF_IUCV",
40         .owner          = THIS_MODULE,
41         .obj_size       = sizeof(struct iucv_sock),
42 };
43
44 static void iucv_sock_kill(struct sock *sk);
45 static void iucv_sock_close(struct sock *sk);
46
47 /* Callback functions */
48 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
49 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
50 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
51 static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
52                                  u8 ipuser[16]);
53 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
54
55 static struct iucv_sock_list iucv_sk_list = {
56         .lock = RW_LOCK_UNLOCKED,
57         .autobind_name = ATOMIC_INIT(0)
58 };
59
60 static struct iucv_handler af_iucv_handler = {
61         .path_pending     = iucv_callback_connreq,
62         .path_complete    = iucv_callback_connack,
63         .path_severed     = iucv_callback_connrej,
64         .message_pending  = iucv_callback_rx,
65         .message_complete = iucv_callback_txdone
66 };
67
68 static inline void high_nmcpy(unsigned char *dst, char *src)
69 {
70        memcpy(dst, src, 8);
71 }
72
73 static inline void low_nmcpy(unsigned char *dst, char *src)
74 {
75        memcpy(&dst[8], src, 8);
76 }
77
78 /* Timers */
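/*
 * A watched operation timed out: report ETIMEDOUT, wake up anyone
 * sleeping on the socket and drop the reference held for the timer.
 */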
79 static void iucv_sock_timeout(unsigned long arg)
80 {
81         struct sock *sk = (struct sock *)arg;
82
83         bh_lock_sock(sk);
84         sk->sk_err = ETIMEDOUT;
85         sk->sk_state_change(sk);
86         bh_unlock_sock(sk);
87
88         iucv_sock_kill(sk);
89         sock_put(sk);
90 }
91
92 static void iucv_sock_clear_timer(struct sock *sk)
93 {
94         sk_stop_timer(sk, &sk->sk_timer);
95 }
96
97 static void iucv_sock_init_timer(struct sock *sk)
98 {
99         init_timer(&sk->sk_timer);
100         sk->sk_timer.function = iucv_sock_timeout;
101         sk->sk_timer.data = (unsigned long)sk;
102 }
103
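/*
 * Find a socket bound to the given 8-byte name.  The caller must hold
 * iucv_sk_list.lock.
 */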
104 static struct sock *__iucv_get_sock_by_name(char *nm)
105 {
106         struct sock *sk;
107         struct hlist_node *node;
108
109         sk_for_each(sk, node, &iucv_sk_list.head)
110                 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
111                         return sk;
112
113         return NULL;
114 }
115
116 static void iucv_sock_destruct(struct sock *sk)
117 {
118         skb_queue_purge(&sk->sk_receive_queue);
119         skb_queue_purge(&sk->sk_write_queue);
120 }
121
122 /* Cleanup Listen */
123 static void iucv_sock_cleanup_listen(struct sock *parent)
124 {
125         struct sock *sk;
126
127         /* Close non-accepted connections */
128         while ((sk = iucv_accept_dequeue(parent, NULL))) {
129                 iucv_sock_close(sk);
130                 iucv_sock_kill(sk);
131         }
132
133         parent->sk_state = IUCV_CLOSED;
134         sock_set_flag(parent, SOCK_ZAPPED);
135 }
136
137 /* Kill socket */
138 static void iucv_sock_kill(struct sock *sk)
139 {
140         if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
141                 return;
142
143         iucv_sock_unlink(&iucv_sk_list, sk);
144         sock_set_flag(sk, SOCK_DEAD);
145         sock_put(sk);
146 }
147
148 /* Close an IUCV socket */
149 static void iucv_sock_close(struct sock *sk)
150 {
151         unsigned char user_data[16];
152         struct iucv_sock *iucv = iucv_sk(sk);
153         int err;
154         unsigned long timeo;
155
156         iucv_sock_clear_timer(sk);
157         lock_sock(sk);
158
159         switch (sk->sk_state) {
160         case IUCV_LISTEN:
161                 iucv_sock_cleanup_listen(sk);
162                 break;
163
164         case IUCV_CONNECTED:
165         case IUCV_DISCONN:
166                 err = 0;
167
168                 sk->sk_state = IUCV_CLOSING;
169                 sk->sk_state_change(sk);
170
171                 if (!skb_queue_empty(&iucv->send_skb_q)) {
172                         if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
173                                 timeo = sk->sk_lingertime;
174                         else
175                                 timeo = IUCV_DISCONN_TIMEOUT;
176                         err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
177                 }
178
179                 sk->sk_state = IUCV_CLOSED;
180                 sk->sk_state_change(sk);
181
182                 if (iucv->path) {
183                         low_nmcpy(user_data, iucv->src_name);
184                         high_nmcpy(user_data, iucv->dst_name);
185                         ASCEBC(user_data, sizeof(user_data));
186                         err = iucv_path_sever(iucv->path, user_data);
187                         iucv_path_free(iucv->path);
188                         iucv->path = NULL;
189                 }
190
191                 sk->sk_err = ECONNRESET;
192                 sk->sk_state_change(sk);
193
194                 skb_queue_purge(&iucv->send_skb_q);
195                 skb_queue_purge(&iucv->backlog_skb_q);
196
197                 sock_set_flag(sk, SOCK_ZAPPED);
198                 break;
199
200         default:
201                 sock_set_flag(sk, SOCK_ZAPPED);
202                 break;
203         }
204
205         release_sock(sk);
206         iucv_sock_kill(sk);
207 }
208
209 static void iucv_sock_init(struct sock *sk, struct sock *parent)
210 {
211         if (parent)
212                 sk->sk_type = parent->sk_type;
213 }
214
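/*
 * Allocate a new socket, set up its queues and timer and link it into
 * the global IUCV socket list.
 */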
215 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
216 {
217         struct sock *sk;
218
219         sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1);
220         if (!sk)
221                 return NULL;
222
223         sock_init_data(sock, sk);
224         INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
225         spin_lock_init(&iucv_sk(sk)->accept_q_lock);
226         skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
227         skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
228         iucv_sk(sk)->send_tag = 0;
229
230         sk->sk_destruct = iucv_sock_destruct;
231         sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
232         sk->sk_allocation = GFP_DMA;
233
234         sock_reset_flag(sk, SOCK_ZAPPED);
235
236         sk->sk_protocol = proto;
237         sk->sk_state    = IUCV_OPEN;
238
239         iucv_sock_init_timer(sk);
240
241         iucv_sock_link(&iucv_sk_list, sk);
242         return sk;
243 }
244
245 /* Create an IUCV socket */
246 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
247 {
248         struct sock *sk;
249
250         if (sock->type != SOCK_STREAM)
251                 return -ESOCKTNOSUPPORT;
252
253         sock->state = SS_UNCONNECTED;
254         sock->ops = &iucv_sock_ops;
255
256         sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
257         if (!sk)
258                 return -ENOMEM;
259
260         iucv_sock_init(sk, NULL);
261
262         return 0;
263 }
264
265 void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
266 {
267         write_lock_bh(&l->lock);
268         sk_add_node(sk, &l->head);
269         write_unlock_bh(&l->lock);
270 }
271
272 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
273 {
274         write_lock_bh(&l->lock);
275         sk_del_node_init(sk);
276         write_unlock_bh(&l->lock);
277 }
278
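/* Queue a new child socket on its parent's accept queue. */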
279 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
280 {
281         unsigned long flags;
282         struct iucv_sock *par = iucv_sk(parent);
283
284         sock_hold(sk);
285         spin_lock_irqsave(&par->accept_q_lock, flags);
286         list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
287         spin_unlock_irqrestore(&par->accept_q_lock, flags);
288         iucv_sk(sk)->parent = parent;
289         parent->sk_ack_backlog++;
290 }
291
292 void iucv_accept_unlink(struct sock *sk)
293 {
294         unsigned long flags;
295         struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
296
297         spin_lock_irqsave(&par->accept_q_lock, flags);
298         list_del_init(&iucv_sk(sk)->accept_q);
299         spin_unlock_irqrestore(&par->accept_q_lock, flags);
300         iucv_sk(sk)->parent->sk_ack_backlog--;
301         iucv_sk(sk)->parent = NULL;
302         sock_put(sk);
303 }
304
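/*
 * Dequeue the next child socket that can be handed to accept(); closed
 * children found on the way are unlinked.  With a NULL newsock every
 * remaining child is returned, which is used when a listen socket is
 * torn down.
 */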
305 struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
306 {
307         struct iucv_sock *isk, *n;
308         struct sock *sk;
309
310         list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
311                 sk = (struct sock *) isk;
312                 lock_sock(sk);
313
314                 if (sk->sk_state == IUCV_CLOSED) {
315                         iucv_accept_unlink(sk);
316                         release_sock(sk);
317                         continue;
318                 }
319
320                 if (sk->sk_state == IUCV_CONNECTED ||
321                     sk->sk_state == IUCV_SEVERED ||
322                     !newsock) {
323                         iucv_accept_unlink(sk);
324                         if (newsock)
325                                 sock_graft(sk, newsock);
326
327                         if (sk->sk_state == IUCV_SEVERED)
328                                 sk->sk_state = IUCV_DISCONN;
329
330                         release_sock(sk);
331                         return sk;
332                 }
333
334                 release_sock(sk);
335         }
336         return NULL;
337 }
338
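/*
 * Sleep until the socket reaches one of the two given states, the
 * timeout expires or a signal arrives.
 */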
339 int iucv_sock_wait_state(struct sock *sk, int state, int state2,
340                          unsigned long timeo)
341 {
342         DECLARE_WAITQUEUE(wait, current);
343         int err = 0;
344
345         add_wait_queue(sk->sk_sleep, &wait);
346         while (sk->sk_state != state && sk->sk_state != state2) {
347                 set_current_state(TASK_INTERRUPTIBLE);
348
349                 if (!timeo) {
350                         err = -EAGAIN;
351                         break;
352                 }
353
354                 if (signal_pending(current)) {
355                         err = sock_intr_errno(timeo);
356                         break;
357                 }
358
359                 release_sock(sk);
360                 timeo = schedule_timeout(timeo);
361                 lock_sock(sk);
362
363                 err = sock_error(sk);
364                 if (err)
365                         break;
366         }
367         set_current_state(TASK_RUNNING);
368         remove_wait_queue(sk->sk_sleep, &wait);
369         return err;
370 }
371
372 /* Bind an unbound socket */
373 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
374                           int addr_len)
375 {
376         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
377         struct sock *sk = sock->sk;
378         struct iucv_sock *iucv;
379         int err;
380
381         /* Verify the input sockaddr */
382         if (!addr || addr->sa_family != AF_IUCV)
383                 return -EINVAL;
384
385         lock_sock(sk);
386         if (sk->sk_state != IUCV_OPEN) {
387                 err = -EBADFD;
388                 goto done;
389         }
390
391         write_lock_bh(&iucv_sk_list.lock);
392
393         iucv = iucv_sk(sk);
394         if (__iucv_get_sock_by_name(sa->siucv_name)) {
395                 err = -EADDRINUSE;
396                 goto done_unlock;
397         }
398         if (iucv->path) {
399                 err = 0;
400                 goto done_unlock;
401         }
402
403         /* Bind the socket */
404         memcpy(iucv->src_name, sa->siucv_name, 8);
405
406         /* Copy the user id */
407         memcpy(iucv->src_user_id, iucv_userid, 8);
408         sk->sk_state = IUCV_BOUND;
409         err = 0;
410
411 done_unlock:
412         /* Release the socket list lock */
413         write_unlock_bh(&iucv_sk_list.lock);
414 done:
415         release_sock(sk);
416         return err;
417 }
418
419 /* Automatically bind an unbound socket */
420 static int iucv_sock_autobind(struct sock *sk)
421 {
422         struct iucv_sock *iucv = iucv_sk(sk);
423         char query_buffer[80];
424         char name[12];
425         int err = 0;
426
427         /* Set the userid and name */
428         cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
429         if (unlikely(err))
430                 return -EPROTO;
431
432         memcpy(iucv->src_user_id, query_buffer, 8);
433
434         write_lock_bh(&iucv_sk_list.lock);
435
436         sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
437         while (__iucv_get_sock_by_name(name)) {
438                 sprintf(name, "%08x",
439                         atomic_inc_return(&iucv_sk_list.autobind_name));
440         }
441
442         write_unlock_bh(&iucv_sk_list.lock);
443
444         memcpy(&iucv->src_name, name, 8);
445
446         return err;
447 }
448
449 /* Connect an unconnected socket */
450 static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
451                              int alen, int flags)
452 {
453         struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
454         struct sock *sk = sock->sk;
455         struct iucv_sock *iucv;
456         unsigned char user_data[16];
457         int err;
458
459         if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
460                 return -EINVAL;
461
462         if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
463                 return -EBADFD;
464
465         if (sk->sk_type != SOCK_STREAM)
466                 return -EINVAL;
467
468         iucv = iucv_sk(sk);
469
470         if (sk->sk_state == IUCV_OPEN) {
471                 err = iucv_sock_autobind(sk);
472                 if (unlikely(err))
473                         return err;
474         }
475
476         lock_sock(sk);
477
478         /* Set the destination information */
479         memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
480         memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
481
482         high_nmcpy(user_data, sa->siucv_name);
483         low_nmcpy(user_data, iucv_sk(sk)->src_name);
484         ASCEBC(user_data, sizeof(user_data));
485
486         iucv = iucv_sk(sk);
487         /* Create path. */
488         iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
489                                      IPRMDATA, GFP_KERNEL);
            if (!iucv->path) {
                    /* no path, bail out before trying to connect over it */
                    err = -ENOMEM;
                    goto done;
            }
490         err = iucv_path_connect(iucv->path, &af_iucv_handler,
491                                 sa->siucv_user_id, NULL, user_data, sk);
492         if (err) {
493                 iucv_path_free(iucv->path);
494                 iucv->path = NULL;
495                 err = -ECONNREFUSED;
496                 goto done;
497         }
498
499         if (sk->sk_state != IUCV_CONNECTED) {
500                 err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
501                                 sock_sndtimeo(sk, flags & O_NONBLOCK));
502         }
503
504         if (sk->sk_state == IUCV_DISCONN) {
505                 release_sock(sk);
506                 return -ECONNREFUSED;
507         }
508 done:
509         release_sock(sk);
510         return err;
511 }
512
513 /* Move a socket into listening state. */
514 static int iucv_sock_listen(struct socket *sock, int backlog)
515 {
516         struct sock *sk = sock->sk;
517         int err;
518
519         lock_sock(sk);
520
521         err = -EINVAL;
522         if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
523                 goto done;
524
525         sk->sk_max_ack_backlog = backlog;
526         sk->sk_ack_backlog = 0;
527         sk->sk_state = IUCV_LISTEN;
528         err = 0;
529
530 done:
531         release_sock(sk);
532         return err;
533 }
534
535 /* Accept a pending connection */
536 static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
537                             int flags)
538 {
539         DECLARE_WAITQUEUE(wait, current);
540         struct sock *sk = sock->sk, *nsk;
541         long timeo;
542         int err = 0;
543
544         lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
545
546         if (sk->sk_state != IUCV_LISTEN) {
547                 err = -EBADFD;
548                 goto done;
549         }
550
551         timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
552
553         /* Wait for an incoming connection */
554         add_wait_queue_exclusive(sk->sk_sleep, &wait);
555         while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
556                 set_current_state(TASK_INTERRUPTIBLE);
557                 if (!timeo) {
558                         err = -EAGAIN;
559                         break;
560                 }
561
562                 release_sock(sk);
563                 timeo = schedule_timeout(timeo);
564                 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
565
566                 if (sk->sk_state != IUCV_LISTEN) {
567                         err = -EBADFD;
568                         break;
569                 }
570
571                 if (signal_pending(current)) {
572                         err = sock_intr_errno(timeo);
573                         break;
574                 }
575         }
576
577         set_current_state(TASK_RUNNING);
578         remove_wait_queue(sk->sk_sleep, &wait);
579
580         if (err)
581                 goto done;
582
583         newsock->state = SS_CONNECTED;
584
585 done:
586         release_sock(sk);
587         return err;
588 }
589
590 static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
591                              int *len, int peer)
592 {
593         struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
594         struct sock *sk = sock->sk;
595
596         addr->sa_family = AF_IUCV;
597         *len = sizeof(struct sockaddr_iucv);
598
599         if (peer) {
600                 memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
601                 memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
602         } else {
603                 memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
604                 memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
605         }
606         memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
607         memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
608         memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
609
610         return 0;
611 }
612
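/*
 * Send path: copy the user data into one skb, tag it, queue it on
 * send_skb_q and pass it to the IUCV base code.  The skb is freed in
 * the message_complete callback once the peer has received it.
 */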
613 static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
614                              struct msghdr *msg, size_t len)
615 {
616         struct sock *sk = sock->sk;
617         struct iucv_sock *iucv = iucv_sk(sk);
618         struct sk_buff *skb;
619         struct iucv_message txmsg;
620         int err;
621
622         err = sock_error(sk);
623         if (err)
624                 return err;
625
626         if (msg->msg_flags & MSG_OOB)
627                 return -EOPNOTSUPP;
628
629         lock_sock(sk);
630
631         if (sk->sk_shutdown & SEND_SHUTDOWN) {
632                 err = -EPIPE;
633                 goto out;
634         }
635
636         if (sk->sk_state == IUCV_CONNECTED) {
637                 if (!(skb = sock_alloc_send_skb(sk, len,
638                                                 msg->msg_flags & MSG_DONTWAIT,
639                                                 &err)))
640                         goto out;
641
642                 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
643                         err = -EFAULT;
644                         goto fail;
645                 }
646
647                 txmsg.class = 0;
648                 txmsg.tag = iucv->send_tag++;
649                 memcpy(skb->cb, &txmsg.tag, 4);
650                 skb_queue_tail(&iucv->send_skb_q, skb);
651                 err = iucv_message_send(iucv->path, &txmsg, 0, 0,
652                                         (void *) skb->data, skb->len);
653                 if (err) {
654                         if (err == 3)
655                                 printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
656                         skb_unlink(skb, &iucv->send_skb_q);
657                         err = -EPIPE;
658                         goto fail;
659                 }
660
661         } else {
662                 err = -ENOTCONN;
663                 goto out;
664         }
665
666         release_sock(sk);
667         return len;
668
669 fail:
670         kfree_skb(skb);
671 out:
672         release_sock(sk);
673         return err;
674 }
675
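/*
 * Receive path: copy a queued skb to user space and, once it has been
 * fully consumed, refill the receive queue from the backlog queue.
 */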
676 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
677                              struct msghdr *msg, size_t len, int flags)
678 {
679         int noblock = flags & MSG_DONTWAIT;
680         struct sock *sk = sock->sk;
681         struct iucv_sock *iucv = iucv_sk(sk);
682         int target, copied = 0;
683         struct sk_buff *skb, *rskb, *cskb;
684         int err = 0;
685
686         if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
687                 skb_queue_empty(&iucv->backlog_skb_q) &&
688                 skb_queue_empty(&sk->sk_receive_queue))
689                 return 0;
690
691         if (flags & (MSG_OOB))
692                 return -EOPNOTSUPP;
693
694         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
695
696         skb = skb_recv_datagram(sk, flags, noblock, &err);
697         if (!skb) {
698                 if (sk->sk_shutdown & RCV_SHUTDOWN)
699                         return 0;
700                 return err;
701         }
702
703         copied = min_t(unsigned int, skb->len, len);
704
705         cskb = skb;
706         if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
707                 skb_queue_head(&sk->sk_receive_queue, skb);
708                 if (copied == 0)
709                         return -EFAULT;
710                 goto done;
711         }
712
713         len -= copied;
714
715         /* Mark read part of skb as used */
716         if (!(flags & MSG_PEEK)) {
717                 skb_pull(skb, copied);
718
719                 if (skb->len) {
720                         skb_queue_head(&sk->sk_receive_queue, skb);
721                         goto done;
722                 }
723
724                 kfree_skb(skb);
725
726                 /* Queue backlog skbs */
727                 rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
728                 while (rskb) {
729                         if (sock_queue_rcv_skb(sk, rskb)) {
730                                 skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
731                                                 rskb);
732                                 break;
733                         } else {
734                                 rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
735                         }
736                 }
737         } else
738                 skb_queue_head(&sk->sk_receive_queue, skb);
739
740 done:
741         return err ? : copied;
742 }
743
744 static inline unsigned int iucv_accept_poll(struct sock *parent)
745 {
746         struct iucv_sock *isk, *n;
747         struct sock *sk;
748
749         list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
750                 sk = (struct sock *) isk;
751
752                 if (sk->sk_state == IUCV_CONNECTED)
753                         return POLLIN | POLLRDNORM;
754         }
755
756         return 0;
757 }
758
759 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
760                             poll_table *wait)
761 {
762         struct sock *sk = sock->sk;
763         unsigned int mask = 0;
764
765         poll_wait(file, sk->sk_sleep, wait);
766
767         if (sk->sk_state == IUCV_LISTEN)
768                 return iucv_accept_poll(sk);
769
770         if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
771                 mask |= POLLERR;
772
773         if (sk->sk_shutdown & RCV_SHUTDOWN)
774                 mask |= POLLRDHUP;
775
776         if (sk->sk_shutdown == SHUTDOWN_MASK)
777                 mask |= POLLHUP;
778
779         if (!skb_queue_empty(&sk->sk_receive_queue) ||
780             (sk->sk_shutdown & RCV_SHUTDOWN))
781                 mask |= POLLIN | POLLRDNORM;
782
783         if (sk->sk_state == IUCV_CLOSED)
784                 mask |= POLLHUP;
785
786         if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
787                 mask |= POLLIN;
788
789         if (sock_writeable(sk))
790                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
791         else
792                 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
793
794         return mask;
795 }
796
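/*
 * Shut down one or both directions.  SEND_SHUTDOWN tells the peer with
 * a parameter-list message, RCV_SHUTDOWN quiesces the IUCV path and
 * purges the receive queue.
 */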
797 static int iucv_sock_shutdown(struct socket *sock, int how)
798 {
799         struct sock *sk = sock->sk;
800         struct iucv_sock *iucv = iucv_sk(sk);
801         struct iucv_message txmsg;
802         int err = 0;
803         u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
804
805         how++;
806
807         if ((how & ~SHUTDOWN_MASK) || !how)
808                 return -EINVAL;
809
810         lock_sock(sk);
811         switch (sk->sk_state) {
812         case IUCV_CLOSED:
813                 err = -ENOTCONN;
814                 goto fail;
815
816         default:
817                 sk->sk_shutdown |= how;
818                 break;
819         }
820
821         if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
822                 txmsg.class = 0;
823                 txmsg.tag = 0;
824                 err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
825                                         (void *) prmmsg, 8);
826                 if (err) {
827                         switch (err) {
828                         case 1:
829                                 err = -ENOTCONN;
830                                 break;
831                         case 2:
832                                 err = -ECONNRESET;
833                                 break;
834                         default:
835                                 err = -ENOTCONN;
836                                 break;
837                         }
838                 }
839         }
840
841         if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
842                 err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
843                 if (err)
844                         err = -ENOTCONN;
845
846                 skb_queue_purge(&sk->sk_receive_queue);
847         }
848
849         /* Wake up anyone sleeping in poll */
850         sk->sk_state_change(sk);
851
852 fail:
853         release_sock(sk);
854         return err;
855 }
856
857 static int iucv_sock_release(struct socket *sock)
858 {
859         struct sock *sk = sock->sk;
860         int err = 0;
861
862         if (!sk)
863                 return 0;
864
865         iucv_sock_close(sk);
866
867         /* Unregister with IUCV base support */
868         if (iucv_sk(sk)->path) {
869                 iucv_path_sever(iucv_sk(sk)->path, NULL);
870                 iucv_path_free(iucv_sk(sk)->path);
871                 iucv_sk(sk)->path = NULL;
872         }
873
874         sock_orphan(sk);
875         iucv_sock_kill(sk);
876         return err;
877 }
878
879 /* Callback wrappers - called from iucv base support */
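/*
 * path_pending: a peer wants to connect.  Find the matching listening
 * socket, allocate a child socket and accept the path on its behalf,
 * or sever the path if no listener can take it.
 */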
880 static int iucv_callback_connreq(struct iucv_path *path,
881                                  u8 ipvmid[8], u8 ipuser[16])
882 {
883         unsigned char user_data[16];
884         unsigned char nuser_data[16];
885         unsigned char src_name[8];
886         struct hlist_node *node;
887         struct sock *sk, *nsk;
888         struct iucv_sock *iucv, *niucv;
889         int err;
890
891         memcpy(src_name, ipuser, 8);
892         EBCASC(src_name, 8);
893         /* Find out if this path belongs to af_iucv. */
894         read_lock(&iucv_sk_list.lock);
895         iucv = NULL;
896         sk = NULL;
897         sk_for_each(sk, node, &iucv_sk_list.head)
898                 if (sk->sk_state == IUCV_LISTEN &&
899                     !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
900                         /*
901                          * Found a listening socket with
902                          * src_name == ipuser[0-7].
903                          */
904                         iucv = iucv_sk(sk);
905                         break;
906                 }
907         read_unlock(&iucv_sk_list.lock);
908         if (!iucv)
909                 /* No socket found, not one of our paths. */
910                 return -EINVAL;
911
912         bh_lock_sock(sk);
913
914         /* Check if parent socket is listening */
915         low_nmcpy(user_data, iucv->src_name);
916         high_nmcpy(user_data, iucv->dst_name);
917         ASCEBC(user_data, sizeof(user_data));
918         if (sk->sk_state != IUCV_LISTEN) {
919                 err = iucv_path_sever(path, user_data);
920                 goto fail;
921         }
922
923         /* Check for backlog size */
924         if (sk_acceptq_is_full(sk)) {
925                 err = iucv_path_sever(path, user_data);
926                 goto fail;
927         }
928
929         /* Create the new socket */
930         nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
931         if (!nsk) {
932                 err = iucv_path_sever(path, user_data);
933                 goto fail;
934         }
935
936         niucv = iucv_sk(nsk);
937         iucv_sock_init(nsk, sk);
938
939         /* Set the new iucv_sock */
940         memcpy(niucv->dst_name, ipuser + 8, 8);
941         EBCASC(niucv->dst_name, 8);
942         memcpy(niucv->dst_user_id, ipvmid, 8);
943         memcpy(niucv->src_name, iucv->src_name, 8);
944         memcpy(niucv->src_user_id, iucv->src_user_id, 8);
945         niucv->path = path;
946
947         /* Build the user data and accept the path on behalf of the new socket */
948         high_nmcpy(nuser_data, ipuser + 8);
949         memcpy(nuser_data + 8, niucv->src_name, 8);
950         ASCEBC(nuser_data + 8, 8);
951
952         path->msglim = IUCV_QUEUELEN_DEFAULT;
953         err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
954         if (err) {
955                 err = iucv_path_sever(path, user_data);
956                 goto fail;
957         }
958
959         iucv_accept_enqueue(sk, nsk);
960
961         /* Wake up accept */
962         nsk->sk_state = IUCV_CONNECTED;
963         sk->sk_data_ready(sk, 1);
964         err = 0;
965 fail:
966         bh_unlock_sock(sk);
967         return 0;
968 }
969
970 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
971 {
972         struct sock *sk = path->private;
973
974         sk->sk_state = IUCV_CONNECTED;
975         sk->sk_state_change(sk);
976 }
977
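/*
 * Split a received message into skbs no larger than a quarter of the
 * socket receive buffer.
 */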
978 static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
979                              struct sk_buff_head *fragmented_skb_q)
980 {
981         int dataleft, size, copied = 0;
982         struct sk_buff *nskb;
983
984         dataleft = len;
985         while (dataleft) {
986                 if (dataleft >= sk->sk_rcvbuf / 4)
987                         size = sk->sk_rcvbuf / 4;
988                 else
989                         size = dataleft;
990
991                 nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
992                 if (!nskb)
993                         return -ENOMEM;
994
995                 memcpy(nskb->data, skb->data + copied, size);
996                 copied += size;
997                 dataleft -= size;
998
999                 skb_reset_transport_header(nskb);
1000                 skb_reset_network_header(nskb);
1001                 nskb->len = size;
1002
1003                 skb_queue_tail(fragmented_skb_q, nskb);
1004         }
1005
1006         return 0;
1007 }
1008
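/*
 * message_pending: receive the message into an skb (fragmenting large
 * messages) and queue it for the reader; the backlog queue is used when
 * the receive queue is full or already holds earlier data.
 */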
1009 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1010 {
1011         struct sock *sk = path->private;
1012         struct iucv_sock *iucv = iucv_sk(sk);
1013         struct sk_buff *skb, *fskb;
1014         struct sk_buff_head fragmented_skb_q;
1015         int rc;
1016
1017         skb_queue_head_init(&fragmented_skb_q);
1018
1019         if (sk->sk_shutdown & RCV_SHUTDOWN)
1020                 return;
1021
1022         skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
1023         if (!skb) {
1024                 iucv_path_sever(path, NULL);
1025                 return;
1026         }
1027
1028         if (msg->flags & IPRMDATA) {
1029                 skb->data = NULL;
1030                 skb->len = 0;
1031         } else {
1032                 rc = iucv_message_receive(path, msg, 0, skb->data,
1033                                           msg->length, NULL);
1034                 if (rc) {
1035                         kfree_skb(skb);
1036                         return;
1037                 }
1038                 if (skb->truesize >= sk->sk_rcvbuf / 4) {
1039                         rc = iucv_fragment_skb(sk, skb, msg->length,
1040                                                &fragmented_skb_q);
1041                         kfree_skb(skb);
1042                         skb = NULL;
1043                         if (rc) {
1044                                 iucv_path_sever(path, NULL);
1045                                 return;
1046                         }
1047                 } else {
1048                         skb_reset_transport_header(skb);
1049                         skb_reset_network_header(skb);
1050                         skb->len = msg->length;
1051                 }
1052         }
1053         /* Queue the fragmented skb */
1054         fskb = skb_dequeue(&fragmented_skb_q);
1055         while (fskb) {
1056                 if (!skb_queue_empty(&iucv->backlog_skb_q))
1057                         skb_queue_tail(&iucv->backlog_skb_q, fskb);
1058                 else if (sock_queue_rcv_skb(sk, fskb))
1059                         skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
1060                 fskb = skb_dequeue(&fragmented_skb_q);
1061         }
1062
1063         /* Queue the original skb if it exists (was not fragmented) */
1064         if (skb) {
1065                 if (!skb_queue_empty(&iucv->backlog_skb_q))
1066                         skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
1067                 else if (sock_queue_rcv_skb(sk, skb))
1068                         skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
1069         }
1070
1071 }
1072
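/*
 * message_complete: the message has been delivered, drop the matching
 * skb from send_skb_q and finish a pending close once the queue drains.
 */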
1073 static void iucv_callback_txdone(struct iucv_path *path,
1074                                  struct iucv_message *msg)
1075 {
1076         struct sock *sk = path->private;
1077         struct sk_buff *this;
1078         struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1079         struct sk_buff *list_skb = list->next;
1080         unsigned long flags;
1081
1082         if (list_skb) {
1083                 spin_lock_irqsave(&list->lock, flags);
1084
1085                 do {
1086                         this = list_skb;
1087                         list_skb = list_skb->next;
1088                 } while (memcmp(&msg->tag, this->cb, 4) && list_skb);
1089
1090                 spin_unlock_irqrestore(&list->lock, flags);
1091
1092                 skb_unlink(this, &iucv_sk(sk)->send_skb_q);
1093                 kfree_skb(this);
1094         }
1095
1096         if (sk->sk_state == IUCV_CLOSING) {
1097                 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1098                         sk->sk_state = IUCV_CLOSED;
1099                         sk->sk_state_change(sk);
1100                 }
1101         }
1102
1103 }
1104
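/*
 * path_severed: the peer disconnected; mark the socket severed or
 * disconnected and wake up anyone waiting on it.
 */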
1105 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1106 {
1107         struct sock *sk = path->private;
1108
1109         if (!list_empty(&iucv_sk(sk)->accept_q))
1110                 sk->sk_state = IUCV_SEVERED;
1111         else
1112                 sk->sk_state = IUCV_DISCONN;
1113
1114         sk->sk_state_change(sk);
1115 }
1116
1117 static struct proto_ops iucv_sock_ops = {
1118         .family         = PF_IUCV,
1119         .owner          = THIS_MODULE,
1120         .release        = iucv_sock_release,
1121         .bind           = iucv_sock_bind,
1122         .connect        = iucv_sock_connect,
1123         .listen         = iucv_sock_listen,
1124         .accept         = iucv_sock_accept,
1125         .getname        = iucv_sock_getname,
1126         .sendmsg        = iucv_sock_sendmsg,
1127         .recvmsg        = iucv_sock_recvmsg,
1128         .poll           = iucv_sock_poll,
1129         .ioctl          = sock_no_ioctl,
1130         .mmap           = sock_no_mmap,
1131         .socketpair     = sock_no_socketpair,
1132         .shutdown       = iucv_sock_shutdown,
1133         .setsockopt     = sock_no_setsockopt,
1134         .getsockopt     = sock_no_getsockopt
1135 };
1136
1137 static struct net_proto_family iucv_sock_family_ops = {
1138         .family = AF_IUCV,
1139         .owner  = THIS_MODULE,
1140         .create = iucv_sock_create,
1141 };
1142
1143 static int __init afiucv_init(void)
1144 {
1145         int err;
1146
1147         if (!MACHINE_IS_VM) {
1148                 printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
1149                 err = -EPROTONOSUPPORT;
1150                 goto out;
1151         }
1152         cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
1153         if (unlikely(err)) {
1154                 printk(KERN_ERR "AF_IUCV needs the VM userid\n");
1155                 err = -EPROTONOSUPPORT;
1156                 goto out;
1157         }
1158
1159         err = iucv_register(&af_iucv_handler, 0);
1160         if (err)
1161                 goto out;
1162         err = proto_register(&iucv_proto, 0);
1163         if (err)
1164                 goto out_iucv;
1165         err = sock_register(&iucv_sock_family_ops);
1166         if (err)
1167                 goto out_proto;
1168         printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
1169         return 0;
1170
1171 out_proto:
1172         proto_unregister(&iucv_proto);
1173 out_iucv:
1174         iucv_unregister(&af_iucv_handler, 0);
1175 out:
1176         return err;
1177 }
1178
1179 static void __exit afiucv_exit(void)
1180 {
1181         sock_unregister(PF_IUCV);
1182         proto_unregister(&iucv_proto);
1183         iucv_unregister(&af_iucv_handler, 0);
1184
1185         printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
1186 }
1187
1188 module_init(afiucv_init);
1189 module_exit(afiucv_exit);
1190
1191 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
1192 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
1193 MODULE_VERSION(VERSION);
1194 MODULE_LICENSE("GPL");
1195 MODULE_ALIAS_NETPROTO(PF_IUCV);