af_iucv: Support data in IUCV msg parameter lists (IPRMDATA)
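The patch adds transport of small socket payloads (up to 7 bytes) directly in
the IUCV message parameter list (IPRMDATA) instead of a separate data buffer.
The sketch below shows how an application might opt in via the new
SO_IPRMDATA_MSG socket option; it is illustrative only (the AF_IUCV, SOL_IUCV
and SO_IPRMDATA_MSG constants live in kernel headers in this tree, so a real
program may have to define them locally, as done here).

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV         32      /* from linux/socket.h */
#endif
#ifndef SOL_IUCV
#define SOL_IUCV        277     /* from linux/socket.h */
#endif
#ifndef SO_IPRMDATA_MSG
#define SO_IPRMDATA_MSG 0x0080  /* from net/iucv/af_iucv.h */
#endif

int main(void)
{
        int val = 1;
        int fd = socket(AF_IUCV, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* opt in to parameter-list data: sends of <= 7 bytes are then
         * carried in the IUCV parameter list (see iucv_send_iprm()) */
        if (setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &val, sizeof(val)))
                perror("setsockopt");
        /* ... bind()/connect() with a struct sockaddr_iucv and send
         * small messages as usual ... */
        close(fd);
        return 0;
}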
/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
        .name           = "AF_IUCV",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct iucv_sock),
};

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
                                 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
        .path_pending     = iucv_callback_connreq,
        .path_complete    = iucv_callback_connack,
        .path_severed     = iucv_callback_connrej,
        .message_pending  = iucv_callback_rx,
        .message_complete = iucv_callback_txdone,
        .path_quiesced    = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
        memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
        memcpy(&dst[8], src, 8);
}

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:        Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg,
 * whether the data is stored in a buffer or in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *      PRMDATA[0..6]   socket data (max 7 bytes);
 *      PRMDATA[7]      socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the
 * function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
        size_t datalen;

        if (msg->flags & IUCV_IPRMDATA) {
                datalen = 0xff - msg->rmmsg[7];
                return (datalen < 8) ? datalen : 8;
        }
        return msg->length;
}
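/*
 * Worked example (editorial illustration, not part of the original
 * source): a 5-byte IPRM payload occupies PRMDATA[0..4] and sets
 * PRMDATA[7] = 0xff - 5 = 0xfa, so iucv_msg_length() returns
 * 0xff - 0xfa = 5.  The special shutdown message (iprm_shutdown)
 * carries PRMDATA[7] = 0x01, which decodes to 0xff - 0x01 = 0xfe;
 * since that exceeds 7, iucv_msg_length() caps the result at 8 and
 * iucv_process_message() treats the message as a notification rather
 * than socket data.
 */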

/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
        struct sock *sk = (struct sock *)arg;

        bh_lock_sock(sk);
        sk->sk_err = ETIMEDOUT;
        sk->sk_state_change(sk);
        bh_unlock_sock(sk);

        iucv_sock_kill(sk);
        sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
        struct sock *sk;
        struct hlist_node *node;

        sk_for_each(sk, node, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
        struct sock *sk;

        /* Close non-accepted connections */
        while ((sk = iucv_accept_dequeue(parent, NULL))) {
                iucv_sock_close(sk);
                iucv_sock_kill(sk);
        }

        parent->sk_state = IUCV_CLOSED;
        sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
        if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
                return;

        iucv_sock_unlink(&iucv_sk_list, sk);
        sock_set_flag(sk, SOCK_DEAD);
        sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
        unsigned long timeo;

        iucv_sock_clear_timer(sk);
        lock_sock(sk);

        switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;

        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;

                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);

                if (!skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
                                timeo = IUCV_DISCONN_TIMEOUT;
                        err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
                }

                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);

                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
                        ASCEBC(user_data, sizeof(user_data));
                        err = iucv_path_sever(iucv->path, user_data);
                        iucv_path_free(iucv->path);
                        iucv->path = NULL;
                }

                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);

                skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);

                sock_set_flag(sk, SOCK_ZAPPED);
                break;

        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
        }

        release_sock(sk);
        iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
        if (parent)
                sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
        struct sock *sk;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
        spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
        spin_lock_init(&iucv_sk(sk)->message_q.lock);
        skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;
        iucv_sk(sk)->flags = 0;

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
        sk->sk_allocation = GFP_DMA;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;

        setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        if (sock->type != SOCK_STREAM)
                return -ESOCKTNOSUPPORT;

        sock->state = SS_UNCONNECTED;
        sock->ops = &iucv_sock_ops;

        sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
        if (!sk)
                return -ENOMEM;

        iucv_sock_init(sk, NULL);

        return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_add_node(sk, &l->head);
        write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
        write_lock_bh(&l->lock);
        sk_del_node_init(sk);
        write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(parent);

        sock_hold(sk);
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
        unsigned long flags;
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);

                if (sk->sk_state == IUCV_CLOSED) {
                        iucv_accept_unlink(sk);
                        release_sock(sk);
                        continue;
                }

                if (sk->sk_state == IUCV_CONNECTED ||
                    sk->sk_state == IUCV_SEVERED ||
                    !newsock) {
                        iucv_accept_unlink(sk);
                        if (newsock)
                                sock_graft(sk, newsock);

                        if (sk->sk_state == IUCV_SEVERED)
                                sk->sk_state = IUCV_DISCONN;

                        release_sock(sk);
                        return sk;
                }

                release_sock(sk);
        }
        return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
                         unsigned long timeo)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        add_wait_queue(sk->sk_sleep, &wait);
        while (sk->sk_state != state && sk->sk_state != state2) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);

                err = sock_error(sk);
                if (err)
                        break;
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);
        return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                          int addr_len)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        int err;

        /* Verify the input sockaddr */
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;

        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
                goto done;
        }

        write_lock_bh(&iucv_sk_list.lock);

        iucv = iucv_sk(sk);
        if (__iucv_get_sock_by_name(sa->siucv_name)) {
                err = -EADDRINUSE;
                goto done_unlock;
        }
        if (iucv->path) {
                err = 0;
                goto done_unlock;
        }

        /* Bind the socket */
        memcpy(iucv->src_name, sa->siucv_name, 8);

        /* Copy the user id */
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        err = 0;

done_unlock:
        /* Release the socket list lock */
        write_unlock_bh(&iucv_sk_list.lock);
done:
        release_sock(sk);
        return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        char query_buffer[80];
        char name[12];
        int err = 0;

        /* Set the userid and name */
        cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
        if (unlikely(err))
                return -EPROTO;

        memcpy(iucv->src_user_id, query_buffer, 8);

        write_lock_bh(&iucv_sk_list.lock);

        sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
        while (__iucv_get_sock_by_name(name)) {
                sprintf(name, "%08x",
                        atomic_inc_return(&iucv_sk_list.autobind_name));
        }

        write_unlock_bh(&iucv_sk_list.lock);

        memcpy(&iucv->src_name, name, 8);

        return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                             int alen, int flags)
{
        struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv;
        unsigned char user_data[16];
        int err;

        if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
                return -EINVAL;

        if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
                return -EBADFD;

        if (sk->sk_type != SOCK_STREAM)
                return -EINVAL;

        iucv = iucv_sk(sk);

        if (sk->sk_state == IUCV_OPEN) {
                err = iucv_sock_autobind(sk);
                if (unlikely(err))
                        return err;
        }

        lock_sock(sk);

        /* Set the destination information */
        memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
        memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

        high_nmcpy(user_data, sa->siucv_name);
        low_nmcpy(user_data, iucv_sk(sk)->src_name);
        ASCEBC(user_data, sizeof(user_data));

        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IUCV_IPRMDATA, GFP_KERNEL);
        if (!iucv->path) {
                err = -ENOMEM;
                goto done;
        }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
                switch (err) {
                case 0x0b:      /* Target communicator is not logged on */
                        err = -ENETUNREACH;
                        break;
                case 0x0d:      /* Max connections for this guest exceeded */
                case 0x0e:      /* Max connections for target guest exceeded */
                        err = -EAGAIN;
                        break;
                case 0x0f:      /* Missing IUCV authorization */
                        err = -EACCES;
                        break;
                default:
                        err = -ECONNREFUSED;
                        break;
                }
                goto done;
        }

        if (sk->sk_state != IUCV_CONNECTED) {
                err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
        }

        if (sk->sk_state == IUCV_DISCONN) {
                err = -ECONNREFUSED;
        }

        if (err) {
                iucv_path_sever(iucv->path, NULL);
                iucv_path_free(iucv->path);
                iucv->path = NULL;
        }

done:
        release_sock(sk);
        return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err;

        lock_sock(sk);

        err = -EINVAL;
        if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
                goto done;

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = IUCV_LISTEN;
        err = 0;

done:
        release_sock(sk);
        return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
        DECLARE_WAITQUEUE(wait, current);
        struct sock *sk = sock->sk, *nsk;
        long timeo;
        int err = 0;

        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
                goto done;
        }

        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
                        break;
                }

                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
                        break;
                }

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
                        break;
                }
        }

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk->sk_sleep, &wait);

        if (err)
                goto done;

        newsock->state = SS_CONNECTED;

done:
        release_sock(sk);
        return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                             int *len, int peer)
{
        struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
        struct sock *sk = sock->sk;

        addr->sa_family = AF_IUCV;
        *len = sizeof(struct sockaddr_iucv);

        if (peer) {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->dst_name, 8);
        } else {
                memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
                memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
        }
        memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
        memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
        memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

        return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:       IUCV path
 * @msg:        Pointer to a struct iucv_message
 * @skb:        The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
                          struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, (void *) skb->data, skb->len);
        prmdata[7] = 0xff - (u8) skb->len;
        return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
                                 (void *) prmdata, 8);
}
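/*
 * Editorial illustration (not part of the original source): for
 * skb->data = "hi" and skb->len = 2, iucv_send_iprm() transmits the
 * 8-byte parameter list { 'h', 'i', -, -, -, -, -, 0xfd }, where
 * 0xfd = 0xff - 2 encodes the payload length; bytes 2..6 are unused
 * and ignored by the receiver, which recovers the length via
 * iucv_msg_length().
 */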

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
        char user_id[9];
        char appl_id[9];
        int err;

        err = sock_error(sk);
        if (err)
                return err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state == IUCV_CONNECTED) {
                if (!(skb = sock_alloc_send_skb(sk, len,
                                                msg->msg_flags & MSG_DONTWAIT,
                                                &err)))
                        goto out;

                if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }

                txmsg.class = 0;
                memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);

                if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
                    && skb->len <= 7) {
                        err = iucv_send_iprm(iucv->path, &txmsg, skb);

                        /* on success: there is no message_complete callback
                         * for an IPRMDATA msg; remove skb from send queue */
                        if (err == 0) {
                                skb_unlink(skb, &iucv->send_skb_q);
                                kfree_skb(skb);
                        }

                        /* this error should never happen since the
                         * IUCV_IPRMDATA path flag is set... sever path */
                        if (err == 0x15) {
                                iucv_path_sever(iucv->path, NULL);
                                skb_unlink(skb, &iucv->send_skb_q);
                                err = -EPIPE;
                                goto fail;
                        }
                } else
                        err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                                (void *) skb->data, skb->len);
                if (err) {
                        if (err == 3) {
                                user_id[8] = 0;
                                memcpy(user_id, iucv->dst_user_id, 8);
                                appl_id[8] = 0;
                                memcpy(appl_id, iucv->dst_name, 8);
                                pr_err("Application %s on z/VM guest %s"
                                       " exceeds message limit\n",
                                       appl_id, user_id);
                        }
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
                }

        } else {
                err = -ENOTCONN;
                goto out;
        }

        release_sock(sk);
        return len;

fail:
        kfree_skb(skb);
out:
        release_sock(sk);
        return err;
}

static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
        int dataleft, size, copied = 0;
        struct sk_buff *nskb;

        dataleft = len;
        while (dataleft) {
                if (dataleft >= sk->sk_rcvbuf / 4)
                        size = sk->sk_rcvbuf / 4;
                else
                        size = dataleft;

                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
                if (!nskb)
                        return -ENOMEM;

                memcpy(nskb->data, skb->data + copied, size);
                copied += size;
                dataleft -= size;

                skb_reset_transport_header(nskb);
                skb_reset_network_header(nskb);
                nskb->len = size;

                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
        }

        return 0;
}
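/*
 * Editorial note: the sk_rcvbuf / 4 chunk size mirrors the truesize
 * check in iucv_process_message() below; oversized messages are split
 * into backlog skbs small enough to pass sock_queue_rcv_skb() later.
 */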

static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                                 struct iucv_path *path,
                                 struct iucv_message *msg)
{
        int rc;
        unsigned int len;

        len = iucv_msg_length(msg);

        /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
        if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
                if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
                        skb->data = NULL;
                        skb->len = 0;
                }
        } else {
                rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
                                          skb->data, len, NULL);
                if (rc) {
                        kfree_skb(skb);
                        return;
                }
                if (skb->truesize >= sk->sk_rcvbuf / 4) {
                        rc = iucv_fragment_skb(sk, skb, len);
                        kfree_skb(skb);
                        skb = NULL;
                        if (rc) {
                                iucv_path_sever(path, NULL);
                                return;
                        }
                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
                } else {
                        skb_reset_transport_header(skb);
                        skb_reset_network_header(skb);
                        skb->len = len;
                }
        }

        if (sock_queue_rcv_skb(sk, skb))
                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
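/*
 * Editorial note: for the special shutdown IPRM message the skb is
 * queued with a data length of zero; iucv_sock_recvmsg() then copies
 * zero bytes and returns 0, which userspace sees as end-of-file on
 * the receive direction.
 */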

static void iucv_process_message_q(struct sock *sk)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *p, *n;

        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
                skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
                if (!skb)
                        break;
                iucv_process_message(sk, skb, p->path, &p->msg);
                list_del(&p->list);
                kfree(p);
                if (!skb_queue_empty(&iucv->backlog_skb_q))
                        break;
        }
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int target, copied = 0;
        struct sk_buff *skb, *rskb, *cskb;
        int err = 0;

        if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
            skb_queue_empty(&iucv->backlog_skb_q) &&
            skb_queue_empty(&sk->sk_receive_queue) &&
            list_empty(&iucv->message_q.list))
                return 0;

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return 0;
                return err;
        }

        copied = min_t(unsigned int, skb->len, len);

        cskb = skb;
        if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
                goto done;
        }

        len -= copied;

        /* Mark read part of skb as used */
        if (!(flags & MSG_PEEK)) {
                skb_pull(skb, copied);

                if (skb->len) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
                        goto done;
                }

                kfree_skb(skb);

                /* Queue backlog skbs */
                rskb = skb_dequeue(&iucv->backlog_skb_q);
                while (rskb) {
                        if (sock_queue_rcv_skb(sk, rskb)) {
                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                break;
                        } else {
                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                        }
                }
                if (skb_queue_empty(&iucv->backlog_skb_q)) {
                        spin_lock_bh(&iucv->message_q.lock);
                        if (!list_empty(&iucv->message_q.list))
                                iucv_process_message_q(sk);
                        spin_unlock_bh(&iucv->message_q.lock);
                }

        } else
                skb_queue_head(&sk->sk_receive_queue, skb);

done:
        return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
        struct iucv_sock *isk, *n;
        struct sock *sk;

        list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;

                if (sk->sk_state == IUCV_CONNECTED)
                        return POLLIN | POLLRDNORM;
        }

        return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                            poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        poll_wait(file, sk->sk_sleep, wait);

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;

        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
                mask |= POLLIN;

        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct iucv_message txmsg;
        int err = 0;

        how++;

        if ((how & ~SHUTDOWN_MASK) || !how)
                return -EINVAL;

        lock_sock(sk);
        switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;

        default:
                sk->sk_shutdown |= how;
                break;
        }

        if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
                txmsg.class = 0;
                txmsg.tag = 0;
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) iprm_shutdown, 8);
                if (err) {
                        switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
                        case 2:
                                err = -ECONNRESET;
                                break;
                        default:
                                err = -ENOTCONN;
                                break;
                        }
                }
        }

        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
                err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
                if (err)
                        err = -ENOTCONN;

                skb_queue_purge(&sk->sk_receive_queue);
        }

        /* Wake up anyone sleeping in poll */
        sk->sk_state_change(sk);

fail:
        release_sock(sk);
        return err;
}
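/*
 * Editorial note: the shutdown notification above is always sent as an
 * IPRM message (independent of the SO_IPRMDATA_MSG socket option);
 * since AF_IUCV paths are established with the IUCV_IPRMDATA flag, the
 * peer accepts it and recognizes it in iucv_process_message() by its
 * out-of-range length byte, queueing a zero-length skb instead of
 * socket data.
 */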

static int iucv_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (!sk)
                return 0;

        iucv_sock_close(sk);

        /* Unregister with IUCV base support */
        if (iucv_sk(sk)->path) {
                iucv_path_sever(iucv_sk(sk)->path, NULL);
                iucv_path_free(iucv_sk(sk)->path);
                iucv_sk(sk)->path = NULL;
        }

        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val;
        int rc;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *) optval))
                return -EFAULT;

        rc = 0;

        lock_sock(sk);
        switch (optname) {
        case SO_IPRMDATA_MSG:
                if (val)
                        iucv->flags |= IUCV_IPRMDATA;
                else
                        iucv->flags &= ~IUCV_IPRMDATA;
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        release_sock(sk);

        return rc;
}
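/*
 * Editorial note: SO_IPRMDATA_MSG only toggles IUCV_IPRMDATA in
 * iucv->flags; iucv_sock_sendmsg() additionally requires the path to
 * have been established with the IUCV_IPRMDATA flag before sending
 * data in the parameter list.
 */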

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        int val, len;

        if (level != SOL_IUCV)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        if (len < 0)
                return -EINVAL;

        len = min_t(unsigned int, len, sizeof(int));

        switch (optname) {
        case SO_IPRMDATA_MSG:
                val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;

        return 0;
}


/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
                                 u8 ipvmid[8], u8 ipuser[16])
{
        unsigned char user_data[16];
        unsigned char nuser_data[16];
        unsigned char src_name[8];
        struct hlist_node *node;
        struct sock *sk, *nsk;
        struct iucv_sock *iucv, *niucv;
        int err;

        memcpy(src_name, ipuser, 8);
        EBCASC(src_name, 8);
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
        sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
                        /*
                         * Found a listening socket with
                         * src_name == ipuser[0-7].
                         */
                        iucv = iucv_sk(sk);
                        break;
                }
        read_unlock(&iucv_sk_list.lock);
        if (!iucv)
                /* No socket found, not one of our paths. */
                return -EINVAL;

        bh_lock_sock(sk);

        /* Check if parent socket is listening */
        low_nmcpy(user_data, iucv->src_name);
        high_nmcpy(user_data, iucv->dst_name);
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
        if (!nsk) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                goto fail;
        }

        niucv = iucv_sk(nsk);
        iucv_sock_init(nsk, sk);

        /* Set the new iucv_sock */
        memcpy(niucv->dst_name, ipuser + 8, 8);
        EBCASC(niucv->dst_name, 8);
        memcpy(niucv->dst_user_id, ipvmid, 8);
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        niucv->path = path;

        /* Call iucv_accept */
        high_nmcpy(nuser_data, ipuser + 8);
        memcpy(nuser_data + 8, niucv->src_name, 8);
        ASCEBC(nuser_data + 8, 8);

        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
                err = iucv_path_sever(path, user_data);
                iucv_path_free(path);
                iucv_sock_kill(nsk);
                goto fail;
        }

        iucv_accept_enqueue(sk, nsk);

        /* Wake up accept */
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk, 1);
        err = 0;
fail:
        bh_unlock_sock(sk);
        return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        sk->sk_state = IUCV_CONNECTED;
        sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct sock_msg_q *save_msg;
        int len;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;

        if (!list_empty(&iucv->message_q.list) ||
            !skb_queue_empty(&iucv->backlog_skb_q))
                goto save_message;

        len = atomic_read(&sk->sk_rmem_alloc);
        len += iucv_msg_length(msg) + sizeof(struct sk_buff);
        if (len > sk->sk_rcvbuf)
                goto save_message;

        skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
        if (!skb)
                goto save_message;

        spin_lock(&iucv->message_q.lock);
        iucv_process_message(sk, skb, path, msg);
        spin_unlock(&iucv->message_q.lock);

        return;

save_message:
        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
        if (!save_msg)
                return;
        save_msg->path = path;
        save_msg->msg = *msg;

        spin_lock(&iucv->message_q.lock);
        list_add_tail(&save_msg->list, &iucv->message_q.list);
        spin_unlock(&iucv->message_q.lock);
}
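/*
 * Editorial note: message_q holds IUCV message descriptors that could
 * not be turned into skbs immediately (receive buffer full or backlog
 * pending); iucv_sock_recvmsg() drains it via iucv_process_message_q()
 * once the backlog queue has emptied.
 */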

static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
{
        struct sock *sk = path->private;
        struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb = list->next;
        unsigned long flags;

        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);

                while (list_skb != (struct sk_buff *)list) {
                        if (!memcmp(&msg->tag, list_skb->cb, 4)) {
                                this = list_skb;
                                break;
                        }
                        list_skb = list_skb->next;
                }
                if (this)
                        __skb_unlink(this, list);

                spin_unlock_irqrestore(&list->lock, flags);

                /* a completed message must match an skb on the send
                 * queue; only assert while the queue was non-empty so
                 * that a queue purged during close does not trip the
                 * BUG */
                BUG_ON(!this);
                kfree_skb(this);
        }

        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
                        sk->sk_state = IUCV_CLOSED;
                        sk->sk_state_change(sk);
                }
        }
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        if (!list_empty(&iucv_sk(sk)->accept_q))
                sk->sk_state = IUCV_SEVERED;
        else
                sk->sk_state = IUCV_DISCONN;

        sk->sk_state_change(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
        struct sock *sk = path->private;

        bh_lock_sock(sk);
        if (sk->sk_state != IUCV_CLOSED) {
                sk->sk_shutdown |= SEND_SHUTDOWN;
                sk->sk_state_change(sk);
        }
        bh_unlock_sock(sk);
}

static struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,
        .owner          = THIS_MODULE,
        .release        = iucv_sock_release,
        .bind           = iucv_sock_bind,
        .connect        = iucv_sock_connect,
        .listen         = iucv_sock_listen,
        .accept         = iucv_sock_accept,
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
        .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
        .shutdown       = iucv_sock_shutdown,
        .setsockopt     = iucv_sock_setsockopt,
        .getsockopt     = iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
        .family = AF_IUCV,
        .owner  = THIS_MODULE,
        .create = iucv_sock_create,
};

static int __init afiucv_init(void)
{
        int err;

        if (!MACHINE_IS_VM) {
                pr_err("The af_iucv module cannot be loaded"
                       " without z/VM\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
                WARN_ON(err);
                err = -EPROTONOSUPPORT;
                goto out;
        }

        err = iucv_register(&af_iucv_handler, 0);
        if (err)
                goto out;
        err = proto_register(&iucv_proto, 0);
        if (err)
                goto out_iucv;
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
        return 0;

out_proto:
        proto_unregister(&iucv_proto);
out_iucv:
        iucv_unregister(&af_iucv_handler, 0);
out:
        return err;
}

static void __exit afiucv_exit(void)
{
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);