/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

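/*
 * Note: the 16-byte IUCV user data is built from two 8-byte halves:
 * high_nmcpy() fills bytes 0-7, low_nmcpy() fills bytes 8-15, and the
 * result is converted with ASCEBC() before it is handed to the IUCV
 * base layer, since z/VM expects EBCDIC. A minimal sketch of the
 * pattern used by iucv_sock_close() and iucv_sock_connect() below
 * (illustrative only):
 *
 *	unsigned char user_data[16];
 *	high_nmcpy(user_data, dst_name);	// bytes 0..7:  peer name
 *	low_nmcpy(user_data, src_name);		// bytes 8..15: own name
 *	ASCEBC(user_data, sizeof(user_data));	// convert to EBCDIC
 */
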
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	/* IUCV buffers must be 31-bit addressable, hence GFP_DMA */
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

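/*
 * Usage sketch (hypothetical userspace program, not part of this file):
 * an AF_IUCV socket is created like any other stream socket and bound
 * to an 8-byte application name; the VM user id is filled in by the
 * kernel. Field names follow struct sockaddr_iucv from
 * <net/iucv/af_iucv.h>.
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_name, "APPLSRV ", 8);	// blank-padded, no NUL
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(fd, 10);
 */
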
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = -EINVAL;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
				sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

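/*
 * Note on the tag handling above: the 32-bit send tag stored in
 * skb->cb is the only link between a queued skb and the IUCV message
 * it was sent as. iucv_callback_txdone() walks send_skb_q comparing
 * msg->tag against the first four bytes of each skb->cb to find and
 * free the completed skb, so the memcpy of the tag here must stay in
 * sync with the memcmp in the completion handler.
 */
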
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
			}
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

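/*
 * Note on the backlog queue above: skbs that arrive while
 * sk_receive_queue is full are parked on backlog_skb_q by the receive
 * callback; recvmsg() drains them back into the receive queue once the
 * reader has made room, preserving arrival order.
 */
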
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	/* Map SHUT_RD/SHUT_WR/SHUT_RDWR (0..2) onto the shutdown mask */
	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
			     struct sk_buff_head *fragmented_skb_q)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(fragmented_skb_q, nskb);
	}

	return 0;
}

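/*
 * Worked example for the sk_rcvbuf / 4 chunking above (illustrative
 * numbers only): with an sk_rcvbuf of 64 KiB, an inbound 100000-byte
 * IUCV message is split into 16 KiB fragments: 6 x 16384 bytes plus a
 * final 1696-byte fragment, each allocated with GFP_DMA like every
 * other receive buffer in this module.
 */
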
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb, *fskb;
	struct sk_buff_head fragmented_skb_q;
	int rc;

	skb_queue_head_init(&fragmented_skb_q);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb) {
		iucv_path_sever(path, NULL);
		return;
	}

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length,
					       &fragmented_skb_q);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	/* Queue the fragmented skb */
	fskb = skb_dequeue(&fragmented_skb_q);
	while (fskb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv->backlog_skb_q, fskb);
		else if (sock_queue_rcv_skb(sk, fskb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
		fskb = skb_dequeue(&fragmented_skb_q);
	}

	/* Queue the original skb if it exists (was not fragmented) */
	if (skb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
		else if (sock_queue_rcv_skb(sk, skb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	}
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (list_skb) {
		spin_lock_irqsave(&list->lock, flags);

		/* Find the skb whose stored send tag matches this message */
		do {
			this = list_skb;
			list_skb = list_skb->next;
		} while (memcmp(&msg->tag, this->cb, 4) && list_skb);

		spin_unlock_irqrestore(&list->lock, flags);

		skb_unlink(this, &iucv_sk(sk)->send_skb_q);
		kfree_skb(this);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);