diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a485496..49e7865 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -8,6 +8,9 @@
  *  Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
  */
 
+#define KMSG_COMPONENT "af_iucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/list.h>
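
The KMSG_COMPONENT/pr_fmt pair must sit above the includes so that
linux/kernel.h picks up the override; every pr_err()/pr_warn() in this
file is then prefixed with "af_iucv: " automatically. A minimal sketch
of the effect (the message text is illustrative, not from this patch):

        pr_err("connect failed\n");
        /* preprocesses to printk(KERN_ERR "af_iucv" ": " "connect failed\n"),
         * logged as "af_iucv: connect failed" */
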
@@ -41,15 +44,19 @@ static struct proto iucv_proto = {
        .obj_size       = sizeof(struct iucv_sock),
 };
 
+static void iucv_sock_kill(struct sock *sk);
+static void iucv_sock_close(struct sock *sk);
+
 /* Call Back functions */
 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
-static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
+                                u8 ipuser[16]);
 static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
 
 static struct iucv_sock_list iucv_sk_list = {
-       .lock = RW_LOCK_UNLOCKED,
+       .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
        .autobind_name = ATOMIC_INIT(0)
 };
 
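RW_LOCK_UNLOCKED initializes every user with the same anonymous lock
class, which defeats lockdep; __RW_LOCK_UNLOCKED(name) ties the class
key to the named lock. The same conversion for any static rwlock, shown
with a hypothetical lock name:

        static rwlock_t demo_lock = RW_LOCK_UNLOCKED;              /* old, deprecated */
        static rwlock_t demo_lock = __RW_LOCK_UNLOCKED(demo_lock); /* lockdep-aware   */
        /* a standalone static rwlock can also use: DEFINE_RWLOCK(demo_lock); */
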
@@ -90,13 +97,6 @@ static void iucv_sock_clear_timer(struct sock *sk)
        sk_stop_timer(sk, &sk->sk_timer);
 }
 
-static void iucv_sock_init_timer(struct sock *sk)
-{
-       init_timer(&sk->sk_timer);
-       sk->sk_timer.function = iucv_sock_timeout;
-       sk->sk_timer.data = (unsigned long)sk;
-}
-
 static struct sock *__iucv_get_sock_by_name(char *nm)
 {
        struct sock *sk;
@@ -147,11 +147,12 @@ static void iucv_sock_close(struct sock *sk)
        unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        int err;
+       unsigned long timeo;
 
        iucv_sock_clear_timer(sk);
        lock_sock(sk);
 
-       switch(sk->sk_state) {
+       switch (sk->sk_state) {
        case IUCV_LISTEN:
                iucv_sock_cleanup_listen(sk);
                break;
@@ -159,6 +160,21 @@ static void iucv_sock_close(struct sock *sk)
        case IUCV_CONNECTED:
        case IUCV_DISCONN:
                err = 0;
+
+               sk->sk_state = IUCV_CLOSING;
+               sk->sk_state_change(sk);
+
+               if (!skb_queue_empty(&iucv->send_skb_q)) {
+                       if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+                               timeo = sk->sk_lingertime;
+                       else
+                               timeo = IUCV_DISCONN_TIMEOUT;
+                       err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+               }
+
+               sk->sk_state = IUCV_CLOSED;
+               sk->sk_state_change(sk);
+
                if (iucv->path) {
                        low_nmcpy(user_data, iucv->src_name);
                        high_nmcpy(user_data, iucv->dst_name);
@@ -168,12 +184,11 @@ static void iucv_sock_close(struct sock *sk)
                        iucv->path = NULL;
                }
 
-               sk->sk_state = IUCV_CLOSED;
-               sk->sk_state_change(sk);
                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);
 
                skb_queue_purge(&iucv->send_skb_q);
+               skb_queue_purge(&iucv->backlog_skb_q);
 
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
@@ -181,7 +196,7 @@ static void iucv_sock_close(struct sock *sk)
        default:
                sock_set_flag(sk, SOCK_ZAPPED);
                break;
-       };
+       }
 
        release_sock(sk);
        iucv_sock_kill(sk);
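
The close path now transitions through a real IUCV_CLOSING state: if
skbs are still queued on send_skb_q, it waits for iucv_callback_txdone()
(further down) to flip the socket to IUCV_CLOSED, bounded either by the
SO_LINGER time or by IUCV_DISCONN_TIMEOUT. From userspace the linger
knob is the standard one; an illustrative sketch:

        struct linger lg = { .l_onoff = 1, .l_linger = 5 };

        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
        close(fd);      /* may now block up to 5 s while queued
                         * IUCV messages are confirmed by the peer */
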
@@ -197,13 +212,17 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
 {
        struct sock *sk;
 
-       sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
+       sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
        if (!sk)
                return NULL;
 
        sock_init_data(sock, sk);
        INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+       spin_lock_init(&iucv_sk(sk)->accept_q_lock);
        skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+       INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
+       spin_lock_init(&iucv_sk(sk)->message_q.lock);
+       skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
        iucv_sk(sk)->send_tag = 0;
 
        sk->sk_destruct = iucv_sock_destruct;
@@ -215,14 +234,14 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
        sk->sk_protocol = proto;
        sk->sk_state    = IUCV_OPEN;
 
-       iucv_sock_init_timer(sk);
+       setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);
 
        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
 }
 
 /* Create an IUCV socket */
-static int iucv_sock_create(struct socket *sock, int protocol)
+static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
 {
        struct sock *sk;
 
@@ -257,15 +276,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
 
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
 {
+       unsigned long flags;
+       struct iucv_sock *par = iucv_sk(parent);
+
        sock_hold(sk);
-       list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+       spin_lock_irqsave(&par->accept_q_lock, flags);
+       list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
+       spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        parent->sk_ack_backlog++;
 }
 
 void iucv_accept_unlink(struct sock *sk)
 {
+       unsigned long flags;
+       struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
+
+       spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
+       spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent->sk_ack_backlog--;
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);
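
The accept queue is now written from the IUCV callback path (interrupt
context, via iucv_callback_connreq()) while readers run in process
context, so the new accept_q_lock is always taken with interrupts
disabled. The general idiom for a list shared between the two contexts,
with hypothetical names:

        unsigned long flags;

        spin_lock_irqsave(&shared_lock, flags);   /* safe from any context */
        list_add_tail(&entry->node, &shared_list);
        spin_unlock_irqrestore(&shared_lock, flags);
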
@@ -276,13 +305,13 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
        struct iucv_sock *isk, *n;
        struct sock *sk;
 
-       list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+       list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
                lock_sock(sk);
 
                if (sk->sk_state == IUCV_CLOSED) {
-                       release_sock(sk);
                        iucv_accept_unlink(sk);
+                       release_sock(sk);
                        continue;
                }
 
@@ -456,12 +485,30 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
        /* Create path. */
        iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
                                     IPRMDATA, GFP_KERNEL);
+       if (!iucv->path) {
+               err = -ENOMEM;
+               goto done;
+       }
        err = iucv_path_connect(iucv->path, &af_iucv_handler,
                                sa->siucv_user_id, NULL, user_data, sk);
        if (err) {
                iucv_path_free(iucv->path);
                iucv->path = NULL;
-               err = -ECONNREFUSED;
+               switch (err) {
+               case 0x0b:      /* Target communicator is not logged on */
+                       err = -ENETUNREACH;
+                       break;
+               case 0x0d:      /* Max connections for this guest exceeded */
+               case 0x0e:      /* Max connections for target guest exceeded */
+                       err = -EAGAIN;
+                       break;
+               case 0x0f:      /* Missing IUCV authorization */
+                       err = -EACCES;
+                       break;
+               default:
+                       err = -ECONNREFUSED;
+                       break;
+               }
                goto done;
        }
 
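iucv_path_connect() hands back raw CP return codes; instead of
collapsing them all into -ECONNREFUSED, the switch translates the
documented ones so applications can tell a peer that is not running
from an exhausted connection limit. The mapping, restated from the
comments above:

        0x0b  target communicator not logged on   ->  -ENETUNREACH
        0x0d  own connection limit exceeded       ->  -EAGAIN
        0x0e  target connection limit exceeded    ->  -EAGAIN
        0x0f  missing IUCV authorization          ->  -EACCES
        other                                     ->  -ECONNREFUSED
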
@@ -474,6 +521,13 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                release_sock(sk);
                return -ECONNREFUSED;
        }
+
+       if (err) {
+               iucv_path_sever(iucv->path, NULL);
+               iucv_path_free(iucv->path);
+               iucv->path = NULL;
+       }
+
 done:
        release_sock(sk);
        return err;
@@ -510,7 +564,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
        long timeo;
        int err = 0;
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
        if (sk->sk_state != IUCV_LISTEN) {
                err = -EBADFD;
@@ -521,7 +575,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 
        /* Wait for an incoming connection */
        add_wait_queue_exclusive(sk->sk_sleep, &wait);
-       while (!(nsk = iucv_accept_dequeue(sk, newsock))){
+       while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        err = -EAGAIN;
@@ -530,7 +584,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
 
                release_sock(sk);
                timeo = schedule_timeout(timeo);
-               lock_sock(sk);
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
                if (sk->sk_state != IUCV_LISTEN) {
                        err = -EBADFD;
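
Listener and child sockets share one lock class, and
iucv_accept_dequeue() takes the child's lock while the listener is
still held; a plain lock_sock() on the parent therefore looks like
recursive locking to lockdep. The SINGLE_DEPTH_NESTING annotation moves
the outer acquisition to a separate lockdep subclass. Schematically:

        lock_sock_nested(parent, SINGLE_DEPTH_NESTING);
        lock_sock(child);       /* same class, taken in iucv_accept_dequeue() */
        /* ... */
        release_sock(child);
        release_sock(parent);
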
@@ -586,6 +640,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        struct iucv_message txmsg;
+       char user_id[9];
+       char appl_id[9];
        int err;
 
        err = sock_error(sk);
@@ -602,26 +658,34 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
        }
 
-       if (sk->sk_state == IUCV_CONNECTED){
-               if(!(skb = sock_alloc_send_skb(sk, len,
-                                      msg->msg_flags & MSG_DONTWAIT,
-                                      &err)))
-                       return err;
+       if (sk->sk_state == IUCV_CONNECTED) {
+               if (!(skb = sock_alloc_send_skb(sk, len,
+                                               msg->msg_flags & MSG_DONTWAIT,
+                                               &err)))
+                       goto out;
 
-               if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
+               if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                        err = -EFAULT;
                        goto fail;
                }
 
                txmsg.class = 0;
+               memcpy(&txmsg.class, skb->data, skb->len >= 4 ? 4 : skb->len);
                txmsg.tag = iucv->send_tag++;
                memcpy(skb->cb, &txmsg.tag, 4);
                skb_queue_tail(&iucv->send_skb_q, skb);
                err = iucv_message_send(iucv->path, &txmsg, 0, 0,
                                        (void *) skb->data, skb->len);
                if (err) {
-                       if (err == 3)
-                               printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
+                       if (err == 3) {
+                               user_id[8] = 0;
+                               memcpy(user_id, iucv->dst_user_id, 8);
+                               appl_id[8] = 0;
+                               memcpy(appl_id, iucv->dst_name, 8);
+                               pr_err("Application %s on z/VM guest %s"
+                                      " exceeds message limit\n",
+                                      appl_id, user_id);
+                       }
                        skb_unlink(skb, &iucv->send_skb_q);
                        err = -EPIPE;
                        goto fail;
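
Two send-path details: the first four payload bytes are mirrored into
txmsg.class, making them visible to the receiving side ahead of the
message body, and the 32-bit send tag is stashed in skb->cb (the
per-layer scratch area of an sk_buff) so the completion handler can
identify the skb. The tag round-trip, condensed:

        memcpy(skb->cb, &txmsg.tag, 4);         /* remember the message tag */
        skb_queue_tail(&iucv->send_skb_q, skb);
        /* later, iucv_callback_txdone() walks send_skb_q, unlinks the
         * entry whose cb[] matches msg->tag, and frees it */
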
@@ -642,15 +706,106 @@ out:
        return err;
 }
 
+static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+{
+       int dataleft, size, copied = 0;
+       struct sk_buff *nskb;
+
+       dataleft = len;
+       while (dataleft) {
+               if (dataleft >= sk->sk_rcvbuf / 4)
+                       size = sk->sk_rcvbuf / 4;
+               else
+                       size = dataleft;
+
+               nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
+               if (!nskb)
+                       return -ENOMEM;
+
+               memcpy(nskb->data, skb->data + copied, size);
+               copied += size;
+               dataleft -= size;
+
+               skb_reset_transport_header(nskb);
+               skb_reset_network_header(nskb);
+               nskb->len = size;
+
+               skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+       }
+
+       return 0;
+}
+
+static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
+                                struct iucv_path *path,
+                                struct iucv_message *msg)
+{
+       int rc;
+
+       if (msg->flags & IPRMDATA) {
+               skb->data = NULL;
+               skb->len = 0;
+       } else {
+               rc = iucv_message_receive(path, msg, 0, skb->data,
+                                         msg->length, NULL);
+               if (rc) {
+                       kfree_skb(skb);
+                       return;
+               }
+               if (skb->truesize >= sk->sk_rcvbuf / 4) {
+                       rc = iucv_fragment_skb(sk, skb, msg->length);
+                       kfree_skb(skb);
+                       skb = NULL;
+                       if (rc) {
+                               iucv_path_sever(path, NULL);
+                               return;
+                       }
+                       skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+               } else {
+                       skb_reset_transport_header(skb);
+                       skb_reset_network_header(skb);
+                       skb->len = msg->length;
+               }
+       }
+
+       if (sock_queue_rcv_skb(sk, skb))
+               skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
+}
+
+static void iucv_process_message_q(struct sock *sk)
+{
+       struct iucv_sock *iucv = iucv_sk(sk);
+       struct sk_buff *skb;
+       struct sock_msg_q *p, *n;
+
+       list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
+               skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
+               if (!skb)
+                       break;
+               iucv_process_message(sk, skb, p->path, &p->msg);
+               list_del(&p->list);
+               kfree(p);
+               if (!skb_queue_empty(&iucv->backlog_skb_q))
+                       break;
+       }
+}
+
 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                             struct msghdr *msg, size_t len, int flags)
 {
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
+       struct iucv_sock *iucv = iucv_sk(sk);
        int target, copied = 0;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *rskb, *cskb;
        int err = 0;
 
+       if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+           skb_queue_empty(&iucv->backlog_skb_q) &&
+           skb_queue_empty(&sk->sk_receive_queue) &&
+           list_empty(&iucv->message_q.list))
+               return 0;
+
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
 
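The receive side gains flow control: a message that would overflow
sk_rcvbuf is parked on message_q (header only, the data stays with
z/VM) and fetched later from recvmsg(), while a received buffer larger
than a quarter of sk_rcvbuf is split into chunks on backlog_skb_q. The
chunking rule in iucv_fragment_skb() reduces to:

        size = min(dataleft, sk->sk_rcvbuf / 4);

so, for example, a 150 KiB message against a 64 KiB receive buffer
becomes nine 16 KiB fragments plus one 6 KiB tail.
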
@@ -665,10 +820,12 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        copied = min_t(unsigned int, skb->len, len);
 
-       if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
+       cskb = skb;
+       if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                if (copied == 0)
                        return -EFAULT;
+               goto done;
        }
 
        len -= copied;
@@ -683,6 +840,25 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                }
 
                kfree_skb(skb);
+
+               /* Queue backlog skbs */
+               rskb = skb_dequeue(&iucv->backlog_skb_q);
+               while (rskb) {
+                       if (sock_queue_rcv_skb(sk, rskb)) {
+                               skb_queue_head(&iucv->backlog_skb_q,
+                                               rskb);
+                               break;
+                       } else {
+                               rskb = skb_dequeue(&iucv->backlog_skb_q);
+                       }
+               }
+               if (skb_queue_empty(&iucv->backlog_skb_q)) {
+                       spin_lock_bh(&iucv->message_q.lock);
+                       if (!list_empty(&iucv->message_q.list))
+                               iucv_process_message_q(sk);
+                       spin_unlock_bh(&iucv->message_q.lock);
+               }
+
        } else
                skb_queue_head(&sk->sk_receive_queue, skb);
 
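Ordering is preserved across the three stages: sk_receive_queue first,
then backlog_skb_q, and only once the backlog drains completely are
parked messages pulled off message_q. The refill loop above can be read
as this behaviorally equivalent sketch:

        while ((rskb = skb_dequeue(&iucv->backlog_skb_q))) {
                if (sock_queue_rcv_skb(sk, rskb)) {
                        /* receive buffer filled up again: put it back */
                        skb_queue_head(&iucv->backlog_skb_q, rskb);
                        break;
                }
        }
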
@@ -695,7 +871,7 @@ static inline unsigned int iucv_accept_poll(struct sock *parent)
        struct iucv_sock *isk, *n;
        struct sock *sk;
 
-       list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+       list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
                sk = (struct sock *) isk;
 
                if (sk->sk_state == IUCV_CONNECTED)
@@ -726,12 +902,15 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                mask |= POLLHUP;
 
        if (!skb_queue_empty(&sk->sk_receive_queue) ||
-                       (sk->sk_shutdown & RCV_SHUTDOWN))
+           (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= POLLIN | POLLRDNORM;
 
        if (sk->sk_state == IUCV_CLOSED)
                mask |= POLLHUP;
 
+       if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
+               mask |= POLLIN;
+
        if (sock_writeable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
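
Raising POLLIN for IUCV_DISCONN/IUCV_SEVERED pairs with the recvmsg()
guard above: a blocked poller wakes up and the subsequent read returns
0 (end of stream) instead of hanging on a severed path. The userspace
sequence, purely illustrative:

        char buf[256];
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        poll(&pfd, 1, -1);      /* wakes on data or on a severed path */
        if ((pfd.revents & POLLIN) && recv(fd, buf, sizeof(buf), 0) == 0)
                ;               /* peer gone: treat as EOF */
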
@@ -754,7 +933,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
                return -EINVAL;
 
        lock_sock(sk);
-       switch(sk->sk_state) {
+       switch (sk->sk_state) {
        case IUCV_CLOSED:
                err = -ENOTCONN;
                goto fail;
@@ -770,7 +949,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
                err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
                                        (void *) prmmsg, 8);
                if (err) {
-                       switch(err) {
+                       switch (err) {
                        case 1:
                                err = -ENOTCONN;
                                break;
@@ -817,13 +996,6 @@ static int iucv_sock_release(struct socket *sock)
                iucv_sk(sk)->path = NULL;
        }
 
-       if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
-               lock_sock(sk);
-               err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
-                                          sk->sk_lingertime);
-               release_sock(sk);
-       }
-
        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
@@ -846,6 +1018,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
        /* Find out if this path belongs to af_iucv. */
        read_lock(&iucv_sk_list.lock);
        iucv = NULL;
+       sk = NULL;
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk->sk_state == IUCV_LISTEN &&
                    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
@@ -869,19 +1042,22 @@ static int iucv_callback_connreq(struct iucv_path *path,
        ASCEBC(user_data, sizeof(user_data));
        if (sk->sk_state != IUCV_LISTEN) {
                err = iucv_path_sever(path, user_data);
+               iucv_path_free(path);
                goto fail;
        }
 
        /* Check for backlog size */
        if (sk_acceptq_is_full(sk)) {
                err = iucv_path_sever(path, user_data);
+               iucv_path_free(path);
                goto fail;
        }
 
        /* Create the new socket */
        nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
-       if (!nsk){
+       if (!nsk) {
                err = iucv_path_sever(path, user_data);
+               iucv_path_free(path);
                goto fail;
        }
 
@@ -903,8 +1079,10 @@ static int iucv_callback_connreq(struct iucv_path *path,
 
        path->msglim = IUCV_QUEUELEN_DEFAULT;
        err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
-       if (err){
+       if (err) {
                err = iucv_path_sever(path, user_data);
+               iucv_path_free(path);
+               iucv_sock_kill(nsk);
                goto fail;
        }
 
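Every refusal branch in iucv_callback_connreq() now pairs
iucv_path_sever() with iucv_path_free(): severing only tells CP that
the connection is declined, while freeing releases the kernel-side path
object that would otherwise leak. A failed iucv_path_accept()
additionally disposes of the half-built child socket:

        err = iucv_path_sever(path, user_data); /* decline towards CP   */
        iucv_path_free(path);                   /* release path object  */
        iucv_sock_kill(nsk);                    /* accept failure only  */
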
@@ -930,58 +1108,80 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
        struct sock *sk = path->private;
+       struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
-       int rc;
+       struct sock_msg_q *save_msg;
+       int len;
 
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return;
 
+       if (!list_empty(&iucv->message_q.list) ||
+           !skb_queue_empty(&iucv->backlog_skb_q))
+               goto save_message;
+
+       len = atomic_read(&sk->sk_rmem_alloc);
+       len += msg->length + sizeof(struct sk_buff);
+       if (len > sk->sk_rcvbuf)
+               goto save_message;
+
        skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
-       if (!skb) {
-               iucv_message_reject(path, msg);
-               return;
-       }
+       if (!skb)
+               goto save_message;
 
-       if (msg->flags & IPRMDATA) {
-               skb->data = NULL;
-               skb->len = 0;
-       } else {
-               rc = iucv_message_receive(path, msg, 0, skb->data,
-                                         msg->length, NULL);
-               if (rc) {
-                       kfree_skb(skb);
-                       return;
-               }
+       spin_lock(&iucv->message_q.lock);
+       iucv_process_message(sk, skb, path, msg);
+       spin_unlock(&iucv->message_q.lock);
 
-               skb->h.raw = skb->data;
-               skb_reset_network_header(skb);
-               skb->len = msg->length;
-       }
+       return;
 
-       if (sock_queue_rcv_skb(sk, skb))
-               kfree_skb(skb);
+save_message:
+       save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
+       if (!save_msg)
+               return;
+       save_msg->path = path;
+       save_msg->msg = *msg;
+
+       spin_lock(&iucv->message_q.lock);
+       list_add_tail(&save_msg->list, &iucv->message_q.list);
+       spin_unlock(&iucv->message_q.lock);
 }
 
 static void iucv_callback_txdone(struct iucv_path *path,
                                 struct iucv_message *msg)
 {
        struct sock *sk = path->private;
-       struct sk_buff *this;
+       struct sk_buff *this = NULL;
        struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
        struct sk_buff *list_skb = list->next;
        unsigned long flags;
 
-       spin_lock_irqsave(&list->lock, flags);
+       if (!skb_queue_empty(list)) {
+               spin_lock_irqsave(&list->lock, flags);
+
+               while (list_skb != (struct sk_buff *)list) {
+                       if (!memcmp(&msg->tag, list_skb->cb, 4)) {
+                               this = list_skb;
+                               break;
+                       }
+                       list_skb = list_skb->next;
+               }
+               if (this)
+                       __skb_unlink(this, list);
+
+               spin_unlock_irqrestore(&list->lock, flags);
 
-       do {
-               this = list_skb;
-               list_skb = list_skb->next;
-       } while (memcmp(&msg->tag, this->cb, 4));
+               kfree_skb(this);
+       }
+       BUG_ON(!this);
 
-       spin_unlock_irqrestore(&list->lock, flags);
+       if (sk->sk_state == IUCV_CLOSING) {
+               if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+                       sk->sk_state = IUCV_CLOSED;
+                       sk->sk_state_change(sk);
+               }
+       }
 
-       skb_unlink(this, &iucv_sk(sk)->send_skb_q);
-       kfree_skb(this);
 }
 
 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
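
iucv_callback_rx() runs from the IUCV interrupt path, so when the
socket cannot take the data right now the message header is queued
rather than rejected; recvmsg() calls iucv_message_receive() later,
once the application has made room. Allocations here must be GFP_ATOMIC
(no sleeping) and GFP_DMA (IUCV buffers must be 31-bit addressable on
s390). The queued node looks roughly like this (see the struct
sock_msg_q definition in the af_iucv header for the exact layout):

        struct sock_msg_q {
                struct iucv_path        *path;
                struct iucv_message     msg;    /* header only; data stays with CP */
                struct list_head        list;
                spinlock_t              lock;   /* used on the queue-head instance */
        };
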
@@ -1022,18 +1222,19 @@ static struct net_proto_family iucv_sock_family_ops = {
        .create = iucv_sock_create,
 };
 
-static int afiucv_init(void)
+static int __init afiucv_init(void)
 {
        int err;
 
        if (!MACHINE_IS_VM) {
-               printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
+               pr_err("The af_iucv module cannot be loaded"
+                      " without z/VM\n");
                err = -EPROTONOSUPPORT;
                goto out;
        }
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
-               printk(KERN_ERR "AF_IUCV needs the VM userid\n");
+               WARN_ON(err);
                err = -EPROTONOSUPPORT;
                goto out;
        }
@@ -1047,7 +1248,6 @@ static int afiucv_init(void)
        err = sock_register(&iucv_sock_family_ops);
        if (err)
                goto out_proto;
-       printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
        return 0;
 
 out_proto:
@@ -1063,8 +1263,6 @@ static void __exit afiucv_exit(void)
        sock_unregister(PF_IUCV);
        proto_unregister(&iucv_proto);
        iucv_unregister(&af_iucv_handler, 0);
-
-       printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
 }
 
 module_init(afiucv_init);