Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/net/core/stream.c b/net/core/stream.c
index 755bacb..cc196f4 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -9,7 +9,7 @@
  *
  *     Authors:        Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *                     (from old tcp.c code)
- *                     Alan Cox <alan@redhat.com> (Borrowed comments 8-))
+ *                     Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
  */
 
 #include <linux/module.h>
@@ -28,14 +28,19 @@
 void sk_stream_write_space(struct sock *sk)
 {
        struct socket *sock = sk->sk_socket;
+       struct socket_wq *wq;
 
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
                clear_bit(SOCK_NOSPACE, &sock->flags);
 
-               if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                       wake_up_interruptible(sk->sk_sleep);
-               if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
-                       sock_wake_async(sock, 2, POLL_OUT);
+               rcu_read_lock();
+               wq = rcu_dereference(sk->sk_wq);
+               if (wq_has_sleeper(wq))
+                       wake_up_interruptible_poll(&wq->wait, POLLOUT |
+                                               POLLWRNORM | POLLWRBAND);
+               if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+                       sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+               rcu_read_unlock();
        }
 }
 
@@ -65,13 +70,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
                if (signal_pending(tsk))
                        return sock_intr_errno(*timeo_p);
 
-               prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                sk->sk_write_pending++;
                done = sk_wait_event(sk, timeo_p,
                                     !sk->sk_err &&
                                     !((1 << sk->sk_state) &
                                       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
-               finish_wait(sk->sk_sleep, &wait);
+               finish_wait(sk_sleep(sk), &wait);
                sk->sk_write_pending--;
        } while (!done);
        return 0;
@@ -95,13 +100,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
                DEFINE_WAIT(wait);
 
                do {
-                       prepare_to_wait(sk->sk_sleep, &wait,
+                       prepare_to_wait(sk_sleep(sk), &wait,
                                        TASK_INTERRUPTIBLE);
                        if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
                                break;
                } while (!signal_pending(current) && timeout);
 
-               finish_wait(sk->sk_sleep, &wait);
+               finish_wait(sk_sleep(sk), &wait);
        }
 }
 
@@ -125,7 +130,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
        while (1) {
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 
-               prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
@@ -156,7 +161,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
                *timeo_p = current_timeo;
        }
 out:
-       finish_wait(sk->sk_sleep, &wait);
+       finish_wait(sk_sleep(sk), &wait);
        return err;
 
 do_error:
@@ -172,17 +177,6 @@ do_interrupted:
 
 EXPORT_SYMBOL(sk_stream_wait_memory);
 
-void sk_stream_rfree(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       skb_truesize_check(skb);
-       atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
-       sk->sk_forward_alloc += skb->truesize;
-}
-
-EXPORT_SYMBOL(sk_stream_rfree);
-
 int sk_stream_error(struct sock *sk, int flags, int err)
 {
        if (err == -EPIPE)
@@ -194,76 +188,6 @@ int sk_stream_error(struct sock *sk, int flags, int err)
 
 EXPORT_SYMBOL(sk_stream_error);
 
-void __sk_stream_mem_reclaim(struct sock *sk)
-{
-       atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
-                  sk->sk_prot->memory_allocated);
-       sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
-       if (*sk->sk_prot->memory_pressure &&
-           (atomic_read(sk->sk_prot->memory_allocated) <
-            sk->sk_prot->sysctl_mem[0]))
-               *sk->sk_prot->memory_pressure = 0;
-}
-
-EXPORT_SYMBOL(__sk_stream_mem_reclaim);
-
-int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
-{
-       int amt = sk_stream_pages(size);
-
-       sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
-       atomic_add(amt, sk->sk_prot->memory_allocated);
-
-       /* Under limit. */
-       if (atomic_read(sk->sk_prot->memory_allocated) < sk->sk_prot->sysctl_mem[0]) {
-               if (*sk->sk_prot->memory_pressure)
-                       *sk->sk_prot->memory_pressure = 0;
-               return 1;
-       }
-
-       /* Over hard limit. */
-       if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[2]) {
-               sk->sk_prot->enter_memory_pressure();
-               goto suppress_allocation;
-       }
-
-       /* Under pressure. */
-       if (atomic_read(sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[1])
-               sk->sk_prot->enter_memory_pressure();
-
-       if (kind) {
-               if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_prot->sysctl_rmem[0])
-                       return 1;
-       } else if (sk->sk_wmem_queued < sk->sk_prot->sysctl_wmem[0])
-               return 1;
-
-       if (!*sk->sk_prot->memory_pressure ||
-           sk->sk_prot->sysctl_mem[2] > atomic_read(sk->sk_prot->sockets_allocated) *
-                               sk_stream_pages(sk->sk_wmem_queued +
-                                               atomic_read(&sk->sk_rmem_alloc) +
-                                               sk->sk_forward_alloc))
-               return 1;
-
-suppress_allocation:
-
-       if (!kind) {
-               sk_stream_moderate_sndbuf(sk);
-
-               /* Fail only if socket is _under_ its sndbuf.
-                * In this case we cannot block, so that we have to fail.
-                */
-               if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
-                       return 1;
-       }
-
-       /* Alas. Undo changes. */
-       sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
-       atomic_sub(amt, sk->sk_prot->memory_allocated);
-       return 0;
-}
-
-EXPORT_SYMBOL(sk_stream_mem_schedule);
-
 void sk_stream_kill_queues(struct sock *sk)
 {
        /* First the read buffer. */
@@ -273,13 +197,13 @@ void sk_stream_kill_queues(struct sock *sk)
        __skb_queue_purge(&sk->sk_error_queue);
 
        /* Next, the write queue. */
-       BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+       WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 
        /* Account for returned memory. */
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
-       BUG_TRAP(!sk->sk_wmem_queued);
-       BUG_TRAP(!sk->sk_forward_alloc);
+       WARN_ON(sk->sk_wmem_queued);
+       WARN_ON(sk->sk_forward_alloc);
 
        /* It is _impossible_ for the backlog to contain anything
         * when we get here.  All user references to this socket