[NETFILTER]: x_tables: add xt_{match,target} arguments to match/target functions
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b614ad4..7c1bde3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -58,7 +58,7 @@ static void tcp_write_err(struct sock *sk)
  * to prevent DoS attacks. It is called when a retransmission timeout
  * or zero probe timeout occurs on orphaned socket.
  *
- * Criterium is still not confirmed experimentally and may change.
+ * Criteria is still not confirmed experimentally and may change.
  * We kill the socket, if:
  * 1. If number of orphaned sockets exceeds an administratively configured
  *    limit.
@@ -119,8 +119,10 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
 /* A write timeout has occurred. Process the after effects. */
 static int tcp_write_timeout(struct sock *sk)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
        int retry_until;
+       int mss;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
@@ -128,25 +130,19 @@ static int tcp_write_timeout(struct sock *sk)
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
        } else {
                if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
-                       /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
-                          hole detection. :-(
-
-                          It is place to make it. It is not made. I do not want
-                          to make it. It is disguisting. It does not work in any
-                          case. Let me to cite the same draft, which requires for
-                          us to implement this:
-
-   "The one security concern raised by this memo is that ICMP black holes
-   are often caused by over-zealous security administrators who block
-   all ICMP messages.  It is vitally important that those who design and
-   deploy security systems understand the impact of strict filtering on
-   upper-layer protocols.  The safest web site in the world is worthless
-   if most TCP implementations cannot transfer data from it.  It would
-   be far nicer to have all of the black holes fixed rather than fixing
-   all of the TCP implementations."
-
-                           Golden words :-).
-                  */
+                       /* Black hole detection */
+                       if (sysctl_tcp_mtu_probing) {
+                               if (!icsk->icsk_mtup.enabled) {
+                                       icsk->icsk_mtup.enabled = 1;
+                                       tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+                               } else {
+                                       mss = min(sysctl_tcp_base_mss,
+                                                 tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2);
+                                       mss = max(mss, 68 - tp->tcp_header_len);
+                                       icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+                                       tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+                               }
+                       }
 
                        dst_negative_advice(&sk->sk_dst_cache);
                }
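
The replacement block turns the old complaint into working black-hole detection: the first timeout past tcp_retries1 switches MTU probing on, and each later timeout halves the probing floor. The standalone sketch below models that halving schedule; the 40-byte header overhead, the 512-byte base MSS, and the 1500-byte starting MTU are illustrative stand-ins for tcp_mtu_to_mss()/tcp_mss_to_mtu() and sysctl_tcp_base_mss, not the kernel's exact arithmetic.

    #include <stdio.h>

    #define HEADER_OVERHEAD 40   /* illustrative IPv4 + TCP header size */
    #define TCP_BASE_MSS    512  /* stand-in for sysctl_tcp_base_mss */

    static int mtu_to_mss(int mtu) { return mtu - HEADER_OVERHEAD; }
    static int mss_to_mtu(int mss) { return mss + HEADER_OVERHEAD; }
    static int min_int(int a, int b) { return a < b ? a : b; }
    static int max_int(int a, int b) { return a > b ? a : b; }

    int main(void)
    {
        int search_low = 1500;      /* hypothetical starting probe MTU */
        int tcp_header_len = 40;    /* hypothetical tp->tcp_header_len */

        /* Each retransmission timeout past tcp_retries1 halves the MSS,
         * clamped below by the 68-byte minimum IPv4 MTU minus headers. */
        for (int timeout = 1; timeout <= 5; timeout++) {
            int mss = min_int(TCP_BASE_MSS, mtu_to_mss(search_low) / 2);
            mss = max_int(mss, 68 - tcp_header_len);
            search_low = mss_to_mtu(mss);
            printf("timeout %d: mss=%d search_low=%d\n",
                   timeout, mss, search_low);
        }
        return 0;
    }

From a 1500-byte starting MTU this converges toward the 68-byte IPv4 minimum within a handful of timeouts, which is the point of the lower clamp: the probe floor can shrink but never below a deliverable datagram.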
@@ -233,11 +229,12 @@ out_unlock:
 
 static void tcp_probe_timer(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;
 
        if (tp->packets_out || !sk->sk_send_head) {
-               tp->probes_out = 0;
+               icsk->icsk_probes_out = 0;
                return;
        }
 
@@ -248,7 +245,7 @@ static void tcp_probe_timer(struct sock *sk)
         * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
         * this behaviour in Solaris down as a bug fix. [AC]
         *
-        * Let me to explain. probes_out is zeroed by incoming ACKs
+        * Let me to explain. icsk_probes_out is zeroed by incoming ACKs
         * even if they advertise zero window. Hence, connection is killed only
         * if we received no ACKs for normal connection timeout. It is not killed
         * only because window stays zero for some time, window may be zero
@@ -259,16 +256,15 @@ static void tcp_probe_timer(struct sock *sk)
        max_probes = sysctl_tcp_retries2;
 
        if (sock_flag(sk, SOCK_DEAD)) {
-               const struct inet_connection_sock *icsk = inet_csk(sk);
                const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
  
                max_probes = tcp_orphan_retries(sk, alive);
 
-               if (tcp_out_of_resources(sk, alive || tp->probes_out <= max_probes))
+               if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
                        return;
        }
 
-       if (tp->probes_out > max_probes) {
+       if (icsk->icsk_probes_out > max_probes) {
                tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
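
This hunk's effect is easiest to see as a single predicate: the probe counter, now icsk_probes_out, is compared against a cap that defaults to sysctl_tcp_retries2 but shrinks for orphaned sockets, with "alive" meaning the backed-off RTO still fits under TCP_RTO_MAX. A rough user-space model of that decision follows; the retries2 value, the orphan_retries() fallback, and the millisecond constants are assumptions for illustration, not the kernel's tuning.

    #include <stdbool.h>
    #include <stdio.h>

    #define TCP_RTO_MAX (120 * 1000)   /* illustrative: 120 s in ms */

    /* Stand-ins for sysctl_tcp_retries2 and tcp_orphan_retries(). */
    static int retries2 = 15;
    static int orphan_retries(bool alive) { return alive ? 8 : 0; }

    static bool should_kill(int probes_out, int rto_ms, int backoff,
                            bool dead)
    {
        int max_probes = retries2;

        if (dead) {
            /* The backed-off RTO still fitting under TCP_RTO_MAX means
             * the peer answered recently enough to count as alive. */
            bool alive = (rto_ms << backoff) < TCP_RTO_MAX;
            max_probes = orphan_retries(alive);
        }
        return probes_out > max_probes;
    }

    int main(void)
    {
        printf("live socket, 3 probes:  kill=%d\n",
               should_kill(3, 200, 4, false));
        printf("orphan, huge backoff:   kill=%d\n",
               should_kill(1, 200, 16, true));
        return 0;
    }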
@@ -319,19 +315,20 @@ static void tcp_retransmit_timer(struct sock *sk)
                goto out;
 
        if (icsk->icsk_retransmits == 0) {
-               if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
+               if (icsk->icsk_ca_state == TCP_CA_Disorder ||
+                   icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tp->rx_opt.sack_ok) {
-                               if (tp->ca_state == TCP_CA_Recovery)
+                               if (icsk->icsk_ca_state == TCP_CA_Recovery)
                                        NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
                                else
                                        NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
                        } else {
-                               if (tp->ca_state == TCP_CA_Recovery)
+                               if (icsk->icsk_ca_state == TCP_CA_Recovery)
                                        NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
                                else
                                        NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
                        }
-               } else if (tp->ca_state == TCP_CA_Loss) {
+               } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
                } else {
                        NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
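
The congestion-avoidance state moves into inet_connection_sock here, but the accounting it drives is unchanged: on the first retransmission timeout, the (ca_state, sack_ok) pair picks one of six MIB counters. A compact restatement of that selection, with the TCP_CA_* and counter names taken from the diff and everything else illustrative scaffolding:

    #include <stdio.h>

    enum ca_state { TCP_CA_Open, TCP_CA_Disorder, TCP_CA_Recovery,
                    TCP_CA_Loss };

    /* Which failure counter the first RTO bumps, by recovery state. */
    static const char *timeout_counter(enum ca_state state, int sack_ok)
    {
        if (state == TCP_CA_Disorder || state == TCP_CA_Recovery) {
            if (sack_ok)
                return state == TCP_CA_Recovery ? "TCPSACKRECOVERYFAIL"
                                                : "TCPSACKFAILURES";
            return state == TCP_CA_Recovery ? "TCPRENORECOVERYFAIL"
                                            : "TCPRENOFAILURES";
        }
        return state == TCP_CA_Loss ? "TCPLOSSFAILURES" : "TCPTIMEOUTS";
    }

    int main(void)
    {
        printf("%s\n", timeout_counter(TCP_CA_Recovery, 1));
        printf("%s\n", timeout_counter(TCP_CA_Open, 0));
        return 0;
    }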
@@ -424,103 +421,14 @@ out_unlock:
        sock_put(sk);
 }
 
-void reqsk_queue_prune(struct request_sock_queue *queue, struct sock *parent,
-                      const unsigned long interval, const unsigned long timeout,
-                      const unsigned long max_rto, int max_retries)
-{
-       struct inet_connection_sock *icsk = inet_csk(parent);
-       struct listen_sock *lopt = queue->listen_opt;
-       int thresh = max_retries;
-       unsigned long now = jiffies;
-       struct request_sock **reqp, *req;
-       int i, budget;
-
-       if (lopt == NULL || lopt->qlen == 0)
-               return;
-
-       /* Normally all the openreqs are young and become mature
-        * (i.e. converted to established socket) for first timeout.
-        * If synack was not acknowledged for 3 seconds, it means
-        * one of the following things: synack was lost, ack was lost,
-        * rtt is high or nobody planned to ack (i.e. synflood).
-        * When server is a bit loaded, queue is populated with old
-        * open requests, reducing effective size of queue.
-        * When server is well loaded, queue size reduces to zero
-        * after several minutes of work. It is not synflood,
-        * it is normal operation. The solution is pruning
-        * too old entries overriding normal timeout, when
-        * situation becomes dangerous.
-        *
-        * Essentially, we reserve half of room for young
-        * embrions; and abort old ones without pity, if old
-        * ones are about to clog our table.
-        */
-       if (lopt->qlen>>(lopt->max_qlen_log-1)) {
-               int young = (lopt->qlen_young<<1);
-
-               while (thresh > 2) {
-                       if (lopt->qlen < young)
-                               break;
-                       thresh--;
-                       young <<= 1;
-               }
-       }
-
-       if (queue->rskq_defer_accept)
-               max_retries = queue->rskq_defer_accept;
-
-       budget = 2 * (lopt->nr_table_entries / (timeout / interval));
-       i = lopt->clock_hand;
-
-       do {
-               reqp=&lopt->syn_table[i];
-               while ((req = *reqp) != NULL) {
-                       if (time_after_eq(now, req->expires)) {
-                               if ((req->retrans < thresh ||
-                                    (inet_rsk(req)->acked && req->retrans < max_retries))
-                                   && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) {
-                                       unsigned long timeo;
-
-                                       if (req->retrans++ == 0)
-                                               lopt->qlen_young--;
-                                       timeo = min((timeout << req->retrans), max_rto);
-                                       req->expires = now + timeo;
-                                       reqp = &req->dl_next;
-                                       continue;
-                               }
-
-                               /* Drop this request */
-                               inet_csk_reqsk_queue_unlink(parent, req, reqp);
-                               reqsk_queue_removed(&icsk->icsk_accept_queue, req);
-                               reqsk_free(req);
-                               continue;
-                       }
-                       reqp = &req->dl_next;
-               }
-
-               i = (i + 1) & (lopt->nr_table_entries - 1);
-
-       } while (--budget > 0);
-
-       lopt->clock_hand = i;
-
-       if (lopt->qlen)
-               inet_csk_reset_keepalive_timer(parent, interval);
-}
-
-EXPORT_SYMBOL_GPL(reqsk_queue_prune);
-
 /*
  *     Timer for listening sockets
  */
 
 static void tcp_synack_timer(struct sock *sk)
 {
-       struct inet_connection_sock *icsk = inet_csk(sk);
-       const int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
-
-       reqsk_queue_prune(&icsk->icsk_accept_queue, sk, TCP_SYNQ_INTERVAL,
-                         TCP_TIMEOUT_INIT, TCP_RTO_MAX, max_retries);
+       inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
+                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 }
 
 void tcp_set_keepalive(struct sock *sk, int val)
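
The pruning logic itself is not lost, only relocated: tcp_synack_timer() now delegates to inet_csk_reqsk_queue_prune(), presumably living with the other inet_csk helpers, and the retry cap is derived inside the helper rather than passed in. The interesting part of what moved is the adaptive threshold from the removed comment: once the SYN queue is more than half full, the allowed retransmission count drops a step for each factor of two by which queued entries outnumber twice the young (never-retransmitted) ones, so a burst of genuinely fresh SYNs is left alone. A standalone sketch of that threshold walk, with hypothetical queue numbers:

    #include <stdio.h>

    /* Stand-in for the threshold loop in the relocated prune function;
     * qlen, qlen_young and max_qlen_log mirror struct listen_sock. */
    static int prune_thresh(int max_retries, int qlen, int qlen_young,
                            int max_qlen_log)
    {
        int thresh = max_retries;

        /* Queue more than half full: trade retries for room, backing
         * off one step per doubling until young entries dominate. */
        if (qlen >> (max_qlen_log - 1)) {
            int young = qlen_young << 1;

            while (thresh > 2) {
                if (qlen < young)
                    break;
                thresh--;
                young <<= 1;
            }
        }
        return thresh;
    }

    int main(void)
    {
        /* 1024-entry table (max_qlen_log = 10), 600 queued, 20 young:
         * the threshold collapses from 5 retries down to 2. */
        printf("thresh = %d\n", prune_thresh(5, 600, 20, 10));
        return 0;
    }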
@@ -538,6 +446,7 @@ void tcp_set_keepalive(struct sock *sk, int val)
 static void tcp_keepalive_timer (unsigned long data)
 {
        struct sock *sk = (struct sock *) data;
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 elapsed;
 
@@ -579,14 +488,14 @@ static void tcp_keepalive_timer (unsigned long data)
        elapsed = tcp_time_stamp - tp->rcv_tstamp;
 
        if (elapsed >= keepalive_time_when(tp)) {
-               if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) ||
-                    (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) {
+               if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) ||
+                    (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk) <= 0) {
-                       tp->probes_out++;
+                       icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If keepalive was lost due to local congestion,
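
The keepalive hunk above applies the same counter migration: the per-socket override (tp->keepalive_probes) still wins over the sysctl, and only the counter's home changes to icsk_probes_out. A user-space restatement of the kill test, with a hypothetical default standing in for sysctl_tcp_keepalive_probes:

    #include <stdbool.h>
    #include <stdio.h>

    #define SYSCTL_KEEPALIVE_PROBES 9   /* illustrative default */

    /* keepalive_probes == 0 means "no per-socket override, use the
     * sysctl", mirroring the keepalive_*_when() fallback pattern. */
    static bool keepalive_exhausted(int probes_out, int keepalive_probes)
    {
        int limit = keepalive_probes ? keepalive_probes
                                     : SYSCTL_KEEPALIVE_PROBES;
        return probes_out >= limit;
    }

    int main(void)
    {
        printf("defaults, 9 unanswered probes: %d\n",
               keepalive_exhausted(9, 0));   /* 1: reset and error out */
        printf("override of 3, 2 probes:      %d\n",
               keepalive_exhausted(2, 3));   /* 0: send another probe */
        return 0;
    }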