[NETFILTER]: x_tables: add xt_{match,target} arguments to match/target functions
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 054de24..3b74034 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -35,7 +35,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
-#include <linux/tcp_diag.h>
+#include <linux/inet_diag.h>
 
 #include <net/tcp.h>
 
@@ -215,14 +215,6 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                vegas->beg_snd_nxt  = tp->snd_nxt;
                vegas->beg_snd_cwnd = tp->snd_cwnd;
 
-               /* Take into account the current RTT sample too, to
-                * decrease the impact of delayed acks. This double counts
-                * this sample since we count it for the next window as well,
-                * but that's not too awful, since we're taking the min,
-                * rather than averaging.
-                */
-               tcp_vegas_rtt_calc(sk, seq_rtt * 1000);
-
                /* We do the Vegas calculations only if we got enough RTT
                 * samples that we can be reasonably sure that we got
                 * at least one RTT sample that wasn't from a delayed ACK.
@@ -236,8 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                        /* We don't have enough RTT samples to do the Vegas
                         * calculation, so we'll behave like Reno.
                         */
-                       if (tp->snd_cwnd > tp->snd_ssthresh)
-                               tp->snd_cwnd++;
+                       tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
                } else {
                        u32 rtt, target_cwnd, diff;
 
@@ -275,7 +266,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                         */
                        diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-                       if (tp->snd_cwnd < tp->snd_ssthresh) {
+                       if (tp->snd_cwnd <= tp->snd_ssthresh) {
                                /* Slow start.  */
                                if (diff > gamma) {
                                        /* Going too fast. Time to slow down
@@ -295,6 +286,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                                                            V_PARAM_SHIFT)+1);
 
                                }
+                               tcp_slow_start(tp);
                        } else {
                                /* Congestion avoidance. */
                                u32 next_snd_cwnd;
@@ -327,37 +319,21 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
                                else if (next_snd_cwnd < tp->snd_cwnd)
                                        tp->snd_cwnd--;
                        }
+
+                       if (tp->snd_cwnd < 2)
+                               tp->snd_cwnd = 2;
+                       else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+                               tp->snd_cwnd = tp->snd_cwnd_clamp;
                }
 
                /* Wipe the slate clean for the next RTT. */
                vegas->cntRTT = 0;
                vegas->minRTT = 0x7fffffff;
        }
-
-       /* The following code is executed for every ack we receive,
-        * except for conditions checked in should_advance_cwnd()
-        * before the call to tcp_cong_avoid(). Mainly this means that
-        * we only execute this code if the ack actually acked some
-        * data.
-        */
-
-       /* If we are in slow start, increase our cwnd in response to this ACK.
-        * (If we are not in slow start then we are in congestion avoidance,
-        * and adjust our congestion window only once per RTT. See the code
-        * above.)
-        */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
-               tp->snd_cwnd++;
-
-       /* to keep cwnd from growing without bound */
-       tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-
-       /* Make sure that we are never so timid as to reduce our cwnd below
-        * 2 MSS.
-        *
-        * Going below 2 MSS would risk huge delayed ACKs from our receiver.
-        */
-       tp->snd_cwnd = max(tp->snd_cwnd, 2U);
+       /* Use normal slow start */
+       else if (tp->snd_cwnd <= tp->snd_ssthresh) 
+               tcp_slow_start(tp);
+       
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
@@ -365,10 +341,10 @@ static void tcp_vegas_get_info(struct sock *sk, u32 ext,
                               struct sk_buff *skb)
 {
        const struct vegas *ca = inet_csk_ca(sk);
-       if (ext & (1<<(TCPDIAG_VEGASINFO-1))) {
+       if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                struct tcpvegas_info *info;
 
-               info = RTA_DATA(__RTA_PUT(skb, TCPDIAG_VEGASINFO,
+               info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO,
                                          sizeof(*info)));
 
                info->tcpv_enabled = ca->doing_vegas_now;