tcp: add helper for AI algorithm
author    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Sat, 28 Feb 2009 04:44:37 +0000 (04:44 +0000)
committer David S. Miller <davem@davemloft.net>
Mon, 2 Mar 2009 11:00:15 +0000 (03:00 -0800)
It seems that the implementation in yeah was inconsistent with what the
others did, as it would increase cwnd one ack earlier than the other
congestion avoidance modules do.
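
The difference is in the order of operations: the old yeah code bumped
snd_cwnd_cnt before testing it against snd_cwnd, while the shared counting
(now in tcp_cong_avoid_ai()) tests first and bumps afterwards, so yeah
needed one ack less per cwnd increment. Below is a minimal user-space
sketch contrasting the two schemes (not part of the patch; struct
sketch_tp, old_yeah_ai() and sketch_cong_avoid_ai() are made-up names,
for illustration only):

  #include <stdio.h>

  struct sketch_tp {
          unsigned int snd_cwnd;
          unsigned int snd_cwnd_cnt;
          unsigned int snd_cwnd_clamp;
  };

  /* Old tcp_yeah counting: bump the counter, then test it. */
  static void old_yeah_ai(struct sketch_tp *tp)
  {
          if (tp->snd_cwnd_cnt < tp->snd_cwnd)
                  tp->snd_cwnd_cnt++;

          if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                  tp->snd_cwnd++;
                  tp->snd_cwnd_cnt = 0;
          }
  }

  /* Shared counting as in the helper below: test the counter, then bump it. */
  static void sketch_cong_avoid_ai(struct sketch_tp *tp, unsigned int w)
  {
          if (tp->snd_cwnd_cnt >= w) {
                  if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                          tp->snd_cwnd++;
                  tp->snd_cwnd_cnt = 0;
          } else {
                  tp->snd_cwnd_cnt++;
          }
  }

  int main(void)
  {
          struct sketch_tp a = { .snd_cwnd = 4, .snd_cwnd_clamp = ~0U };
          struct sketch_tp b = a;
          int ack;

          /* With cwnd = 4 the old code grows cwnd on the 4th ack, the
           * helper on the 5th, i.e. the old yeah code was one ack early.
           */
          for (ack = 1; ack <= 5; ack++) {
                  old_yeah_ai(&a);
                  sketch_cong_avoid_ai(&b, b.snd_cwnd);
                  printf("ack %d: old yeah cwnd=%u, helper cwnd=%u\n",
                         ack, a.snd_cwnd, b.snd_cwnd);
          }
          return 0;
  }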

Size benefits:

  bictcp_cong_avoid (tcp_bic.c)    |  -36
  tcp_cong_avoid_ai                |  +52
  bictcp_cong_avoid (tcp_cubic.c)  |  -34
  tcp_scalable_cong_avoid          |  -36
  tcp_veno_cong_avoid              |  -12
  tcp_yeah_cong_avoid              |  -38

= -104 bytes total

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_yeah.c

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 218235d..0366a55 100644
@@ -685,6 +685,7 @@ extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
 extern int tcp_set_allowed_congestion_control(char *allowed);
 extern int tcp_set_congestion_control(struct sock *sk, const char *name);
 extern void tcp_slow_start(struct tcp_sock *tp);
+extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 7eb7636..3b53fd1 100644
@@ -149,16 +149,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                tcp_slow_start(tp);
        else {
                bictcp_update(ca, tp->snd_cwnd);
-
-               /* In dangerous area, increase slowly.
-                * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
-                */
-               if (tp->snd_cwnd_cnt >= ca->cnt) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               } else
-                       tp->snd_cwnd_cnt++;
+               tcp_cong_avoid_ai(tp, ca->cnt);
        }
 
 }
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4ec5b4e..e92beb9 100644
@@ -336,6 +336,19 @@ void tcp_slow_start(struct tcp_sock *tp)
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+{
+       if (tp->snd_cwnd_cnt >= w) {
+               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+                       tp->snd_cwnd++;
+               tp->snd_cwnd_cnt = 0;
+       } else {
+               tp->snd_cwnd_cnt++;
+       }
+}
+EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
+
 /*
  * TCP Reno congestion control
  * This is special case used for fallback as well.
@@ -365,13 +378,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                tp->snd_cwnd++;
                }
        } else {
-               /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
-               if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               } else
-                       tp->snd_cwnd_cnt++;
+               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
        }
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ee467ec..71d5f2f 100644
@@ -294,16 +294,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                tcp_slow_start(tp);
        } else {
                bictcp_update(ca, tp->snd_cwnd);
-
-               /* In dangerous area, increase slowly.
-                * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
-                */
-               if (tp->snd_cwnd_cnt >= ca->cnt) {
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               } else
-                       tp->snd_cwnd_cnt++;
+               tcp_cong_avoid_ai(tp, ca->cnt);
        }
 
 }
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4660b08..a765137 100644
@@ -24,14 +24,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp);
-       else {
-               tp->snd_cwnd_cnt++;
-               if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
-                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                               tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               }
-       }
+       else
+               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index d08b2e8..e9bbff7 100644
@@ -159,12 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                /* In the "non-congestive state", increase cwnd
                                 *  every rtt.
                                 */
-                               if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-                                       if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                                               tp->snd_cwnd++;
-                                       tp->snd_cwnd_cnt = 0;
-                               } else
-                                       tp->snd_cwnd_cnt++;
+                               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
                        } else {
                                /* In the "congestive state", increase cwnd
                                 * every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 9ec843a..66b6821 100644
@@ -94,14 +94,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
        } else {
                /* Reno */
-
-               if (tp->snd_cwnd_cnt < tp->snd_cwnd)
-                       tp->snd_cwnd_cnt++;
-
-               if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-                       tp->snd_cwnd++;
-                       tp->snd_cwnd_cnt = 0;
-               }
+               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
        }
 
        /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.