diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d67b6e9..c96a6bb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -64,6 +64,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/kernel.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
@@ -701,13 +702,10 @@ static inline void tcp_set_rto(struct sock *sk)
         *    all the algo is pure shit and should be replaced
         *    with correct one. It is exactly, which we pretend to do.
         */
-}
 
-/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
- * guarantees that rto is higher.
- */
-static inline void tcp_bound_rto(struct sock *sk)
-{
+       /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
+        * guarantees that rto is higher.
+        */
        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 }
@@ -928,9 +926,10 @@ static void tcp_init_metrics(struct sock *sk)
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        tcp_set_rto(sk);
-       tcp_bound_rto(sk);
        if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
                goto reset;
+
+cwnd:
        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        return;
@@ -945,6 +944,7 @@ reset:
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        }
+       goto cwnd;
 }
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
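The two hunks above fold the old tcp_bound_rto() clamp into the tail of
tcp_set_rto(), so every caller picks up the TCP_RTO_MAX bound automatically,
and the new "cwnd:" label lets the reset path rejoin the cwnd initialization
instead of duplicating it. A minimal user-space sketch of the clamping
behavior (HZ and the rto_estimate stand-in are illustrative, not the
kernel's actual RTO formula):

#include <stdio.h>

#define HZ          1000
#define TCP_RTO_MAX (120 * HZ)              /* 120 s, as in the kernel */

/* Models the merged helper: compute an RTO, then clamp it; the clamp
 * is what the folded-in tcp_bound_rto() used to provide separately. */
static unsigned int set_rto(unsigned int rto_estimate)
{
        unsigned int rto = rto_estimate;    /* stand-in for the srtt math */

        if (rto > TCP_RTO_MAX)
                rto = TCP_RTO_MAX;
        return rto;
}

int main(void)
{
        printf("%u\n", set_rto(50 * HZ));   /* 50000: below the cap */
        printf("%u\n", set_rto(500 * HZ));  /* 120000: clamped */
        return 0;
}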
@@ -1182,10 +1182,18 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (after(received_upto, ack_seq) &&
-                   (tcp_is_fack(tp) ||
-                    !before(received_upto,
-                            ack_seq + tp->reordering * tp->mss_cache))) {
+               /* TODO: We would like to get rid of the tcp_is_fack(tp)-only
+                * constraint here (see above), but figuring out that at
+                * least tp->reordering SACK blocks reside between ack_seq
+                * and received_upto is not an easy task to do cheaply with
+                * the available data structures.
+                *
+                * Whether FACK should check here for tp->reordering segs
+                * in-between, one could argue either way (it would be
+                * rather simple to implement as we could count fack_count
+                * during the walk and do tp->fackets_out - fack_count).
+                */
+               if (after(received_upto, ack_seq)) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
 
@@ -1237,6 +1245,12 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
        return dup_sack;
 }
 
+struct tcp_sacktag_state {
+       int reord;
+       int fack_count;
+       int flag;
+};
+
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
  * the incoming SACK may not exactly match but we can find smaller MSS
  * aligned portion of it that matches. Therefore we might need to fragment
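The tcp_sacktag_state struct introduced above replaces the reord, fack_count
and flag out-parameters that were threaded through the whole SACK walk. A
self-contained sketch of the new calling convention (walk_stub and its body
are invented for illustration; only the struct mirrors the patch):

#include <stdio.h>

struct tcp_sacktag_state {
        int reord;
        int fack_count;
        int flag;
};

/* Stands in for tcp_sacktag_walk(): updates the shared state in place,
 * replacing the old triple of int pointers. */
static void walk_stub(struct tcp_sacktag_state *state, int pcount)
{
        state->fack_count += pcount;
        state->flag |= 0x20;        /* pretend FLAG_DATA_SACKED */
}

int main(void)
{
        struct tcp_sacktag_state state = { .reord = 10 };

        walk_stub(&state, 3);
        printf("reord=%d fack=%d flag=%#x\n",
               state.reord, state.fack_count, state.flag);
        return 0;
}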
@@ -1290,25 +1304,25 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
        return in_sack;
 }
 
-static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
-                          int *reord, int dup_sack, int fack_count,
-                          u8 *sackedto, int pcount)
+static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+                         struct tcp_sacktag_state *state,
+                         int dup_sack, int pcount)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u8 sacked = TCP_SKB_CB(skb)->sacked;
-       int flag = 0;
+       int fack_count = state->fack_count;
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
                if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
-                       *reord = min(fack_count, *reord);
+                       state->reord = min(fack_count, state->reord);
        }
 
        /* Nothing to do; acked frame is about to be dropped (was ACKed). */
        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-               return flag;
+               return sacked;
 
        if (!(sacked & TCPCB_SACKED_ACKED)) {
                if (sacked & TCPCB_SACKED_RETRANS) {
@@ -1317,7 +1331,7 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
                         * that retransmission is still in flight.
                         */
                        if (sacked & TCPCB_LOST) {
-                               *sackedto &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+                               sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
                                tp->lost_out -= pcount;
                                tp->retrans_out -= pcount;
                        }
@@ -1328,21 +1342,22 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
                                 */
                                if (before(TCP_SKB_CB(skb)->seq,
                                           tcp_highest_sack_seq(tp)))
-                                       *reord = min(fack_count, *reord);
+                                       state->reord = min(fack_count,
+                                                          state->reord);
 
                                /* SACK enhanced F-RTO (RFC4138; Appendix B) */
                                if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
-                                       flag |= FLAG_ONLY_ORIG_SACKED;
+                                       state->flag |= FLAG_ONLY_ORIG_SACKED;
                        }
 
                        if (sacked & TCPCB_LOST) {
-                               *sackedto &= ~TCPCB_LOST;
+                               sacked &= ~TCPCB_LOST;
                                tp->lost_out -= pcount;
                        }
                }
 
-               *sackedto |= TCPCB_SACKED_ACKED;
-               flag |= FLAG_DATA_SACKED;
+               sacked |= TCPCB_SACKED_ACKED;
+               state->flag |= FLAG_DATA_SACKED;
                tp->sacked_out += pcount;
 
                fack_count += pcount;
@@ -1361,21 +1376,21 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
         * frames and clear it. undo_retrans is decreased above, L|R frames
         * are accounted above as well.
         */
-       if (dup_sack && (*sackedto & TCPCB_SACKED_RETRANS)) {
-               *sackedto &= ~TCPCB_SACKED_RETRANS;
+       if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
+               sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= pcount;
        }
 
-       return flag;
+       return sacked;
 }
 
-static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
-                          struct sk_buff *skb, unsigned int pcount,
-                          int shifted, int fack_count, int *reord,
-                          int *flag, int mss)
+static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+                          struct tcp_sacktag_state *state,
+                          unsigned int pcount, int shifted, int mss,
+                          int dup_sack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       u8 dummy_sacked = TCP_SKB_CB(skb)->sacked;      /* We discard results */
+       struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
 
        BUG_ON(!pcount);
 
@@ -1407,8 +1422,8 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
                skb_shinfo(skb)->gso_type = 0;
        }
 
-       *flag |= tcp_sacktag_one(skb, sk, reord, 0, fack_count, &dummy_sacked,
-                                pcount);
+       /* We discard results */
+       tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
 
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
        TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
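With the hunks above, tcp_sacktag_one() returns the updated sacked bits
(flags now accumulate in state->flag), so a caller may commit or ignore the
tag bits as it sees fit; the "We discard results" line shows tcp_shifted_skb()
doing exactly that, where it previously needed a dummy_sacked variable. A toy
model of the convention (bit values are invented, not the TCPCB_* masks):

#include <stdio.h>
#include <stdint.h>

#define SACKED_ACKED    0x01    /* illustrative only */
#define SACKED_RETRANS  0x02

/* Like the new tcp_sacktag_one(): mutate a local copy of the tag bits
 * and hand it back; any bookkeeping side effects happen regardless. */
static uint8_t tag_one(uint8_t sacked)
{
        return sacked | SACKED_ACKED;
}

int main(void)
{
        uint8_t sacked = SACKED_RETRANS;

        sacked = tag_one(sacked);   /* walk path: commit the result */
        (void)tag_one(sacked);      /* shift path: discard it */

        printf("%#x\n", sacked);    /* 0x3 */
        return 0;
}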
@@ -1445,14 +1460,9 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 /* I wish gso_size would have a bit more sane initialization than
  * something-or-zero which complicates things
  */
-static int tcp_shift_mss(struct sk_buff *skb)
+static int tcp_skb_seglen(struct sk_buff *skb)
 {
-       int mss = tcp_skb_mss(skb);
-
-       if (!mss)
-               mss = skb->len;
-
-       return mss;
+       return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
 }
 
 /* Shifting pages past head area doesn't work */
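The renamed helper keys the per-segment length off tcp_skb_pcount() instead
of testing gso_size for zero: a single-segment skb reports its full length,
a GSO skb its MSS. A sketch with a toy skb model (the struct is invented
for illustration):

#include <stdio.h>

/* Toy skb: mss models the "something-or-zero" gso_size the comment
 * above laments -- zero whenever the skb holds a single segment. */
struct toy_skb {
        unsigned int len;
        unsigned int pcount;    /* tcp_skb_pcount() */
        unsigned int mss;       /* tcp_skb_mss(), 0 when pcount == 1 */
};

static unsigned int seglen(const struct toy_skb *skb)
{
        return skb->pcount == 1 ? skb->len : skb->mss;
}

int main(void)
{
        struct toy_skb one = { .len = 700,  .pcount = 1, .mss = 0 };
        struct toy_skb gso = { .len = 4380, .pcount = 3, .mss = 1460 };

        printf("%u %u\n", seglen(&one), seglen(&gso));  /* 700 1460 */
        return 0;
}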
@@ -1465,9 +1475,9 @@ static int skb_can_shift(struct sk_buff *skb)
  * skb.
  */
 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+                                         struct tcp_sacktag_state *state,
                                          u32 start_seq, u32 end_seq,
-                                         int dup_sack, int *fack_count,
-                                         int *reord, int *flag)
+                                         int dup_sack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *prev;
@@ -1481,7 +1491,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
        /* Normally R but no L won't result in plain S */
        if (!dup_sack &&
-           (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) == TCPCB_SACKED_RETRANS)
+           (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
                goto fallback;
        if (!skb_can_shift(skb))
                goto fallback;
@@ -1503,12 +1513,12 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
        if (in_sack) {
                len = skb->len;
                pcount = tcp_skb_pcount(skb);
-               mss = tcp_shift_mss(skb);
+               mss = tcp_skb_seglen(skb);
 
                /* TODO: Fix DSACKs to not fragment already SACKed and we can
                 * drop this restriction as unnecessary
                 */
-               if (mss != tcp_shift_mss(prev))
+               if (mss != tcp_skb_seglen(prev))
                        goto fallback;
        } else {
                if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
@@ -1549,7 +1559,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
                /* TODO: Fix DSACKs to not fragment already SACKed and we can
                 * drop this restriction as unnecessary
                 */
-               if (mss != tcp_shift_mss(prev))
+               if (mss != tcp_skb_seglen(prev))
                        goto fallback;
 
                if (len == mss) {
@@ -1564,8 +1574,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 
        if (!skb_shift(prev, skb, len))
                goto fallback;
-       if (!tcp_shifted_skb(sk, prev, skb, pcount, len, *fack_count, reord,
-                            flag, mss))
+       if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
                goto out;
 
        /* Hole filled allows collapsing with the next as well, this is very
@@ -1575,22 +1584,20 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
                goto out;
        skb = tcp_write_queue_next(sk, prev);
 
-       if (!skb_can_shift(skb))
-               goto out;
-       if (skb == tcp_send_head(sk))
-               goto out;
-       if ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
+       if (!skb_can_shift(skb) ||
+           (skb == tcp_send_head(sk)) ||
+           ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
+           (mss != tcp_skb_seglen(skb)))
                goto out;
 
        len = skb->len;
        if (skb_shift(prev, skb, len)) {
                pcount += tcp_skb_pcount(skb);
-               tcp_shifted_skb(sk, prev, skb, tcp_skb_pcount(skb), len,
-                               *fack_count, reord, flag, mss);
+               tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
        }
 
 out:
-       *fack_count += pcount;
+       state->fack_count += pcount;
        return prev;
 
 noop:
@@ -1603,9 +1610,9 @@ fallback:
 
 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                        struct tcp_sack_block *next_dup,
+                                       struct tcp_sacktag_state *state,
                                        u32 start_seq, u32 end_seq,
-                                       int dup_sack_in, int *fack_count,
-                                       int *reord, int *flag)
+                                       int dup_sack_in)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *tmp;
@@ -1635,9 +1642,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                 * so not even _safe variant of the loop is enough.
                 */
                if (in_sack <= 0) {
-                       tmp = tcp_shift_skb_data(sk, skb, start_seq,
-                                                end_seq, dup_sack,
-                                                fack_count, reord, flag);
+                       tmp = tcp_shift_skb_data(sk, skb, state,
+                                                start_seq, end_seq, dup_sack);
                        if (tmp != NULL) {
                                if (tmp != skb) {
                                        skb = tmp;
@@ -1656,17 +1662,17 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                        break;
 
                if (in_sack) {
-                       *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack,
-                                                *fack_count,
-                                                &(TCP_SKB_CB(skb)->sacked),
-                                                tcp_skb_pcount(skb));
+                       TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
+                                                                 state,
+                                                                 dup_sack,
+                                                                 tcp_skb_pcount(skb));
 
                        if (!before(TCP_SKB_CB(skb)->seq,
                                    tcp_highest_sack_seq(tp)))
                                tcp_advance_highest_sack(sk, skb);
                }
 
-               *fack_count += tcp_skb_pcount(skb);
+               state->fack_count += tcp_skb_pcount(skb);
        }
        return skb;
 }
@@ -1675,7 +1681,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
  * a normal way
  */
 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
-                                       u32 skip_to_seq, int *fack_count)
+                                       struct tcp_sacktag_state *state,
+                                       u32 skip_to_seq)
 {
        tcp_for_write_queue_from(skb, sk) {
                if (skb == tcp_send_head(sk))
@@ -1684,7 +1691,7 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
                if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
                        break;
 
-               *fack_count += tcp_skb_pcount(skb);
+               state->fack_count += tcp_skb_pcount(skb);
        }
        return skb;
 }
@@ -1692,18 +1699,17 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
                                                struct sock *sk,
                                                struct tcp_sack_block *next_dup,
-                                               u32 skip_to_seq,
-                                               int *fack_count, int *reord,
-                                               int *flag)
+                                               struct tcp_sacktag_state *state,
+                                               u32 skip_to_seq)
 {
        if (next_dup == NULL)
                return skb;
 
        if (before(next_dup->start_seq, skip_to_seq)) {
-               skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count);
-               skb = tcp_sacktag_walk(skb, sk, NULL,
-                                    next_dup->start_seq, next_dup->end_seq,
-                                    1, fack_count, reord, flag);
+               skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
+               skb = tcp_sacktag_walk(skb, sk, NULL, state,
+                                      next_dup->start_seq, next_dup->end_seq,
+                                      1);
        }
 
        return skb;
@@ -1725,16 +1731,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
        struct tcp_sack_block sp[TCP_NUM_SACKS];
        struct tcp_sack_block *cache;
+       struct tcp_sacktag_state state;
        struct sk_buff *skb;
        int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
        int used_sacks;
-       int reord = tp->packets_out;
-       int flag = 0;
        int found_dup_sack = 0;
-       int fack_count;
        int i, j;
        int first_sack_index;
 
+       state.flag = 0;
+       state.reord = tp->packets_out;
+
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
                        tp->fackets_out = 0;
@@ -1744,7 +1751,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
                                         num_sacks, prior_snd_una);
        if (found_dup_sack)
-               flag |= FLAG_DSACKING_ACK;
+               state.flag |= FLAG_DSACKING_ACK;
 
        /* Eliminate too old ACKs, but take into
         * account more or less fresh ones, they can
@@ -1799,11 +1806,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        for (i = used_sacks - 1; i > 0; i--) {
                for (j = 0; j < i; j++) {
                        if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
-                               struct tcp_sack_block tmp;
-
-                               tmp = sp[j];
-                               sp[j] = sp[j + 1];
-                               sp[j + 1] = tmp;
+                               swap(sp[j], sp[j + 1]);
 
                                /* Track where the first SACK block goes to */
                                if (j == first_sack_index)
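This hunk is why <linux/kernel.h> is added at the top of the file: the
open-coded three-assignment exchange becomes the generic swap() macro, and
the same substitution later lets the tcp_sack_swap() helper be deleted
outright. A user-space stand-in:

#include <stdio.h>

/* Stand-in for swap() from <linux/kernel.h>; __typeof__ lets it swap
 * whole structs, which is what the sort above relies on. */
#define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct tcp_sack_block {
        unsigned int start_seq;
        unsigned int end_seq;
};

int main(void)
{
        struct tcp_sack_block sp[2] = { { 300, 400 }, { 100, 200 } };

        if (sp[0].start_seq > sp[1].start_seq)
                swap(sp[0], sp[1]);     /* one line, no visible temporary */

        printf("%u %u\n", sp[0].start_seq, sp[1].start_seq);    /* 100 300 */
        return 0;
}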
@@ -1813,7 +1816,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        }
 
        skb = tcp_write_queue_head(sk);
-       fack_count = 0;
+       state.fack_count = 0;
        i = 0;
 
        if (!tp->sacked_out) {
@@ -1838,7 +1841,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 
                /* Event "B" in the comment above. */
                if (after(end_seq, tp->high_seq))
-                       flag |= FLAG_DATA_LOST;
+                       state.flag |= FLAG_DATA_LOST;
 
                /* Skip too early cached blocks */
                while (tcp_sack_cache_ok(tp, cache) &&
@@ -1851,13 +1854,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 
                        /* Head todo? */
                        if (before(start_seq, cache->start_seq)) {
-                               skb = tcp_sacktag_skip(skb, sk, start_seq,
-                                                      &fack_count);
+                               skb = tcp_sacktag_skip(skb, sk, &state,
+                                                      start_seq);
                                skb = tcp_sacktag_walk(skb, sk, next_dup,
+                                                      &state,
                                                       start_seq,
                                                       cache->start_seq,
-                                                      dup_sack, &fack_count,
-                                                      &reord, &flag);
+                                                      dup_sack);
                        }
 
                        /* Rest of the block already fully processed? */
@@ -1865,9 +1868,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                goto advance_sp;
 
                        skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
-                                                      cache->end_seq,
-                                                      &fack_count, &reord,
-                                                      &flag);
+                                                      &state,
+                                                      cache->end_seq);
 
                        /* ...tail remains todo... */
                        if (tcp_highest_sack_seq(tp) == cache->end_seq) {
@@ -1875,13 +1877,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                                skb = tcp_highest_sack(sk);
                                if (skb == NULL)
                                        break;
-                               fack_count = tp->fackets_out;
+                               state.fack_count = tp->fackets_out;
                                cache++;
                                goto walk;
                        }
 
-                       skb = tcp_sacktag_skip(skb, sk, cache->end_seq,
-                                              &fack_count);
+                       skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
                        /* Check overlap against next cached too (past this one already) */
                        cache++;
                        continue;
@@ -1891,20 +1892,20 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
                        skb = tcp_highest_sack(sk);
                        if (skb == NULL)
                                break;
-                       fack_count = tp->fackets_out;
+                       state.fack_count = tp->fackets_out;
                }
-               skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
+               skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
 
 walk:
-               skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
-                                      dup_sack, &fack_count, &reord, &flag);
+               skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+                                      start_seq, end_seq, dup_sack);
 
 advance_sp:
                /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
                 * due to in-order walk
                 */
                if (after(end_seq, tp->frto_highmark))
-                       flag &= ~FLAG_ONLY_ORIG_SACKED;
+                       state.flag &= ~FLAG_ONLY_ORIG_SACKED;
 
                i++;
        }
@@ -1921,10 +1922,10 @@ advance_sp:
 
        tcp_verify_left_out(tp);
 
-       if ((reord < tp->fackets_out) &&
+       if ((state.reord < tp->fackets_out) &&
            ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
            (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
-               tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+               tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
 
 out:
 
@@ -1934,7 +1935,7 @@ out:
        WARN_ON((int)tp->retrans_out < 0);
        WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-       return flag;
+       return state.flag;
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
@@ -2460,6 +2461,44 @@ static int tcp_time_to_recover(struct sock *sk)
        return 0;
 }
 
+/* New heuristic: it is possible only after we switched to restarting the
+ * timer each time something is ACKed. Hence, we can detect timed-out
+ * packets during fast retransmit without falling back to slow start.
+ *
+ * The usefulness of this as-is is very questionable, since we should know
+ * which of the segments is the next to time out, which is relatively
+ * expensive to find in the general case unless we add some data structure
+ * just for that. The current approach certainly won't find the right one
+ * too often, and when it finally does find _something_ it usually marks a
+ * large part of the window right away (because a retransmission with a
+ * larger timestamp blocks the loop from advancing). -ij
+ */
+static void tcp_timeout_skbs(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
+               return;
+
+       skb = tp->scoreboard_skb_hint;
+       if (tp->scoreboard_skb_hint == NULL)
+               skb = tcp_write_queue_head(sk);
+
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (!tcp_skb_timedout(sk, skb))
+                       break;
+
+               tcp_skb_mark_lost(tp, skb);
+       }
+
+       tp->scoreboard_skb_hint = skb;
+
+       tcp_verify_left_out(tp);
+}
+
 /* Mark head of queue up as lost. With RFC3517 SACK, the packet count
  * is against the sacked "cnt", otherwise it's against the facked "cnt"
  */
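The extracted tcp_timeout_skbs() preserves the hint-based resumable walk:
start from tp->scoreboard_skb_hint when one is cached, mark the leading run
of timed-out skbs lost, and save where the walk stopped as the next hint.
A simplified sketch of that pattern over a plain linked list (all names
here are invented):

#include <stdio.h>
#include <stddef.h>

struct pkt {
        int timedout;
        struct pkt *next;
};

/* Resume from the saved cursor (or the head), mark the leading run of
 * timed-out packets, and return the new cursor -- the same shape as
 * the scoreboard_skb_hint walk above. */
static struct pkt *mark_timedout(struct pkt *head, struct pkt *hint)
{
        struct pkt *p = hint ? hint : head;

        for (; p != NULL && p->timedout; p = p->next)
                printf("marking packet lost\n");

        return p;
}

int main(void)
{
        struct pkt c = { 0, NULL };
        struct pkt b = { 1, &c };
        struct pkt a = { 1, &b };

        struct pkt *hint = mark_timedout(&a, NULL);     /* marks a and b */
        mark_timedout(&a, hint);                        /* resumes at c: no-op */
        return 0;
}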
@@ -2532,30 +2571,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
                tcp_mark_head_lost(sk, sacked_upto);
        }
 
-       /* New heuristics: it is possible only after we switched
-        * to restart timer each time when something is ACKed.
-        * Hence, we can detect timed out packets during fast
-        * retransmit without falling to slow start.
-        */
-       if (tcp_is_fack(tp) && tcp_head_timedout(sk)) {
-               struct sk_buff *skb;
-
-               skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : tcp_write_queue_head(sk);
-
-               tcp_for_write_queue_from(skb, sk) {
-                       if (skb == tcp_send_head(sk))
-                               break;
-                       if (!tcp_skb_timedout(sk, skb))
-                               break;
-
-                       tcp_skb_mark_lost(tp, skb);
-               }
-
-               tp->scoreboard_skb_hint = skb;
-
-               tcp_verify_left_out(tp);
-       }
+       tcp_timeout_skbs(sk);
 }
 
 /* CWND moderation, preventing bursts due to too big ACKs
@@ -2820,7 +2836,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
        icsk->icsk_mtup.probe_size = 0;
 }
 
-static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+static void tcp_mtup_probe_success(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2848,13 +2864,13 @@ void tcp_simple_retransmit(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       unsigned int mss = tcp_current_mss(sk, 0);
+       unsigned int mss = tcp_current_mss(sk);
        u32 prior_lost = tp->lost_out;
 
        tcp_for_write_queue(skb, sk) {
                if (skb == tcp_send_head(sk))
                        break;
-               if (skb->len > mss &&
+               if (tcp_skb_seglen(skb) > mss &&
                    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
@@ -3060,6 +3076,13 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
        tcp_xmit_retransmit_queue(sk);
 }
 
+static void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+{
+       tcp_rtt_estimator(sk, seq_rtt);
+       tcp_set_rto(sk);
+       inet_csk(sk)->icsk_backoff = 0;
+}
+
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Supersedes RFC1323)
  */
@@ -3081,11 +3104,8 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
         * in window is lost... Voila.                          --ANK (010210)
         */
        struct tcp_sock *tp = tcp_sk(sk);
-       const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-       tcp_bound_rto(sk);
+
+       tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 }
 
 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
@@ -3102,10 +3122,7 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
        if (flag & FLAG_RETRANS_DATA_ACKED)
                return;
 
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-       tcp_bound_rto(sk);
+       tcp_valid_rtt_meas(sk, seq_rtt);
 }
 
 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
@@ -3184,7 +3201,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
        while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-               u32 end_seq;
                u32 acked_pcount;
                u8 sacked = scb->sacked;
 
@@ -3199,16 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                break;
 
                        fully_acked = 0;
-                       end_seq = tp->snd_una;
                } else {
                        acked_pcount = tcp_skb_pcount(skb);
-                       end_seq = scb->end_seq;
-               }
-
-               /* MTU probing checks */
-               if (fully_acked && icsk->icsk_mtup.probe_size &&
-                   !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
-                       tcp_mtup_probe_success(sk, skb);
                }
 
                if (sacked & TCPCB_RETRANS) {
@@ -3273,24 +3281,26 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                const struct tcp_congestion_ops *ca_ops
                        = inet_csk(sk)->icsk_ca_ops;
 
+               if (unlikely(icsk->icsk_mtup.probe_size &&
+                            !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+                       tcp_mtup_probe_success(sk);
+               }
+
                tcp_ack_update_rtt(sk, flag, seq_rtt);
                tcp_rearm_rto(sk);
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
                } else {
+                       int delta;
+
                        /* Non-retransmitted hole got filled? That's reordering */
                        if (reord < prior_fackets)
                                tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
-                       /* No need to care for underflows here because
-                        * the lost_skb_hint gets NULLed if we're past it
-                        * (or something non-trivial happened)
-                        */
-                       if (tcp_is_fack(tp))
-                               tp->lost_cnt_hint -= pkts_acked;
-                       else
-                               tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
+                       delta = tcp_is_fack(tp) ? pkts_acked :
+                                                 prior_sacked - tp->sacked_out;
+                       tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
                }
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
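The reworked hint update collapses the FACK and non-FACK branches into a
single delta and, rather than trusting the deleted comment's claim that
underflow cannot happen, clamps explicitly with min(). A tiny demonstration
of the underflow-safe decrement:

#include <stdio.h>

static int min_int(int a, int b)
{
        return a < b ? a : b;
}

/* hint -= min(hint, delta): the hint can reach zero but never goes
 * negative, whatever delta the caller computes. */
static int dec_hint(int hint, int delta)
{
        return hint - min_int(hint, delta);
}

int main(void)
{
        printf("%d\n", dec_hint(5, 3));     /* 2 */
        printf("%d\n", dec_hint(2, 7));     /* 0, not -5 */
        return 0;
}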
@@ -3402,7 +3412,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
                flag |= FLAG_WIN_UPDATE;
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
 
                if (tp->snd_wnd != nwin) {
                        tp->snd_wnd = nwin;
@@ -3578,15 +3588,18 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        int prior_packets;
        int frto_cwnd = 0;
 
-       /* If the ack is newer than sent or older than previous acks
+       /* If the ack is older than previous acks
         * then we can probably ignore it.
         */
-       if (after(ack, tp->snd_nxt))
-               goto uninteresting_ack;
-
        if (before(ack, prior_snd_una))
                goto old_ack;
 
+       /* If the ack includes data we haven't sent yet, discard
+        * this segment (RFC793 Section 3.9).
+        */
+       if (after(ack, tp->snd_nxt))
+               goto invalid_ack;
+
        if (after(ack, prior_snd_una))
                flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -3607,7 +3620,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                 * No more checks are required.
                 * Note, we use the fact that SND.UNA>=SND.WL2.
                 */
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
                tp->snd_una = ack;
                flag |= FLAG_WIN_UPDATE;
 
@@ -3676,6 +3689,10 @@ no_queue:
                tcp_ack_probe(sk);
        return 1;
 
+invalid_ack:
+       SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       return -1;
+
 old_ack:
        if (TCP_SKB_CB(skb)->sacked) {
                tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3683,8 +3700,7 @@ old_ack:
                        tcp_try_keep_open(sk);
        }
 
-uninteresting_ack:
-       SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
        return 0;
 }
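tcp_ack() now distinguishes three outcomes: old acks return 0, acks for data
never sent return -1 (which the changed callers below treat as "discard the
segment"), and acceptable acks return a positive value. A user-space model
of the classification, including the wrap-safe compares that the kernel's
before()/after() macros provide:

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe sequence compares, modeled on the kernel's before()/after(). */
static int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq1, seq2) before(seq2, seq1)

/* 0: old ack; -1: acks data never sent (discard, RFC 793); 1: acceptable. */
static int classify_ack(uint32_t ack, uint32_t snd_una, uint32_t snd_nxt)
{
        if (before(ack, snd_una))
                return 0;
        if (after(ack, snd_nxt))
                return -1;
        return 1;
}

int main(void)
{
        printf("%d\n", classify_ack(150, 100, 200));        /* 1 */
        printf("%d\n", classify_ack(50, 100, 200));         /* 0 */
        printf("%d\n", classify_ack(250, 100, 200));        /* -1 */
        printf("%d\n", classify_ack(10, 0xfffffff0u, 40));  /* 1: wraps */
        return 0;
}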
 
@@ -3872,8 +3888,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if (tcp_paws_check(&tp->rx_opt, 0))
                        tcp_store_ts_recent(tp);
        }
 }
@@ -3925,9 +3940,9 @@ static inline int tcp_paws_discard(const struct sock *sk,
                                   const struct sk_buff *skb)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-               !tcp_disordered_ack(sk, skb));
+
+       return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
+              !tcp_disordered_ack(sk, skb);
 }
 
 /* Check segment sequence number for validity.
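tcp_paws_discard() is now phrased in terms of a shared tcp_paws_check()
helper, with the old inverted sense renamed tcp_paws_reject() (see the
tcp_rcv_synsent_state_process hunk further down). A rough user-space sketch
of the core timestamp test; the real helper also forgives a ts_recent older
than 24 days, which this omits:

#include <stdio.h>
#include <stdint.h>

#define TCP_PAWS_WINDOW 1

/* A timestamp passes PAWS if it is not more than `win` ticks behind the
 * last one stored; the signed compare handles tsval wraparound. */
static int paws_check(uint32_t rcv_tsval, uint32_t ts_recent, int win)
{
        return (int32_t)(ts_recent - rcv_tsval) <= win;
}

int main(void)
{
        printf("%d\n", paws_check(1000, 1000, TCP_PAWS_WINDOW));    /* 1 */
        printf("%d\n", paws_check(999,  1000, TCP_PAWS_WINDOW));    /* 1 */
        printf("%d\n", paws_check(900,  1000, TCP_PAWS_WINDOW));    /* 0 */
        return 0;
}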
@@ -4085,7 +4100,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
                tp->duplicate_sack[0].end_seq = end_seq;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
        }
 }
 
@@ -4140,8 +4154,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         * Decrease num_sacks.
                         */
                        tp->rx_opt.num_sacks--;
-                       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-                                              tp->rx_opt.dsack;
                        for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i + 1];
                        continue;
@@ -4150,20 +4162,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
        }
 }
 
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
-                                struct tcp_sack_block *sack2)
-{
-       __u32 tmp;
-
-       tmp = sack1->start_seq;
-       sack1->start_seq = sack2->start_seq;
-       sack2->start_seq = tmp;
-
-       tmp = sack1->end_seq;
-       sack1->end_seq = sack2->end_seq;
-       sack2->end_seq = tmp;
-}
-
 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -4178,7 +4176,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                if (tcp_sack_extend(sp, seq, end_seq)) {
                        /* Rotate this_sack to the first one. */
                        for (; this_sack > 0; this_sack--, sp--)
-                               tcp_sack_swap(sp, sp - 1);
+                               swap(*sp, *(sp - 1));
                        if (cur_sacks > 1)
                                tcp_sack_maybe_coalesce(tp);
                        return;
@@ -4204,7 +4202,6 @@ new_sack:
        sp->start_seq = seq;
        sp->end_seq = end_seq;
        tp->rx_opt.num_sacks++;
-       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4218,7 +4215,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
        if (skb_queue_empty(&tp->out_of_order_queue)) {
                tp->rx_opt.num_sacks = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
                return;
        }
 
@@ -4239,11 +4235,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                this_sack++;
                sp++;
        }
-       if (num_sacks != tp->rx_opt.num_sacks) {
-               tp->rx_opt.num_sacks = num_sacks;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-                                      tp->rx_opt.dsack;
-       }
+       tp->rx_opt.num_sacks = num_sacks;
 }
 
 /* This one checks to see if we can put data from the
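Every deleted assignment in the surrounding hunks maintained the cached
invariant eff_sacks == num_sacks + dsack. Presumably the value is now
computed where it is consumed (the consumer side is outside this diff),
which is why all of these update sites can simply vanish. A one-line sketch
of the on-demand form:

#include <stdio.h>

/* On-demand replacement for the cached rx_opt.eff_sacks field: the
 * invariant the deleted assignments kept up is just this sum. */
static int eff_sacks(int num_sacks, int dsack)
{
        return num_sacks + dsack;
}

int main(void)
{
        printf("%d\n", eff_sacks(2, 1));    /* two SACK blocks plus a D-SACK */
        printf("%d\n", eff_sacks(0, 0));    /* nothing to report */
        return 0;
}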
@@ -4319,10 +4311,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
        TCP_ECN_accept_cwr(tp, skb);
 
-       if (tp->rx_opt.dsack) {
-               tp->rx_opt.dsack = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-       }
+       tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
         *  Packets in sequence go to the receive queue.
@@ -4441,8 +4430,6 @@ drop:
                /* Initial out of order segment, build 1 SACK. */
                if (tcp_is_sack(tp)) {
                        tp->rx_opt.num_sacks = 1;
-                       tp->rx_opt.dsack     = 0;
-                       tp->rx_opt.eff_sacks = 1;
                        tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
                        tp->selective_acks[0].end_seq =
                                                TCP_SKB_CB(skb)->end_seq;
@@ -5012,7 +4999,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
                return 0;
 
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-               tp->ucopy.dma_chan = get_softnet_dma();
+               tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
        if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
@@ -5163,7 +5150,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         */
 
        if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
-           TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+           TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
+           !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
                int tcp_header_len = tp->tcp_header_len;
 
                /* Timestamp header prediction: tcp_header_len
@@ -5316,8 +5304,8 @@ slow_path:
                return -res;
 
 step5:
-       if (th->ack)
-               tcp_ack(sk, skb, FLAG_SLOWPATH);
+       if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+               goto discard;
 
        tcp_rcv_rtt_measure_ts(sk, skb);
 
@@ -5415,7 +5403,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5516,7 +5504,7 @@ discard:
 
        /* PAWS check. */
        if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
-           tcp_paws_check(&tp->rx_opt, 0))
+           tcp_paws_reject(&tp->rx_opt, 0))
                goto discard_and_undo;
 
        if (th->syn) {
@@ -5654,7 +5642,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* step 5: check the ACK field */
        if (th->ack) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
@@ -5676,8 +5664,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) <<
                                              tp->rx_opt.snd_wscale;
-                               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
-                                           TCP_SKB_CB(skb)->seq);
+                               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                                /* tcp_ack considers this ACK as duplicate
                                 * and does not calculate rtt.