diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 47f4f35..d86784b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5,8 +5,6 @@
  *
  *             Implementation of the Transmission Control Protocol(TCP).
  *
- * Version:    $Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
- *
  * Authors:    Ross Biro
  *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *             Mark Evans, <evansmp@uhura.aston.ac.uk>
@@ -66,6 +64,8 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/kernel.h>
+#include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
@@ -77,7 +77,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
 int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
-int sysctl_tcp_ecn __read_mostly;
+int sysctl_tcp_ecn __read_mostly = 2;
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 2;
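
The new default of 2 enables ECN for the passive side only: incoming
connections that ask for ECN get it, but outgoing connections still do not
request it unless the sysctl is set to 1. A small userspace sketch of the
three modes (helper names are mine; the real checks live in tcp_output.c and
the request-sock code):

    /* 0: ECN disabled entirely;
     * 1: request ECN on outgoing connections, accept incoming requests;
     * 2: (new default) accept incoming requests, never ask ourselves.
     */
    static int ecn_request_on_our_syn(int sysctl_tcp_ecn)
    {
            return sysctl_tcp_ecn == 1;  /* only mode 1 puts ECE|CWR on our SYN */
    }

    static int ecn_accept_peer_request(int sysctl_tcp_ecn)
    {
            return sysctl_tcp_ecn != 0;  /* modes 1 and 2 both negotiate */
    }
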
@@ -105,6 +105,7 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK      0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED     0x1000 /* Non-head rexmitted data was ACKed */
+#define FLAG_SACK_RENEGING     0x2000 /* snd_una advanced to a sacked seq */
 
 #define FLAG_ACKED             (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP           (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -112,16 +113,13 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_FORWARD_PROGRESS  (FLAG_ACKED|FLAG_DATA_SACKED)
 #define FLAG_ANY_PROGRESS      (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
 
-#define IsSackFrto() (sysctl_tcp_frto == 0x2)
-
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
 /* Adapt the MSS value used to make the delayed ack decision to the
  * real world.
  */
-static void tcp_measure_rcv_mss(struct sock *sk,
-                               const struct sk_buff *skb)
+static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        const unsigned int lss = icsk->icsk_ack.last_seg_size;
@@ -132,7 +130,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
         */
-       len = skb_shinfo(skb)->gso_size ?: skb->len;
+       len = skb_shinfo(skb)->gso_size ? : skb->len;
        if (len >= icsk->icsk_ack.rcv_mss) {
                icsk->icsk_ack.rcv_mss = len;
        } else {
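
The "? :" with the middle operand omitted is a GNU C extension: "a ? : b"
evaluates to a when a is non-zero and to b otherwise, evaluating a only once.
A tiny compilable illustration of how the length is picked above (names are
mine):

    #include <assert.h>

    /* Prefer the GSO segment size when set, else the raw skb length. */
    static unsigned int rcv_seg_len(unsigned int gso_size, unsigned int skb_len)
    {
            return gso_size ? : skb_len;  /* == gso_size ? gso_size : skb_len */
    }

    int main(void)
    {
            assert(rcv_seg_len(1448, 65160) == 1448);  /* GSO'd skb */
            assert(rcv_seg_len(0, 512) == 512);        /* plain skb */
            return 0;
    }
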
@@ -172,8 +170,8 @@ static void tcp_incr_quickack(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
-       if (quickacks==0)
-               quickacks=2;
+       if (quickacks == 0)
+               quickacks = 2;
        if (quickacks > icsk->icsk_ack.quick)
                icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 }
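
rcv_wnd / (2 * rcv_mss) is the number of ACKs needed to cover one receive
window when ACKing every second full-sized segment, so quickack mode sends
roughly a window's worth of immediate ACKs, at least 2 and capped. A
standalone sketch (TCP_MAX_QUICKACKS is 16 in kernels of this era; treat the
value as an assumption):

    #define TCP_MAX_QUICKACKS 16U  /* assumed kernel constant */

    static unsigned int quickacks_for(unsigned int rcv_wnd, unsigned int rcv_mss)
    {
            unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

            if (quickacks == 0)
                    quickacks = 2;
            /* e.g. rcv_wnd 64 KB, rcv_mss 1448: 22, clamped to 16 */
            return quickacks > TCP_MAX_QUICKACKS ? TCP_MAX_QUICKACKS : quickacks;
    }
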
@@ -198,7 +196,7 @@ static inline int tcp_in_quickack_mode(const struct sock *sk)
 
 static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
 {
-       if (tp->ecn_flags&TCP_ECN_OK)
+       if (tp->ecn_flags & TCP_ECN_OK)
                tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 }
 
@@ -215,7 +213,7 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
 
 static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 {
-       if (tp->ecn_flags&TCP_ECN_OK) {
+       if (tp->ecn_flags & TCP_ECN_OK) {
                if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                /* Funny extension: if ECT is not set on a segment,
@@ -228,19 +226,19 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 
 static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
 {
-       if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr))
+       if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
                tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
 static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
 {
-       if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr))
+       if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
                tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
 static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
 {
-       if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK))
+       if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
                return 1;
        return 0;
 }
@@ -289,8 +287,8 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
-       int truesize = tcp_win_from_space(skb->truesize)/2;
-       int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
+       int truesize = tcp_win_from_space(skb->truesize) >> 1;
+       int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;
 
        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
@@ -302,8 +300,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
        return 0;
 }
 
-static void tcp_grow_window(struct sock *sk,
-                           struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -317,12 +314,13 @@ static void tcp_grow_window(struct sock *sk,
                 * will fit to rcvbuf in future.
                 */
                if (tcp_win_from_space(skb->truesize) <= skb->len)
-                       incr = 2*tp->advmss;
+                       incr = 2 * tp->advmss;
                else
                        incr = __tcp_grow_window(sk, skb);
 
                if (incr) {
-                       tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
+                       tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
+                                              tp->window_clamp);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
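
__tcp_grow_window() answers "would an skb with this overhead ratio
eventually fit the maximal rcvbuf?": it repeatedly halves both the skb's
charged size and the global window until the current rcv_ssthresh dominates,
succeeding if at some scale the payload dominates the overhead. A direct
restatement of that loop (names mine; the inputs are assumed to be the
halved tcp_win_from_space() values computed above):

    /* Returns the ssthresh increment, or 0 if the skb is too overhead-heavy. */
    static int grow_window_incr(int truesize_half, int window_half,
                                unsigned int rcv_ssthresh,
                                unsigned int skb_len, int advmss)
    {
            while (rcv_ssthresh <= (unsigned int)window_half) {
                    if (truesize_half <= (int)skb_len)
                            return 2 * advmss;  /* payload dominates */
                    truesize_half >>= 1;
                    window_half >>= 1;
            }
            return 0;
    }
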
@@ -397,10 +395,9 @@ static void tcp_clamp_window(struct sock *sk)
                                    sysctl_tcp_rmem[2]);
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
-               tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
+               tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
 }
 
-
 /* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
@@ -413,7 +410,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
-       hint = min(hint, tp->rcv_wnd/2);
+       hint = min(hint, tp->rcv_wnd / 2);
        hint = min(hint, TCP_MIN_RCVMSS);
        hint = max(hint, TCP_MIN_MSS);
 
@@ -470,16 +467,15 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
                goto new_measure;
        if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                return;
-       tcp_rcv_rtt_update(tp,
-                          jiffies - tp->rcv_rtt_est.time,
-                          1);
+       tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
 
 new_measure:
        tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
        tp->rcv_rtt_est.time = tcp_time_stamp;
 }
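
tcp_rcv_rtt_measure() samples the RTT without timestamps: it plants a mark
one receive window beyond rcv_nxt and times how long rcv_nxt takes to pass
it, since the sender needs about one round trip to deliver a full window. A
userspace model of that mark-and-measure cycle (names are mine; the kernel
uses the wraparound-safe before(), and the mark must be planted once before
the first call):

    struct rcv_rtt_mark {
            unsigned int seq;    /* rcv_nxt + rcv_wnd at mark time */
            unsigned long time;  /* tick count when the mark was set */
    };

    /* Returns an RTT sample in ticks, or 0 while the mark is unreached. */
    static unsigned long rcv_rtt_sample(struct rcv_rtt_mark *m,
                                        unsigned int rcv_nxt,
                                        unsigned int rcv_wnd,
                                        unsigned long now)
    {
            unsigned long sample = 0;

            if (rcv_nxt >= m->seq) {  /* a full window has been consumed */
                    sample = now - m->time;
                    m->seq = rcv_nxt + rcv_wnd;  /* start the next measure */
                    m->time = now;
            }
            return sample;
    }
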
 
-static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
+static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
+                                         const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        if (tp->rx_opt.rcv_tsecr &&
@@ -502,8 +498,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                goto new_measure;
 
        time = tcp_time_stamp - tp->rcvq_space.time;
-       if (time < (tp->rcv_rtt_est.rtt >> 3) ||
-           tp->rcv_rtt_est.rtt == 0)
+       if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
                return;
 
        space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
@@ -579,7 +574,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
        } else {
                int m = now - icsk->icsk_ack.lrcvtime;
 
-               if (m <= TCP_ATO_MIN/2) {
+               if (m <= TCP_ATO_MIN / 2) {
                        /* The fastest case is the first. */
                        icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
                } else if (m < icsk->icsk_ack.ato) {
@@ -591,7 +586,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                         * restart window, so that we send ACKs quickly.
                         */
                        tcp_incr_quickack(sk);
-                       sk_stream_mem_reclaim(sk);
+                       sk_mem_reclaim(sk);
                }
        }
        icsk->icsk_ack.lrcvtime = now;
@@ -602,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-       struct dst_entry *dst = __sk_dst_get(sk);
-       u32 rto_min = TCP_RTO_MIN;
-
-       if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-               rto_min = dst->metrics[RTAX_RTO_MIN-1];
-       return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -671,14 +656,14 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                }
                if (after(tp->snd_una, tp->rtt_seq)) {
                        if (tp->mdev_max < tp->rttvar)
-                               tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
+                               tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
                        tp->rtt_seq = tp->snd_nxt;
                        tp->mdev_max = tcp_rto_min(sk);
                }
        } else {
                /* no previous measure. */
-               tp->srtt = m<<3;        /* take the measured time to be rtt */
-               tp->mdev = m<<1;        /* make sure rto = 3*rtt */
+               tp->srtt = m << 3;      /* take the measured time to be rtt */
+               tp->mdev = m << 1;      /* make sure rto = 3*rtt */
                tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
                tp->rtt_seq = tp->snd_nxt;
        }
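
The estimator keeps srtt scaled by 8 and mdev by 4 so the 1/8 and 1/4 EWMA
gains stay in integer arithmetic (the classic Jacobson/Karels scheme, later
codified in RFC 6298). A compilable sketch of the core update, leaving out
the kernel's extra damping for sudden RTT drops and the mdev_max/rttvar
bookkeeping:

    struct rtt_est {
            unsigned int srtt;  /* smoothed RTT, scaled by 8 */
            unsigned int mdev;  /* mean deviation, scaled by 4 */
    };

    static void rtt_update(struct rtt_est *e, long m /* new sample, ticks */)
    {
            if (e->srtt) {
                    m -= (e->srtt >> 3);  /* m becomes the estimate error */
                    e->srtt += m;         /* srtt = 7/8 srtt + 1/8 sample */
                    if (m < 0)
                            m = -m;
                    m -= (e->mdev >> 2);
                    e->mdev += m;         /* mdev = 3/4 mdev + 1/4 |error| */
            } else {
                    e->srtt = m << 3;     /* first sample is taken as the RTT */
                    e->mdev = m << 1;     /* so that rto ~= 3 * rtt initially */
            }
    }
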
@@ -700,22 +685,18 @@ static inline void tcp_set_rto(struct sock *sk)
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
-       inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
+       inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 
        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
         *    all the algo is pure shit and should be replaced
         *    with correct one. It is exactly, which we pretend to do.
         */
-}
 
-/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
- * guarantees that rto is higher.
- */
-static inline void tcp_bound_rto(struct sock *sk)
-{
-       if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
-               inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+       /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
+        * guarantees that rto is higher.
+        */
+       tcp_bound_rto(sk);
 }
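
The open-coded formula and the separate tcp_bound_rto() are folded into
helpers that now live as static inlines in include/net/tcp.h; assuming their
classic definitions, the combined effect is srtt/8 plus the variance term,
clamped at TCP_RTO_MAX. A hedged sketch (HZ and the constant are
assumptions):

    #define HZ 1000                  /* assumed tick rate */
    #define TCP_RTO_MAX (120 * HZ)   /* assumed kernel bound */

    /* __tcp_set_rto() followed by tcp_bound_rto(), approximately. */
    static unsigned int rto_from_estimate(unsigned int srtt_x8,
                                          unsigned int rttvar)
    {
            unsigned int rto = (srtt_x8 >> 3) + rttvar;

            return rto > TCP_RTO_MAX ? TCP_RTO_MAX : rto;
    }
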
 
 /* Save metrics learned by this TCP session.
@@ -732,9 +713,10 @@ void tcp_update_metrics(struct sock *sk)
 
        dst_confirm(dst);
 
-       if (dst && (dst->flags&DST_HOST)) {
+       if (dst && (dst->flags & DST_HOST)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                int m;
+               unsigned long rtt;
 
                if (icsk->icsk_backoff || !tp->srtt) {
                        /* This session failed to estimate rtt. Why?
@@ -742,11 +724,12 @@ void tcp_update_metrics(struct sock *sk)
                         * Reset our results.
                         */
                        if (!(dst_metric_locked(dst, RTAX_RTT)))
-                               dst->metrics[RTAX_RTT-1] = 0;
+                               dst->metrics[RTAX_RTT - 1] = 0;
                        return;
                }
 
-               m = dst_metric(dst, RTAX_RTT) - tp->srtt;
+               rtt = dst_metric_rtt(dst, RTAX_RTT);
+               m = rtt - tp->srtt;
 
                /* If newly calculated rtt larger than stored one,
                 * store new one. Otherwise, use EWMA. Remember,
@@ -754,12 +737,13 @@ void tcp_update_metrics(struct sock *sk)
                 */
                if (!(dst_metric_locked(dst, RTAX_RTT))) {
                        if (m <= 0)
-                               dst->metrics[RTAX_RTT-1] = tp->srtt;
+                               set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
                        else
-                               dst->metrics[RTAX_RTT-1] -= (m>>3);
+                               set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
                }
 
                if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
+                       unsigned long var;
                        if (m < 0)
                                m = -m;
 
@@ -768,14 +752,16 @@ void tcp_update_metrics(struct sock *sk)
                        if (m < tp->mdev)
                                m = tp->mdev;
 
-                       if (m >= dst_metric(dst, RTAX_RTTVAR))
-                               dst->metrics[RTAX_RTTVAR-1] = m;
+                       var = dst_metric_rtt(dst, RTAX_RTTVAR);
+                       if (m >= var)
+                               var = m;
                        else
-                               dst->metrics[RTAX_RTTVAR-1] -=
-                                       (dst->metrics[RTAX_RTTVAR-1] - m)>>2;
+                               var -= (var - m) >> 2;
+
+                       set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
                }
 
-               if (tp->snd_ssthresh >= 0xFFFF) {
+               if (tcp_in_initial_slowstart(tp)) {
                        /* Slow start still did not finish. */
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
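
The dst_metric_rtt()/set_dst_metric_rtt() accessors replace the raw
dst->metrics[] pokes, but the smoothing itself is unchanged: the cached RTT
jumps straight up when it was below the fresh srtt and otherwise decays 1/8
of the gap toward it, while the cached variance jumps up to a larger
deviation and otherwise decays 1/4 of the way down. In plain arithmetic (an
accessor-free sketch, names mine):

    static unsigned long ewma_cached_rtt(unsigned long cached, unsigned long srtt)
    {
            long m = (long)cached - (long)srtt;

            return m <= 0 ? srtt : cached - (m >> 3);  /* decay by 1/8 */
    }

    static unsigned long ewma_cached_var(unsigned long var, unsigned long m)
    {
            return m >= var ? m : var - ((var - m) >> 2);  /* decay by 1/4 */
    }
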
@@ -783,7 +769,7 @@ void tcp_update_metrics(struct sock *sk)
                                dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
                        if (!dst_metric_locked(dst, RTAX_CWND) &&
                            tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
+                               dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
                } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                           icsk->icsk_ca_state == TCP_CA_Open) {
                        /* Cong. avoidance phase, cwnd is reliable. */
@@ -791,21 +777,21 @@ void tcp_update_metrics(struct sock *sk)
                                dst->metrics[RTAX_SSTHRESH-1] =
                                        max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1;
+                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
                } else {
                        /* Else slow start did not finish, cwnd is non-sense,
                           ssthresh may be also invalid.
                         */
                        if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1;
-                       if (dst->metrics[RTAX_SSTHRESH-1] &&
+                               dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+                       if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-                           tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1])
+                           tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
                                dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
                }
 
                if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-                       if (dst->metrics[RTAX_REORDERING-1] < tp->reordering &&
+                       if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                dst->metrics[RTAX_REORDERING-1] = tp->reordering;
                }
@@ -863,6 +849,9 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  */
 static void tcp_disable_fack(struct tcp_sock *tp)
 {
+       /* RFC3517 uses different metric in lost marker => reset on change */
+       if (tcp_is_fack(tp))
+               tp->lost_skb_hint = NULL;
        tp->rx_opt.sack_ok &= ~2;
 }
 
@@ -900,7 +889,7 @@ static void tcp_init_metrics(struct sock *sk)
        if (dst_metric(dst, RTAX_RTT) == 0)
                goto reset;
 
-       if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
+       if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
                goto reset;
 
        /* Initial rtt is determined from SYN,SYN-ACK.
@@ -917,18 +906,19 @@ static void tcp_init_metrics(struct sock *sk)
         * to a low value, and then abruptly stops doing it and starts to delay
         * ACKs, wait for trouble.
         */
-       if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
-               tp->srtt = dst_metric(dst, RTAX_RTT);
+       if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
+               tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
                tp->rtt_seq = tp->snd_nxt;
        }
-       if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
-               tp->mdev = dst_metric(dst, RTAX_RTTVAR);
-               tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
+       if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
+               tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
+               tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
        }
        tcp_set_rto(sk);
-       tcp_bound_rto(sk);
        if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
                goto reset;
+
+cwnd:
        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        return;
@@ -943,6 +933,7 @@ reset:
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        }
+       goto cwnd;
 }
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
@@ -950,17 +941,21 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        if (metric > tp->reordering) {
+               int mib_idx;
+
                tp->reordering = min(TCP_MAX_REORDERING, metric);
 
                /* This exciting event is worth remembering. 8) */
                if (ts)
-                       NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+                       mib_idx = LINUX_MIB_TCPTSREORDER;
                else if (tcp_is_reno(tp))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+                       mib_idx = LINUX_MIB_TCPRENOREORDER;
                else if (tcp_is_fack(tp))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+                       mib_idx = LINUX_MIB_TCPFACKREORDER;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+                       mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
                printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
                       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -973,6 +968,40 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
        }
 }
 
+/* This must be called before lost_out is incremented */
+static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
+{
+       if ((tp->retransmit_skb_hint == NULL) ||
+           before(TCP_SKB_CB(skb)->seq,
+                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+               tp->retransmit_skb_hint = skb;
+
+       if (!tp->lost_out ||
+           after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
+               tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
+}
+
+static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
+{
+       if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
+               tcp_verify_retransmit_hint(tp, skb);
+
+               tp->lost_out += tcp_skb_pcount(skb);
+               TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
+       }
+}
+
+static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
+                                           struct sk_buff *skb)
+{
+       tcp_verify_retransmit_hint(tp, skb);
+
+       if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
+               tp->lost_out += tcp_skb_pcount(skb);
+               TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
+       }
+}
+
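
Both helpers funnel through tcp_verify_retransmit_hint(), which maintains
two pieces of state: retransmit_skb_hint points at the lowest-sequence skb
the retransmit walker still has to consider, and retransmit_high is the
highest end_seq marked lost so far. The "uncond" variant refreshes the hint
even when the tag bits end up untouched. A toy model of the invariant over
bare sequence numbers (names mine; the kernel uses wraparound-safe
before()/after()):

    struct rexmit_hint {
            unsigned int low_seq;  /* like TCP_SKB_CB(hint)->seq */
            unsigned int high;     /* like tp->retransmit_high */
            int have_hint;
            int have_lost;         /* like tp->lost_out != 0 */
    };

    static void verify_hint(struct rexmit_hint *h,
                            unsigned int seq, unsigned int end_seq)
    {
            if (!h->have_hint || seq < h->low_seq) {
                    h->low_seq = seq;  /* new lowest candidate to retransmit */
                    h->have_hint = 1;
            }
            if (!h->have_lost || end_seq > h->high)
                    h->high = end_seq; /* highest sequence marked lost */
    }
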
 /* This procedure tags the retransmission queue when SACKs arrive.
  *
  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@ -1112,16 +1141,22 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
  *
  * Search retransmitted skbs from write_queue that were sent when snd_nxt was
  * less than what is now known to be received by the other end (derived from
- * SACK blocks by the caller). Also calculate the lowest snd_nxt among the
- * remaining retransmitted skbs to avoid some costly processing per ACKs.
+ * highest SACK block). Also calculate the lowest snd_nxt among the remaining
+ * retransmitted skbs to avoid some costly processing per ACK.
  */
-static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
+static void tcp_mark_lost_retrans(struct sock *sk)
 {
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int flag = 0;
        int cnt = 0;
        u32 new_low_seq = tp->snd_nxt;
+       u32 received_upto = tcp_highest_sack_seq(tp);
+
+       if (!tcp_is_fack(tp) || !tp->retrans_out ||
+           !after(received_upto, tp->lost_retrans_low) ||
+           icsk->icsk_ca_state != TCP_CA_Recovery)
+               return;
 
        tcp_for_write_queue(skb, sk) {
                u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1136,22 +1171,23 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
                if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (after(received_upto, ack_seq) &&
-                   (tcp_is_fack(tp) ||
-                    !before(received_upto,
-                            ack_seq + tp->reordering * tp->mss_cache))) {
+               /* TODO: We would like to get rid of the tcp_is_fack(tp)-only
+                * constraint here (see above), but figuring out that at
+                * least tp->reordering SACK blocks reside between ack_seq
+                * and received_upto is not an easy task to do cheaply with
+                * the available data structures.
+                *
+                * Whether FACK should check here for tp->reordering segments
+                * in between is arguable either way (it would be rather
+                * simple to implement, as we could count fack_count during
+                * the walk and do tp->fackets_out - fack_count).
+                */
+               if (after(received_upto, ack_seq)) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
 
-                       /* clear lost hint */
-                       tp->retransmit_skb_hint = NULL;
-
-                       if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
-                               tp->lost_out += tcp_skb_pcount(skb);
-                               TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-                               flag |= FLAG_DATA_SACKED;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
-                       }
+                       tcp_skb_mark_lost_uncond_verify(tp, skb);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
                        if (before(ack_seq, new_low_seq))
                                new_low_seq = ack_seq;
@@ -1161,31 +1197,31 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 
        if (tp->retrans_out)
                tp->lost_retrans_low = new_low_seq;
-
-       return flag;
 }
 
-static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
                           struct tcp_sack_block_wire *sp, int num_sacks,
                           u32 prior_snd_una)
 {
-       u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
-       u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+       struct tcp_sock *tp = tcp_sk(sk);
+       u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
+       u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
        int dup_sack = 0;
 
        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
                dup_sack = 1;
                tcp_dsack_seen(tp);
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
-               u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
-               u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+               u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
+               u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
 
                if (!after(end_seq_0, end_seq_1) &&
                    !before(start_seq_0, start_seq_1)) {
                        dup_sack = 1;
                        tcp_dsack_seen(tp);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                       LINUX_MIB_TCPDSACKOFORECV);
                }
        }
 
@@ -1198,31 +1234,58 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
        return dup_sack;
 }
 
+struct tcp_sacktag_state {
+       int reord;
+       int fack_count;
+       int flag;
+};
+
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
  * the incoming SACK may not exactly match but we can find smaller MSS
  * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and create some hassle (caller must handle error case
  * returns).
+ *
+ * FIXME: this could be merged to shift decision code
  */
-int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
-                         u32 start_seq, u32 end_seq)
+static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+                                u32 start_seq, u32 end_seq)
 {
        int in_sack, err;
        unsigned int pkt_len;
+       unsigned int mss;
 
        in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
                  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
        if (tcp_skb_pcount(skb) > 1 && !in_sack &&
            after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
-
+               mss = tcp_skb_mss(skb);
                in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
 
-               if (!in_sack)
+               if (!in_sack) {
                        pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
-               else
+                       if (pkt_len < mss)
+                               pkt_len = mss;
+               } else {
                        pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
-               err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size);
+                       if (pkt_len < mss)
+                               return -EINVAL;
+               }
+
+               /* Round if necessary so that SACKs cover only full MSSes
+                * and/or the remaining small portion (if present)
+                */
+               if (pkt_len > mss) {
+                       unsigned int new_len = (pkt_len / mss) * mss;
+                       if (!in_sack && new_len < pkt_len) {
+                               new_len += mss;
+                               if (new_len > skb->len)
+                                       return 0;
+                       }
+                       pkt_len = new_len;
+               }
+               err = tcp_fragment(sk, skb, pkt_len, mss);
                if (err < 0)
                        return err;
        }
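
The rounding keeps every fragment boundary on an MSS multiple: a tail split
must cover at least one full MSS (otherwise the match is rejected), and a
head split is rounded up to whole MSSes, bailing out when rounding up would
swallow the entire skb. A standalone restatement of just that rule (it
mirrors the branch above; the error values are mine):

    /* Returns the MSS-aligned split length, 0 for "treat as no match",
     * or -1 where the kernel returns -EINVAL.
     */
    static int mss_aligned_split(unsigned int pkt_len, unsigned int mss,
                                 unsigned int skb_len, int in_sack)
    {
            if (pkt_len < mss) {
                    if (in_sack)
                            return -1;  /* tail shorter than one MSS */
                    pkt_len = mss;      /* head split rounds up to one MSS */
            } else if (pkt_len > mss) {
                    unsigned int new_len = (pkt_len / mss) * mss;

                    if (!in_sack && new_len < pkt_len) {
                            new_len += mss;
                            if (new_len > skb_len)
                                    return 0;  /* would cover the whole skb */
                    }
                    pkt_len = new_len;
            }
            return (int)pkt_len;
    }
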
@@ -1230,36 +1293,454 @@ int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
        return in_sack;
 }
 
+static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+                         struct tcp_sacktag_state *state,
+                         int dup_sack, int pcount)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       u8 sacked = TCP_SKB_CB(skb)->sacked;
+       int fack_count = state->fack_count;
+
+       /* Account D-SACK for retransmitted packet. */
+       if (dup_sack && (sacked & TCPCB_RETRANS)) {
+               if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+                       tp->undo_retrans--;
+               if (sacked & TCPCB_SACKED_ACKED)
+                       state->reord = min(fack_count, state->reord);
+       }
+
+       /* Nothing to do; acked frame is about to be dropped (was ACKed). */
+       if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+               return sacked;
+
+       if (!(sacked & TCPCB_SACKED_ACKED)) {
+               if (sacked & TCPCB_SACKED_RETRANS) {
+                       /* If the segment is not tagged as lost,
+                        * we do not clear RETRANS, believing
+                        * that retransmission is still in flight.
+                        */
+                       if (sacked & TCPCB_LOST) {
+                               sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+                               tp->lost_out -= pcount;
+                               tp->retrans_out -= pcount;
+                       }
+               } else {
+                       if (!(sacked & TCPCB_RETRANS)) {
+                               /* New sack for not retransmitted frame,
+                                * which was in hole. It is reordering.
+                                */
+                               if (before(TCP_SKB_CB(skb)->seq,
+                                          tcp_highest_sack_seq(tp)))
+                                       state->reord = min(fack_count,
+                                                          state->reord);
+
+                               /* SACK enhanced F-RTO (RFC4138; Appendix B) */
+                               if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+                                       state->flag |= FLAG_ONLY_ORIG_SACKED;
+                       }
+
+                       if (sacked & TCPCB_LOST) {
+                               sacked &= ~TCPCB_LOST;
+                               tp->lost_out -= pcount;
+                       }
+               }
+
+               sacked |= TCPCB_SACKED_ACKED;
+               state->flag |= FLAG_DATA_SACKED;
+               tp->sacked_out += pcount;
+
+               fack_count += pcount;
+
+               /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
+               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+                   before(TCP_SKB_CB(skb)->seq,
+                          TCP_SKB_CB(tp->lost_skb_hint)->seq))
+                       tp->lost_cnt_hint += pcount;
+
+               if (fack_count > tp->fackets_out)
+                       tp->fackets_out = fack_count;
+       }
+
+       /* D-SACK. We can detect redundant retransmission in S|R and plain R
+        * frames and clear it. undo_retrans is decreased above, L|R frames
+        * are accounted above as well.
+        */
+       if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
+               sacked &= ~TCPCB_SACKED_RETRANS;
+               tp->retrans_out -= pcount;
+       }
+
+       return sacked;
+}
+
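
tcp_sacktag_one() is a small state machine over the tag bits S
(SACKED_ACKED), R (SACKED_RETRANS) and L (LOST). The interesting transitions
when a not-yet-SACKed skb is newly SACKed: L|R loses both bits (the
retransmission evidently arrived), bare L is cleared, and bare R is kept
because the retransmission may still be in flight. A toy transition
function, leaving out the D-SACK handling and the counter updates (the bit
values match this era's TCPCB_* defines, but treat them as an assumption):

    #define S 0x01  /* TCPCB_SACKED_ACKED   */
    #define R 0x02  /* TCPCB_SACKED_RETRANS */
    #define L 0x04  /* TCPCB_LOST           */

    static unsigned char sacktag_transition(unsigned char sacked)
    {
            if (sacked & S)
                    return sacked;       /* already SACKed: nothing to do */
            if ((sacked & (L | R)) == (L | R))
                    sacked &= ~(L | R);  /* lost rexmit arrived after all */
            else
                    sacked &= ~L;        /* hole SACKed; bare R is kept */
            return sacked | S;
    }
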
+static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+                          struct tcp_sacktag_state *state,
+                          unsigned int pcount, int shifted, int mss,
+                          int dup_sack)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+
+       BUG_ON(!pcount);
+
+       /* Tweak before seqno plays */
+       if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
+           !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+               tp->lost_cnt_hint += pcount;
+
+       TCP_SKB_CB(prev)->end_seq += shifted;
+       TCP_SKB_CB(skb)->seq += shifted;
+
+       skb_shinfo(prev)->gso_segs += pcount;
+       BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
+       skb_shinfo(skb)->gso_segs -= pcount;
+
+       /* When we're adding to gso_segs == 1, gso_size will be zero;
+        * in theory this shouldn't be necessary, but as long as DSACK
+        * code can come after this skb later on, it's better to keep
+        * setting gso_size to something.
+        */
+       if (!skb_shinfo(prev)->gso_size) {
+               skb_shinfo(prev)->gso_size = mss;
+               skb_shinfo(prev)->gso_type = sk->sk_gso_type;
+       }
+
+       /* CHECKME: To clear or not to clear? Mimics normal skb currently */
+       if (skb_shinfo(skb)->gso_segs <= 1) {
+               skb_shinfo(skb)->gso_size = 0;
+               skb_shinfo(skb)->gso_type = 0;
+       }
+
+       /* We discard results */
+       tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+
+       /* Difference in this won't matter, both ACKed by the same cumul. ACK */
+       TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
+
+       if (skb->len > 0) {
+               BUG_ON(!tcp_skb_pcount(skb));
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+               return 0;
+       }
+
+       /* Whole SKB was eaten :-) */
+
+       if (skb == tp->retransmit_skb_hint)
+               tp->retransmit_skb_hint = prev;
+       if (skb == tp->scoreboard_skb_hint)
+               tp->scoreboard_skb_hint = prev;
+       if (skb == tp->lost_skb_hint) {
+               tp->lost_skb_hint = prev;
+               tp->lost_cnt_hint -= tcp_skb_pcount(prev);
+       }
+
+       TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
+       if (skb == tcp_highest_sack(sk))
+               tcp_advance_highest_sack(sk, skb);
+
+       tcp_unlink_write_queue(skb, sk);
+       sk_wmem_free_skb(sk, skb);
+
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+
+       return 1;
+}
+
+/* I wish gso_size had a saner initialization than
+ * something-or-zero, which complicates things.
+ */
+static int tcp_skb_seglen(struct sk_buff *skb)
+{
+       return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
+}
+
+/* Shifting pages past head area doesn't work */
+static int skb_can_shift(struct sk_buff *skb)
+{
+       return !skb_headlen(skb) && skb_is_nonlinear(skb);
+}
+
+/* Try collapsing SACK blocks spanning across multiple skbs to a single
+ * skb.
+ */
+static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+                                         struct tcp_sacktag_state *state,
+                                         u32 start_seq, u32 end_seq,
+                                         int dup_sack)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *prev;
+       int mss;
+       int pcount = 0;
+       int len;
+       int in_sack;
+
+       if (!sk_can_gso(sk))
+               goto fallback;
+
+       /* Normally R but no L won't result in plain S */
+       if (!dup_sack &&
+           (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
+               goto fallback;
+       if (!skb_can_shift(skb))
+               goto fallback;
+       /* This frame is about to be dropped (was ACKed). */
+       if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+               goto fallback;
+
+       /* Can only happen with delayed DSACK + discard craziness */
+       if (unlikely(skb == tcp_write_queue_head(sk)))
+               goto fallback;
+       prev = tcp_write_queue_prev(sk, skb);
+
+       if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
+               goto fallback;
+
+       in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
+                 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
+
+       if (in_sack) {
+               len = skb->len;
+               pcount = tcp_skb_pcount(skb);
+               mss = tcp_skb_seglen(skb);
+
+               /* TODO: Fix DSACKs to not fragment already SACKed and we can
+                * drop this restriction as unnecessary
+                */
+               if (mss != tcp_skb_seglen(prev))
+                       goto fallback;
+       } else {
+               if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
+                       goto noop;
+               /* CHECKME: This is the non-MSS split case only? This will
+                * cause skipped skbs due to the advancing loop, btw; the
+                * original has that feature too.
+                */
+               if (tcp_skb_pcount(skb) <= 1)
+                       goto noop;
+
+               in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
+               if (!in_sack) {
+                       /* TODO: head merge to next could be attempted here
+                        * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
+                        * though it might not be worth the additional hassle
+                        *
+                        * ...we can probably just fall back to what was done
+                        * previously. We could try merging non-SACKed ones
+                        * as well, but it probably isn't going to pay off
+                        * because later SACKs might again split them, and
+                        * it would make skb timestamp tracking a considerably
+                        * harder problem.
+                        */
+                       goto fallback;
+               }
+
+               len = end_seq - TCP_SKB_CB(skb)->seq;
+               BUG_ON(len < 0);
+               BUG_ON(len > skb->len);
+
+               /* MSS boundaries should be honoured or else pcount will
+                * severely break even though it makes things a bit trickier.
+                * Optimize the common case to avoid most of the divides.
+                */
+               mss = tcp_skb_mss(skb);
+
+               /* TODO: Fix DSACKs to not fragment already SACKed and we can
+                * drop this restriction as unnecessary
+                */
+               if (mss != tcp_skb_seglen(prev))
+                       goto fallback;
+
+               if (len == mss) {
+                       pcount = 1;
+               } else if (len < mss) {
+                       goto noop;
+               } else {
+                       pcount = len / mss;
+                       len = pcount * mss;
+               }
+       }
+
+       if (!skb_shift(prev, skb, len))
+               goto fallback;
+       if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+               goto out;
+
+       /* A filled hole allows collapsing with the next skb as well; this is
+        * very useful when a hole-on-every-nth-skb pattern happens.
+        */
+       if (prev == tcp_write_queue_tail(sk))
+               goto out;
+       skb = tcp_write_queue_next(sk, prev);
+
+       if (!skb_can_shift(skb) ||
+           (skb == tcp_send_head(sk)) ||
+           ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
+           (mss != tcp_skb_seglen(skb)))
+               goto out;
+
+       len = skb->len;
+       if (skb_shift(prev, skb, len)) {
+               pcount += tcp_skb_pcount(skb);
+               tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+       }
+
+out:
+       state->fack_count += pcount;
+       return prev;
+
+noop:
+       return skb;
+
+fallback:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+       return NULL;
+}
+
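
When only part of an skb falls inside the SACK block, the shifting path
moves whole MSS-sized chunks only, so the gso_segs/pcount accounting stays
exact. The tail arithmetic reduces to this (a sketch; the example values in
the comment are mine):

    /* How much of a partially covered skb may be shifted to prev. */
    static void shiftable(unsigned int covered_len, unsigned int mss,
                          unsigned int *len, unsigned int *pcount)
    {
            if (covered_len < mss) {
                    *len = 0;           /* "noop": under one full segment */
                    *pcount = 0;
            } else {
                    *pcount = covered_len / mss;
                    *len = *pcount * mss;  /* e.g. 3500 @ mss 1448: 2896 */
            }
    }
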
+static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
+                                       struct tcp_sack_block *next_dup,
+                                       struct tcp_sacktag_state *state,
+                                       u32 start_seq, u32 end_seq,
+                                       int dup_sack_in)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *tmp;
+
+       tcp_for_write_queue_from(skb, sk) {
+               int in_sack = 0;
+               int dup_sack = dup_sack_in;
+
+               if (skb == tcp_send_head(sk))
+                       break;
+
+               /* queue is in-order => we can short-circuit the walk early */
+               if (!before(TCP_SKB_CB(skb)->seq, end_seq))
+                       break;
+
+               if ((next_dup != NULL) &&
+                   before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
+                       in_sack = tcp_match_skb_to_sack(sk, skb,
+                                                       next_dup->start_seq,
+                                                       next_dup->end_seq);
+                       if (in_sack > 0)
+                               dup_sack = 1;
+               }
+
+               /* skb reference here is a bit tricky to get right, since
+                * shifting can eat and free both this skb and the next,
+                * so not even the _safe variant of the loop is enough.
+                */
+               if (in_sack <= 0) {
+                       tmp = tcp_shift_skb_data(sk, skb, state,
+                                                start_seq, end_seq, dup_sack);
+                       if (tmp != NULL) {
+                               if (tmp != skb) {
+                                       skb = tmp;
+                                       continue;
+                               }
+
+                               in_sack = 0;
+                       } else {
+                               in_sack = tcp_match_skb_to_sack(sk, skb,
+                                                               start_seq,
+                                                               end_seq);
+                       }
+               }
+
+               if (unlikely(in_sack < 0))
+                       break;
+
+               if (in_sack) {
+                       TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
+                                                                 state,
+                                                                 dup_sack,
+                                                                 tcp_skb_pcount(skb));
+
+                       if (!before(TCP_SKB_CB(skb)->seq,
+                                   tcp_highest_sack_seq(tp)))
+                               tcp_advance_highest_sack(sk, skb);
+               }
+
+               state->fack_count += tcp_skb_pcount(skb);
+       }
+       return skb;
+}
+
+/* Avoid all the extra work that sacktag does while walking in
+ * the normal way.
+ */
+static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
+                                       struct tcp_sacktag_state *state,
+                                       u32 skip_to_seq)
+{
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+
+               if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
+                       break;
+
+               state->fack_count += tcp_skb_pcount(skb);
+       }
+       return skb;
+}
+
+static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
+                                               struct sock *sk,
+                                               struct tcp_sack_block *next_dup,
+                                               struct tcp_sacktag_state *state,
+                                               u32 skip_to_seq)
+{
+       if (next_dup == NULL)
+               return skb;
+
+       if (before(next_dup->start_seq, skip_to_seq)) {
+               skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
+               skb = tcp_sacktag_walk(skb, sk, NULL, state,
+                                      next_dup->start_seq, next_dup->end_seq,
+                                      1);
+       }
+
+       return skb;
+}
+
+static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
+{
+       return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
+}
+
 static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
+tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+                       u32 prior_snd_una)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = (skb_transport_header(ack_skb) +
                              TCP_SKB_CB(ack_skb)->sacked);
-       struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
-       struct sk_buff *cached_skb;
-       int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
-       int reord = tp->packets_out;
-       int prior_fackets;
-       u32 highest_sack_end_seq = tp->lost_retrans_low;
-       int flag = 0;
+       struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
+       struct tcp_sack_block sp[TCP_NUM_SACKS];
+       struct tcp_sack_block *cache;
+       struct tcp_sacktag_state state;
+       struct sk_buff *skb;
+       int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
+       int used_sacks;
        int found_dup_sack = 0;
-       int cached_fack_count;
-       int i;
+       int i, j;
        int first_sack_index;
 
+       state.flag = 0;
+       state.reord = tp->packets_out;
+
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
                        tp->fackets_out = 0;
-               tp->highest_sack = tp->snd_una;
+               tcp_highest_sack_reset(sk);
        }
-       prior_fackets = tp->fackets_out;
 
-       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp,
+       found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
                                         num_sacks, prior_snd_una);
        if (found_dup_sack)
-               flag |= FLAG_DSACKING_ACK;
+               state.flag |= FLAG_DSACKING_ACK;
 
        /* Eliminate too old ACKs, but take into
         * account more or less fresh ones, they can
@@ -1268,254 +1749,189 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
                return 0;
 
-       /* SACK fastpath:
-        * if the only SACK change is the increase of the end_seq of
-        * the first block then only apply that SACK block
-        * and use retrans queue hinting otherwise slowpath */
-       flag = 1;
-       for (i = 0; i < num_sacks; i++) {
-               __be32 start_seq = sp[i].start_seq;
-               __be32 end_seq = sp[i].end_seq;
-
-               if (i == 0) {
-                       if (tp->recv_sack_cache[i].start_seq != start_seq)
-                               flag = 0;
-               } else {
-                       if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
-                           (tp->recv_sack_cache[i].end_seq != end_seq))
-                               flag = 0;
-               }
-               tp->recv_sack_cache[i].start_seq = start_seq;
-               tp->recv_sack_cache[i].end_seq = end_seq;
-       }
-       /* Clear the rest of the cache sack blocks so they won't match mistakenly. */
-       for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
-               tp->recv_sack_cache[i].start_seq = 0;
-               tp->recv_sack_cache[i].end_seq = 0;
-       }
+       if (!tp->packets_out)
+               goto out;
 
+       used_sacks = 0;
        first_sack_index = 0;
-       if (flag)
-               num_sacks = 1;
-       else {
-               int j;
-               tp->fastpath_skb_hint = NULL;
-
-               /* order SACK blocks to allow in order walk of the retrans queue */
-               for (i = num_sacks-1; i > 0; i--) {
-                       for (j = 0; j < i; j++){
-                               if (after(ntohl(sp[j].start_seq),
-                                         ntohl(sp[j+1].start_seq))){
-                                       struct tcp_sack_block_wire tmp;
-
-                                       tmp = sp[j];
-                                       sp[j] = sp[j+1];
-                                       sp[j+1] = tmp;
-
-                                       /* Track where the first SACK block goes to */
-                                       if (j == first_sack_index)
-                                               first_sack_index = j+1;
-                               }
-
-                       }
-               }
-       }
+       for (i = 0; i < num_sacks; i++) {
+               int dup_sack = !i && found_dup_sack;
 
-       /* clear flag as used for different purpose in following code */
-       flag = 0;
+               sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
+               sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
 
-       /* Use SACK fastpath hint if valid */
-       cached_skb = tp->fastpath_skb_hint;
-       cached_fack_count = tp->fastpath_cnt_hint;
-       if (!cached_skb) {
-               cached_skb = tcp_write_queue_head(sk);
-               cached_fack_count = 0;
-       }
+               if (!tcp_is_sackblock_valid(tp, dup_sack,
+                                           sp[used_sacks].start_seq,
+                                           sp[used_sacks].end_seq)) {
+                       int mib_idx;
 
-       for (i=0; i<num_sacks; i++, sp++) {
-               struct sk_buff *skb;
-               __u32 start_seq = ntohl(sp->start_seq);
-               __u32 end_seq = ntohl(sp->end_seq);
-               int fack_count;
-               int dup_sack = (found_dup_sack && (i == first_sack_index));
-
-               if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
                        if (dup_sack) {
                                if (!tp->undo_marker)
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
+                                       mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
                                else
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD);
+                                       mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
                        } else {
                                /* Don't count olds caused by ACK reordering */
                                if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
-                                   !after(end_seq, tp->snd_una))
+                                   !after(sp[used_sacks].end_seq, tp->snd_una))
                                        continue;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
+                               mib_idx = LINUX_MIB_TCPSACKDISCARD;
                        }
+
+                       NET_INC_STATS_BH(sock_net(sk), mib_idx);
+                       if (i == 0)
+                               first_sack_index = -1;
                        continue;
                }
 
-               skb = cached_skb;
-               fack_count = cached_fack_count;
-
-               /* Event "B" in the comment above. */
-               if (after(end_seq, tp->high_seq))
-                       flag |= FLAG_DATA_LOST;
+               /* Ignore very old stuff early */
+               if (!after(sp[used_sacks].end_seq, prior_snd_una))
+                       continue;
 
-               tcp_for_write_queue_from(skb, sk) {
-                       int in_sack;
-                       u8 sacked;
+               used_sacks++;
+       }
 
-                       if (skb == tcp_send_head(sk))
-                               break;
+       /* Order SACK blocks to allow an in-order walk of the retrans queue */
+       for (i = used_sacks - 1; i > 0; i--) {
+               for (j = 0; j < i; j++) {
+                       if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
+                               swap(sp[j], sp[j + 1]);
 
-                       cached_skb = skb;
-                       cached_fack_count = fack_count;
-                       if (i == first_sack_index) {
-                               tp->fastpath_skb_hint = skb;
-                               tp->fastpath_cnt_hint = fack_count;
+                               /* Track where the first SACK block goes to */
+                               if (j == first_sack_index)
+                                       first_sack_index = j + 1;
                        }
+               }
+       }
 
-                       /* The retransmission queue is always in order, so
-                        * we can short-circuit the walk early.
-                        */
-                       if (!before(TCP_SKB_CB(skb)->seq, end_seq))
-                               break;
+       skb = tcp_write_queue_head(sk);
+       state.fack_count = 0;
+       i = 0;
 
-                       in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
-                       if (in_sack < 0)
-                               break;
+       if (!tp->sacked_out) {
+               /* It's already past, so skip checking against it */
+               cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
+       } else {
+               cache = tp->recv_sack_cache;
+               /* Skip empty blocks at the head of the cache */
+               while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
+                      !cache->end_seq)
+                       cache++;
+       }
 
-                       fack_count += tcp_skb_pcount(skb);
-
-                       sacked = TCP_SKB_CB(skb)->sacked;
-
-                       /* Account D-SACK for retransmitted packet. */
-                       if ((dup_sack && in_sack) &&
-                           (sacked & TCPCB_RETRANS) &&
-                           after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
-                               tp->undo_retrans--;
-
-                       /* The frame is ACKed. */
-                       if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) {
-                               if (sacked&TCPCB_RETRANS) {
-                                       if ((dup_sack && in_sack) &&
-                                           (sacked&TCPCB_SACKED_ACKED))
-                                               reord = min(fack_count, reord);
-                               } else {
-                                       /* If it was in a hole, we detected reordering. */
-                                       if (fack_count < prior_fackets &&
-                                           !(sacked&TCPCB_SACKED_ACKED))
-                                               reord = min(fack_count, reord);
-                               }
+       while (i < used_sacks) {
+               u32 start_seq = sp[i].start_seq;
+               u32 end_seq = sp[i].end_seq;
+               int dup_sack = (found_dup_sack && (i == first_sack_index));
+               struct tcp_sack_block *next_dup = NULL;
 
-                               /* Nothing to do; acked frame is about to be dropped. */
-                               continue;
+               if (found_dup_sack && ((i + 1) == first_sack_index))
+                       next_dup = &sp[i + 1];
+
+               /* Event "B" in the comment above. */
+               if (after(end_seq, tp->high_seq))
+                       state.flag |= FLAG_DATA_LOST;
+
+               /* Skip too early cached blocks */
+               while (tcp_sack_cache_ok(tp, cache) &&
+                      !before(start_seq, cache->end_seq))
+                       cache++;
+
+               /* Can skip some work by looking at recv_sack_cache? */
+               if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
+                   after(end_seq, cache->start_seq)) {
+
+                       /* Head todo? */
+                       if (before(start_seq, cache->start_seq)) {
+                               skb = tcp_sacktag_skip(skb, sk, &state,
+                                                      start_seq);
+                               skb = tcp_sacktag_walk(skb, sk, next_dup,
+                                                      &state,
+                                                      start_seq,
+                                                      cache->start_seq,
+                                                      dup_sack);
                        }
 
-                       if (!in_sack)
-                               continue;
+                       /* Rest of the block already fully processed? */
+                       if (!after(end_seq, cache->end_seq))
+                               goto advance_sp;
 
-                       if (!(sacked&TCPCB_SACKED_ACKED)) {
-                               if (sacked & TCPCB_SACKED_RETRANS) {
-                                       /* If the segment is not tagged as lost,
-                                        * we do not clear RETRANS, believing
-                                        * that retransmission is still in flight.
-                                        */
-                                       if (sacked & TCPCB_LOST) {
-                                               TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
-                                               tp->lost_out -= tcp_skb_pcount(skb);
-                                               tp->retrans_out -= tcp_skb_pcount(skb);
+                       skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
+                                                      &state,
+                                                      cache->end_seq);
 
-                                               /* clear lost hint */
-                                               tp->retransmit_skb_hint = NULL;
-                                       }
-                               } else {
-                                       /* New sack for not retransmitted frame,
-                                        * which was in hole. It is reordering.
-                                        */
-                                       if (!(sacked & TCPCB_RETRANS) &&
-                                           fack_count < prior_fackets)
-                                               reord = min(fack_count, reord);
+                       /* ...tail remains todo... */
+                       if (tcp_highest_sack_seq(tp) == cache->end_seq) {
+                               /* ...but better entrypoint exists! */
+                               skb = tcp_highest_sack(sk);
+                               if (skb == NULL)
+                                       break;
+                               state.fack_count = tp->fackets_out;
+                               cache++;
+                               goto walk;
+                       }
 
-                                       if (sacked & TCPCB_LOST) {
-                                               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
-                                               tp->lost_out -= tcp_skb_pcount(skb);
+                       skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
+                       /* Check overlap against next cached too (past this one already) */
+                       cache++;
+                       continue;
+               }
 
-                                               /* clear lost hint */
-                                               tp->retransmit_skb_hint = NULL;
-                                       }
-                                       /* SACK enhanced F-RTO detection.
-                                        * Set flag if and only if non-rexmitted
-                                        * segments below frto_highmark are
-                                        * SACKed (RFC4138; Appendix B).
-                                        * Clearing correct due to in-order walk
-                                        */
-                                       if (after(end_seq, tp->frto_highmark)) {
-                                               flag &= ~FLAG_ONLY_ORIG_SACKED;
-                                       } else {
-                                               if (!(sacked & TCPCB_RETRANS))
-                                                       flag |= FLAG_ONLY_ORIG_SACKED;
-                                       }
-                               }
+               if (!before(start_seq, tcp_highest_sack_seq(tp))) {
+                       skb = tcp_highest_sack(sk);
+                       if (skb == NULL)
+                               break;
+                       state.fack_count = tp->fackets_out;
+               }
+               skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
 
-                               TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
-                               flag |= FLAG_DATA_SACKED;
-                               tp->sacked_out += tcp_skb_pcount(skb);
+walk:
+               skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+                                      start_seq, end_seq, dup_sack);
 
-                               if (fack_count > tp->fackets_out)
-                                       tp->fackets_out = fack_count;
+advance_sp:
+               /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing is
+                * correct due to the in-order walk
+                */
+               if (after(end_seq, tp->frto_highmark))
+                       state.flag &= ~FLAG_ONLY_ORIG_SACKED;
 
-                               if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack)) {
-                                       tp->highest_sack = TCP_SKB_CB(skb)->seq;
-                                       highest_sack_end_seq = TCP_SKB_CB(skb)->end_seq;
-                               }
-                       } else {
-                               if (dup_sack && (sacked&TCPCB_RETRANS))
-                                       reord = min(fack_count, reord);
-                       }
+               i++;
+       }
 
-                       /* D-SACK. We can detect redundant retransmission
-                        * in S|R and plain R frames and clear it.
-                        * undo_retrans is decreased above, L|R frames
-                        * are accounted above as well.
-                        */
-                       if (dup_sack &&
-                           (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
-                               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
-                               tp->retrans_out -= tcp_skb_pcount(skb);
-                               tp->retransmit_skb_hint = NULL;
-                       }
-               }
+       /* Clear the head of the cached SACK blocks so we can skip them next time */
+       for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
+               tp->recv_sack_cache[i].start_seq = 0;
+               tp->recv_sack_cache[i].end_seq = 0;
        }
+       for (j = 0; j < used_sacks; j++)
+               tp->recv_sack_cache[i++] = sp[j];
 
-       if (tp->retrans_out &&
-           after(highest_sack_end_seq, tp->lost_retrans_low) &&
-           icsk->icsk_ca_state == TCP_CA_Recovery)
-               flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq);
+       tcp_mark_lost_retrans(sk);
 
        tcp_verify_left_out(tp);
 
-       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
+       if ((state.reord < tp->fackets_out) &&
+           ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
            (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
-               tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
+               tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
+
+out:
 
 #if FASTRETRANS_DEBUG > 0
-       BUG_TRAP((int)tp->sacked_out >= 0);
-       BUG_TRAP((int)tp->lost_out >= 0);
-       BUG_TRAP((int)tp->retrans_out >= 0);
-       BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
+       WARN_ON((int)tp->sacked_out < 0);
+       WARN_ON((int)tp->lost_out < 0);
+       WARN_ON((int)tp->retrans_out < 0);
+       WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-       return flag;
+       return state.flag;
 }
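
The sorted walk above only works because all sequence arithmetic is done modulo 2^32. Below is a minimal userspace sketch (illustrative only, not the kernel's code) of the before()/after() helpers and the small bubble sort applied to the SACK blocks; sort_sacks() is a hypothetical stand-in for the ordering loop in the diff.

        /* Illustrative sketch: wraparound-safe sequence comparison and a
         * bubble sort over a handful of SACK blocks.
         */
        #include <stdint.h>
        #include <stdio.h>

        static int before(uint32_t seq1, uint32_t seq2)
        {
                return (int32_t)(seq1 - seq2) < 0;      /* modulo-2^32 compare */
        }
        #define after(seq2, seq1)       before(seq1, seq2)

        struct sack_block { uint32_t start_seq, end_seq; };

        static void sort_sacks(struct sack_block *sp, int used)
        {
                for (int i = used - 1; i > 0; i--)
                        for (int j = 0; j < i; j++)
                                if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
                                        struct sack_block tmp = sp[j];

                                        sp[j] = sp[j + 1];
                                        sp[j + 1] = tmp;
                                }
        }

        int main(void)
        {
                /* A block just below the 2^32 wrap sorts before one just above it */
                struct sack_block sp[2] = {
                        { 0x00000010, 0x00000020 },
                        { 0xfffffff0, 0x00000000 },
                };

                sort_sacks(sp, 2);
                printf("%x %x\n", (unsigned)sp[0].start_seq,
                       (unsigned)sp[1].start_seq);      /* fffffff0 10 */
                return 0;
        }
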
 
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
+/* Limits sacked_out so that sum with lost_out isn't ever larger than
+ * packets_out. Returns zero if sacked_out adjustment wasn't necessary.
  */
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+static int tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        u32 holes;
 
        holes = max(tp->lost_out, 1U);
@@ -1523,8 +1939,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
        if ((tp->sacked_out + holes) > tp->packets_out) {
                tp->sacked_out = tp->packets_out - holes;
-               tcp_update_reordering(sk, tp->packets_out + addend, 0);
+               return 1;
        }
+       return 0;
+}
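
A toy model of the invariant tcp_limit_reno_sacked() maintains for SACKless connections, using a stand-in struct rather than the kernel's tcp_sock (hypothetical names, illustrative only):

        #include <stdio.h>

        struct reno_state { unsigned sacked_out, lost_out, packets_out; };

        static int limit_reno_sacked(struct reno_state *tp)
        {
                unsigned holes = tp->lost_out > 1 ? tp->lost_out : 1;

                if (tp->sacked_out + holes > tp->packets_out) {
                        tp->sacked_out = tp->packets_out - holes;
                        return 1;       /* adjustment happened: may be reordering */
                }
                return 0;
        }

        int main(void)
        {
                struct reno_state tp = {
                        .sacked_out = 9, .lost_out = 2, .packets_out = 10,
                };

                if (limit_reno_sacked(&tp))     /* 9 + 2 > 10, clamp */
                        printf("clamped sacked_out to %u\n", tp.sacked_out);
                return 0;
        }
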
+
+/* If we receive more dupacks than we expected, counting segments
+ * under the assumption of no reordering, interpret this as reordering.
+ * The only other reason could be a bug in the receiver's TCP.
+ */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       if (tcp_limit_reno_sacked(tp))
+               tcp_update_reordering(sk, tp->packets_out + addend, 0);
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
@@ -1545,10 +1973,10 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
-               if (acked-1 >= tp->sacked_out)
+               if (acked - 1 >= tp->sacked_out)
                        tp->sacked_out = 0;
                else
-                       tp->sacked_out -= acked-1;
+                       tp->sacked_out -= acked - 1;
        }
        tcp_check_reno_reordering(sk, acked);
        tcp_verify_left_out(tp);
@@ -1559,18 +1987,28 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
        tp->sacked_out = 0;
 }
 
+static int tcp_is_sackfrto(const struct tcp_sock *tp)
+{
+       return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
+}
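
As read from this diff, sysctl_tcp_frto is interpreted as: 0 disables F-RTO, 2 selects the SACK-enhanced variant (RFC 4138, Appendix B) when the connection actually uses SACK, and any other non-zero value gives basic F-RTO. A hedged decision-table sketch with a hypothetical helper:

        #include <stdio.h>

        enum frto_mode { FRTO_OFF, FRTO_BASIC, FRTO_SACK_ENHANCED };

        static enum frto_mode frto_mode_of(int sysctl_frto, int peer_has_sack)
        {
                if (!sysctl_frto)
                        return FRTO_OFF;
                if (sysctl_frto == 2 && peer_has_sack)
                        return FRTO_SACK_ENHANCED;
                return FRTO_BASIC;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       frto_mode_of(0, 1),      /* off */
                       frto_mode_of(2, 0),      /* basic: SACK not in use */
                       frto_mode_of(2, 1));     /* SACK-enhanced */
                return 0;
        }
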
+
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
 int tcp_use_frto(struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb;
 
        if (!sysctl_tcp_frto)
                return 0;
 
-       if (IsSackFrto())
+       /* MTU probe and F-RTO currently won't play nicely together */
+       if (icsk->icsk_mtup.probe_size)
+               return 0;
+
+       if (tcp_is_sackfrto(tp))
                return 1;
 
        /* Avoid expensive walking of rexmit queue if possible */
@@ -1578,14 +2016,16 @@ int tcp_use_frto(struct sock *sk)
                return 0;
 
        skb = tcp_write_queue_head(sk);
+       if (tcp_skb_is_last(sk, skb))
+               return 1;
        skb = tcp_write_queue_next(sk, skb);    /* Skips head */
        tcp_for_write_queue_from(skb, sk) {
                if (skb == tcp_send_head(sk))
                        break;
-               if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
                        return 0;
                /* Short-circuit when first non-SACKed skb has been checked */
-               if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
+               if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
                        break;
        }
        return 1;
@@ -1654,10 +2094,13 @@ void tcp_enter_frto(struct sock *sk)
        }
        tcp_verify_left_out(tp);
 
+       /* Too bad if TCP was application limited */
+       tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
        /* Earlier loss recovery underway (see RFC4138; Appendix B).
         * The last condition is necessary at least in tp->frto_counter case.
         */
-       if (IsSackFrto() && (tp->frto_counter ||
+       if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
            ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
            after(tp->high_seq, tp->snd_una)) {
                tp->frto_highmark = tp->high_seq;
@@ -1686,11 +2129,13 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
        tcp_for_write_queue(skb, sk) {
                if (skb == tcp_send_head(sk))
                        break;
+
+               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                /*
                 * Count the retransmission made on RTO correctly (only when
                 * waiting for the first ACK and did not get it)...
                 */
-               if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
+               if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
                        /* For some reason this R-bit might get cleared? */
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                                tp->retrans_out += tcp_skb_pcount(skb);
@@ -1699,14 +2144,22 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
                } else {
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
                                tp->undo_marker = 0;
-                       TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+                       TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                }
 
-               /* Don't lost mark skbs that were fwd transmitted after RTO */
-               if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) &&
-                   !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+               /* Marking forward transmissions that were made after RTO lost
+                * can cause unnecessary retransmissions in some scenarios;
+                * SACK blocks will mitigate that in some but not all cases.
+                * We used to not mark them, but that caused break-ups with
+                * receivers that only accept in-order data.
+                *
+                * TODO: we could detect presence of such receiver and select
+                * different behavior per flow.
+                */
+               if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        tp->lost_out += tcp_skb_pcount(skb);
+                       tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
                }
        }
        tcp_verify_left_out(tp);
@@ -1718,12 +2171,12 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
        tp->bytes_acked = 0;
 
        tp->reordering = min_t(unsigned int, tp->reordering,
-                                            sysctl_tcp_reordering);
+                              sysctl_tcp_reordering);
        tcp_set_ca_state(sk, TCP_CA_Loss);
-       tp->high_seq = tp->frto_highmark;
+       tp->high_seq = tp->snd_nxt;
        TCP_ECN_queue_cwr(tp);
 
-       tcp_clear_retrans_hints_partial(tp);
+       tcp_clear_all_retrans_hints(tp);
 }
 
 static void tcp_clear_retrans_partial(struct tcp_sock *tp)
@@ -1774,30 +2227,30 @@ void tcp_enter_loss(struct sock *sk, int how)
                /* Push undo marker, if it was plain RTO and nothing
                 * was retransmitted. */
                tp->undo_marker = tp->snd_una;
-               tcp_clear_retrans_hints_partial(tp);
        } else {
                tp->sacked_out = 0;
                tp->fackets_out = 0;
-               tcp_clear_all_retrans_hints(tp);
        }
+       tcp_clear_all_retrans_hints(tp);
 
        tcp_for_write_queue(skb, sk) {
                if (skb == tcp_send_head(sk))
                        break;
 
-               if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
                        tp->undo_marker = 0;
                TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        tp->lost_out += tcp_skb_pcount(skb);
+                       tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
                }
        }
        tcp_verify_left_out(tp);
 
        tp->reordering = min_t(unsigned int, tp->reordering,
-                                            sysctl_tcp_reordering);
+                              sysctl_tcp_reordering);
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        TCP_ECN_queue_cwr(tp);
@@ -1805,20 +2258,17 @@ void tcp_enter_loss(struct sock *sk, int how)
        tp->frto_counter = 0;
 }
 
-static int tcp_check_sack_reneging(struct sock *sk)
+/* If the ACK arrived pointing to a remembered SACK, it means that our
+ * remembered SACKs do not reflect the real state of the receiver, i.e.
+ * the receiver _host_ is heavily congested (or buggy).
+ *
+ * Do processing similar to RTO timeout.
+ */
+static int tcp_check_sack_reneging(struct sock *sk, int flag)
 {
-       struct sk_buff *skb;
-
-       /* If ACK arrived pointing to a remembered SACK,
-        * it means that our remembered SACKs do not reflect
-        * real state of receiver i.e.
-        * receiver _host_ is heavily congested (or buggy).
-        * Do processing similar to RTO timeout.
-        */
-       if ((skb = tcp_write_queue_head(sk)) != NULL &&
-           (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
+       if (flag & FLAG_SACK_RENEGING) {
                struct inet_connection_sock *icsk = inet_csk(sk);
-               NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
@@ -1832,7 +2282,27 @@ static int tcp_check_sack_reneging(struct sock *sk)
 
 static inline int tcp_fackets_out(struct tcp_sock *tp)
 {
-       return tcp_is_reno(tp) ? tp->sacked_out+1 : tp->fackets_out;
+       return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
+}
+
+/* Heuristics to calculate the number of duplicate ACKs. There's no dupACKs
+ * counter when SACK is enabled (without SACK, sacked_out is used for
+ * that purpose).
+ *
+ * Instead, with FACK TCP uses fackets_out that includes both SACKed
+ * segments up to the highest received SACK block so far and holes in
+ * between them.
+ *
+ * With reordering, holes may still be in flight, so RFC3517 recovery
+ * uses pure sacked_out (total number of SACKed segments) even though
+ * it violates the RFC, which uses duplicate ACKs. Often these are
+ * equal, but when e.g. out-of-window ACKs or packet duplication
+ * occur, they differ. Since neither occurs due to loss, TCP should
+ * really ignore them.
+ */
+static inline int tcp_dupack_heurestics(struct tcp_sock *tp)
+{
+       return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
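
A compact illustration of the two counting modes the comment describes, with toy values rather than live tcp_sock state (hypothetical helper, not kernel code):

        #include <stdio.h>

        static unsigned dupack_estimate(int is_fack, unsigned fackets_out,
                                        unsigned sacked_out)
        {
                return is_fack ? fackets_out : sacked_out + 1;
        }

        int main(void)
        {
                /* 3 SACKed segments with 2 holes below the highest SACK block */
                printf("fack: %u, rfc3517: %u\n",
                       dupack_estimate(1, 5, 3),        /* counts holes too -> 5 */
                       dupack_estimate(0, 5, 3));       /* SACKed only + 1  -> 4 */
                return 0;
        }
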
 
 static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
@@ -1955,13 +2425,13 @@ static int tcp_time_to_recover(struct sock *sk)
                return 1;
 
        /* Not-A-Trick#2 : Classic rule... */
-       if (tcp_fackets_out(tp) > tp->reordering)
+       if (tcp_dupack_heurestics(tp) > tp->reordering)
                return 1;
 
        /* Trick#3 : when we use RFC2988 timer restart, fast
         * retransmit can be triggered by timeout of queue head.
         */
-       if (tcp_head_timedout(sk))
+       if (tcp_is_fack(tp) && tcp_head_timedout(sk))
                return 1;
 
        /* Trick#4: It is still not OK... But will it be useful to delay
@@ -1980,28 +2450,56 @@ static int tcp_time_to_recover(struct sock *sk)
        return 0;
 }
 
-/* RFC: This is from the original, I doubt that this is necessary at all:
- * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
- * retransmitted past LOST markings in the first place? I'm not fully sure
- * about undo and end of connection cases, which can cause R without L?
+/* New heuristic: it became possible only after we switched to restarting
+ * the timer each time something is ACKed. Hence, we can detect timed-out
+ * packets during fast retransmit without falling back to slow start.
+ *
+ * The usefulness of this as-is is rather questionable, since we should
+ * know which of the segments is the next to time out, which is relatively
+ * expensive to find in the general case unless we add some data structure
+ * just for that. The current approach certainly won't find the right one
+ * very often, and when it finally does find _something_ it usually marks
+ * a large part of the window right away (because a retransmission with a
+ * larger timestamp blocks the loop from advancing). -ij
  */
-static void tcp_verify_retransmit_hint(struct tcp_sock *tp,
-                                      struct sk_buff *skb)
+static void tcp_timeout_skbs(struct sock *sk)
 {
-       if ((tp->retransmit_skb_hint != NULL) &&
-           before(TCP_SKB_CB(skb)->seq,
-           TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
-               tp->retransmit_skb_hint = NULL;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
+               return;
+
+       skb = tp->scoreboard_skb_hint;
+       if (tp->scoreboard_skb_hint == NULL)
+               skb = tcp_write_queue_head(sk);
+
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (!tcp_skb_timedout(sk, skb))
+                       break;
+
+               tcp_skb_mark_lost(tp, skb);
+       }
+
+       tp->scoreboard_skb_hint = skb;
+
+       tcp_verify_left_out(tp);
 }
 
-/* Mark head of queue up as lost. */
+/* Mark head of queue up as lost. With RFC3517 SACK, the count is
+ * against sacked "cnt", otherwise it's against facked "cnt"
+ */
 static void tcp_mark_head_lost(struct sock *sk, int packets)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int cnt;
+       int cnt, oldcnt;
+       int err;
+       unsigned int mss;
 
-       BUG_TRAP(packets <= tp->packets_out);
+       WARN_ON(packets > tp->packets_out);
        if (tp->lost_skb_hint) {
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
@@ -2017,61 +2515,52 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
                /* this is not the most efficient way to do this... */
                tp->lost_skb_hint = skb;
                tp->lost_cnt_hint = cnt;
-               cnt += tcp_skb_pcount(skb);
-               if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+
+               if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
                        break;
-               if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
-                       TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-                       tp->lost_out += tcp_skb_pcount(skb);
-                       tcp_verify_retransmit_hint(tp, skb);
+
+               oldcnt = cnt;
+               if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
+                   (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
+                       cnt += tcp_skb_pcount(skb);
+
+               if (cnt > packets) {
+                       if (tcp_is_sack(tp) || (oldcnt >= packets))
+                               break;
+
+                       mss = skb_shinfo(skb)->gso_size;
+                       err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
+                       if (err < 0)
+                               break;
+                       cnt = packets;
                }
+
+               tcp_skb_mark_lost(tp, skb);
        }
        tcp_verify_left_out(tp);
 }
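
The non-SACK branch above fragments a multi-segment GSO skb so that exactly the remaining budget lands on the lost side; a plain-arithmetic sketch of that boundary (tcp_fragment() does the real splitting in the kernel):

        #include <stdio.h>

        int main(void)
        {
                unsigned packets = 5;   /* budget of segments to mark lost */
                unsigned oldcnt = 3;    /* segments already counted */
                unsigned pcount = 4;    /* this skb covers 4 MSS-sized segments */
                unsigned mss = 1448;

                if (oldcnt + pcount > packets) {
                        unsigned split = (packets - oldcnt) * mss;

                        printf("fragment at byte %u, mark first %u segs lost\n",
                               split, packets - oldcnt);
                }
                return 0;
        }
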
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk)
+static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_is_fack(tp)) {
+       if (tcp_is_reno(tp)) {
+               tcp_mark_head_lost(sk, 1);
+       } else if (tcp_is_fack(tp)) {
                int lost = tp->fackets_out - tp->reordering;
                if (lost <= 0)
                        lost = 1;
                tcp_mark_head_lost(sk, lost);
        } else {
-               tcp_mark_head_lost(sk, 1);
+               int sacked_upto = tp->sacked_out - tp->reordering;
+               if (sacked_upto < fast_rexmit)
+                       sacked_upto = fast_rexmit;
+               tcp_mark_head_lost(sk, sacked_upto);
        }
 
-       /* New heuristics: it is possible only after we switched
-        * to restart timer each time when something is ACKed.
-        * Hence, we can detect timed out packets during fast
-        * retransmit without falling to slow start.
-        */
-       if (!tcp_is_reno(tp) && tcp_head_timedout(sk)) {
-               struct sk_buff *skb;
-
-               skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : tcp_write_queue_head(sk);
-
-               tcp_for_write_queue_from(skb, sk) {
-                       if (skb == tcp_send_head(sk))
-                               break;
-                       if (!tcp_skb_timedout(sk, skb))
-                               break;
-
-                       if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
-                               TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-                               tp->lost_out += tcp_skb_pcount(skb);
-                               tcp_verify_retransmit_hint(tp, skb);
-                       }
-               }
-
-               tp->scoreboard_skb_hint = skb;
-
-               tcp_verify_left_out(tp);
-       }
+       tcp_timeout_skbs(sk);
 }
 
 /* CWND moderation, preventing bursts due to too big ACKs
@@ -2080,7 +2569,7 @@ static void tcp_update_scoreboard(struct sock *sk)
 static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 {
        tp->snd_cwnd = min(tp->snd_cwnd,
-                          tcp_packets_in_flight(tp)+tcp_max_burst(tp));
+                          tcp_packets_in_flight(tp) + tcp_max_burst(tp));
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2100,15 +2589,15 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
        struct tcp_sock *tp = tcp_sk(sk);
        int decr = tp->snd_cwnd_cnt + 1;
 
-       if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
-           (tcp_is_reno(tp) && !(flag&FLAG_NOT_DUP))) {
-               tp->snd_cwnd_cnt = decr&1;
+       if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
+           (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
+               tp->snd_cwnd_cnt = decr & 1;
                decr >>= 1;
 
                if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
                        tp->snd_cwnd -= decr;
 
-               tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+               tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
                tp->snd_cwnd_stamp = tcp_time_stamp;
        }
 }
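
tcp_cwnd_down() implements rate halving: roughly half a packet of cwnd is given back per qualifying ACK, with the odd bit carried over in snd_cwnd_cnt so that two ACKs net one full decrement. A toy model of just that step (stand-in struct, illustrative only):

        #include <stdio.h>

        struct cwr { unsigned snd_cwnd, snd_cwnd_cnt; };

        static void cwnd_down_step(struct cwr *tp, unsigned cwnd_min)
        {
                unsigned decr = tp->snd_cwnd_cnt + 1;

                tp->snd_cwnd_cnt = decr & 1;    /* keep the remainder */
                decr >>= 1;

                if (decr && tp->snd_cwnd > cwnd_min)
                        tp->snd_cwnd -= decr;
        }

        int main(void)
        {
                struct cwr tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 0 };

                for (int ack = 0; ack < 4; ack++)
                        cwnd_down_step(&tp, 2);
                printf("cwnd after 4 acks: %u\n", tp.snd_cwnd);  /* 10 -> 8 */
                return 0;
        }
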
@@ -2120,7 +2609,7 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 {
        return !tp->retrans_stamp ||
                (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-                (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
+                before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
 }
 
 /* Undo procedures. */
@@ -2131,12 +2620,25 @@ static void DBGUNDO(struct sock *sk, const char *msg)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
 
-       printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
-              msg,
-              NIPQUAD(inet->daddr), ntohs(inet->dport),
-              tp->snd_cwnd, tcp_left_out(tp),
-              tp->snd_ssthresh, tp->prior_ssthresh,
-              tp->packets_out);
+       if (sk->sk_family == AF_INET) {
+               printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
+                      msg,
+                      &inet->daddr, ntohs(inet->dport),
+                      tp->snd_cwnd, tcp_left_out(tp),
+                      tp->snd_ssthresh, tp->prior_ssthresh,
+                      tp->packets_out);
+       }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       else if (sk->sk_family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+               printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
+                      msg,
+                      &np->daddr, ntohs(inet->dport),
+                      tp->snd_cwnd, tcp_left_out(tp),
+                      tp->snd_ssthresh, tp->prior_ssthresh,
+                      tp->packets_out);
+       }
+#endif
 }
 #else
 #define DBGUNDO(x...) do { } while (0)
@@ -2152,7 +2654,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
                if (icsk->icsk_ca_ops->undo_cwnd)
                        tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
                else
-                       tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
+                       tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
                if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
                        tp->snd_ssthresh = tp->prior_ssthresh;
@@ -2163,16 +2665,11 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
        }
        tcp_moderate_cwnd(tp);
        tp->snd_cwnd_stamp = tcp_time_stamp;
-
-       /* There is something screwy going on with the retrans hints after
-          an undo */
-       tcp_clear_all_retrans_hints(tp);
 }
 
 static inline int tcp_may_undo(struct tcp_sock *tp)
 {
-       return tp->undo_marker &&
-               (!tp->undo_retrans || tcp_packet_delayed(tp));
+       return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
 }
 
 /* People celebrate: "We love our President!" */
@@ -2181,15 +2678,19 @@ static int tcp_try_undo_recovery(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_may_undo(tp)) {
+               int mib_idx;
+
                /* Happy end! We did not retransmit anything
                 * or our original transmission succeeded.
                 */
                DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
                tcp_undo_cwr(sk, 1);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
-                       NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+                       mib_idx = LINUX_MIB_TCPLOSSUNDO;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+                       mib_idx = LINUX_MIB_TCPFULLUNDO;
+
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
                tp->undo_marker = 0;
        }
        if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2212,7 +2713,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
                DBGUNDO(sk, "D-SACK");
                tcp_undo_cwr(sk, 1);
                tp->undo_marker = 0;
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
        }
 }
 
@@ -2222,7 +2723,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        /* Partial ACK arrived. Force Hoe's retransmit. */
-       int failed = tcp_is_reno(tp) || tp->fackets_out>tp->reordering;
+       int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
 
        if (tcp_may_undo(tp)) {
                /* Plain luck! Hole is filled with delayed
@@ -2235,7 +2736,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
                DBGUNDO(sk, "Hoe");
                tcp_undo_cwr(sk, 0);
-               NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
                /* So... Do not make Hoe's retransmit yet.
                 * If the first packet was delayed, the rest
@@ -2264,7 +2765,7 @@ static int tcp_try_undo_loss(struct sock *sk)
                DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
                tcp_undo_cwr(sk, 1);
-               NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                inet_csk(sk)->icsk_retransmits = 0;
                tp->undo_marker = 0;
                if (tcp_is_sack(tp))
@@ -2282,28 +2783,34 @@ static inline void tcp_complete_cwr(struct sock *sk)
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+static void tcp_try_keep_open(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int state = TCP_CA_Open;
+
+       if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+               state = TCP_CA_Disorder;
+
+       if (inet_csk(sk)->icsk_ca_state != state) {
+               tcp_set_ca_state(sk, state);
+               tp->high_seq = tp->snd_nxt;
+       }
+}
+
 static void tcp_try_to_open(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        tcp_verify_left_out(tp);
 
-       if (tp->retrans_out == 0)
+       if (!tp->frto_counter && tp->retrans_out == 0)
                tp->retrans_stamp = 0;
 
-       if (flag&FLAG_ECE)
+       if (flag & FLAG_ECE)
                tcp_enter_cwr(sk, 1);
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
-               int state = TCP_CA_Open;
-
-               if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
-                       state = TCP_CA_Disorder;
-
-               if (inet_csk(sk)->icsk_ca_state != state) {
-                       tcp_set_ca_state(sk, state);
-                       tp->high_seq = tp->snd_nxt;
-               }
+               tcp_try_keep_open(sk);
                tcp_moderate_cwnd(tp);
        } else {
                tcp_cwnd_down(sk, flag);
@@ -2318,7 +2825,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
        icsk->icsk_mtup.probe_size = 0;
 }
 
-static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+static void tcp_mtup_probe_success(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2337,6 +2844,55 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 }
 
+/* Do a simple retransmit without using the backoff mechanisms in
+ * tcp_timer. This is used for path mtu discovery.
+ * The socket is already locked here.
+ */
+void tcp_simple_retransmit(struct sock *sk)
+{
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+       unsigned int mss = tcp_current_mss(sk);
+       u32 prior_lost = tp->lost_out;
+
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (tcp_skb_seglen(skb) > mss &&
+                   !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
+                       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+                               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+                               tp->retrans_out -= tcp_skb_pcount(skb);
+                       }
+                       tcp_skb_mark_lost_uncond_verify(tp, skb);
+               }
+       }
+
+       tcp_clear_retrans_hints_partial(tp);
+
+       if (prior_lost == tp->lost_out)
+               return;
+
+       if (tcp_is_reno(tp))
+               tcp_limit_reno_sacked(tp);
+
+       tcp_verify_left_out(tp);
+
+       /* Don't muck with the congestion window here.
+        * The reason is that we do not increase the amount of _data_
+        * in the network, but the units changed and the effective
+        * cwnd/ssthresh really shrank now.
+        */
+       if (icsk->icsk_ca_state != TCP_CA_Loss) {
+               tp->high_seq = tp->snd_nxt;
+               tp->snd_ssthresh = tcp_current_ssthresh(sk);
+               tp->prior_ssthresh = 0;
+               tp->undo_marker = 0;
+               tcp_set_ca_state(sk, TCP_CA_Loss);
+       }
+       tcp_xmit_retransmit_queue(sk);
+}
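
A minimal illustration of the test in the loop above: once path MTU discovery shrinks the MSS, any not-yet-SACKed segment larger than the new MSS can no longer traverse the path and is treated as lost (toy values, not kernel state):

        #include <stdio.h>

        int main(void)
        {
                unsigned seglens[3] = { 1448, 1448, 536 };
                unsigned new_mss = 1220;        /* e.g. after ICMP frag-needed */

                for (unsigned i = 0; i < 3; i++)
                        if (seglens[i] > new_mss)
                                printf("segment %u (%u bytes) marked lost\n",
                                       i, seglens[i]);
                return 0;
        }
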
 
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
@@ -2349,39 +2905,36 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
-static void
-tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
-       int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
-                                   (tp->fackets_out > tp->reordering));
+       int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+       int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
+                                   (tcp_fackets_out(tp) > tp->reordering));
+       int fast_rexmit = 0, mib_idx;
 
-       /* Some technical things:
-        * 1. Reno does not count dupacks (sacked_out) automatically. */
-       if (!tp->packets_out)
+       if (WARN_ON(!tp->packets_out && tp->sacked_out))
                tp->sacked_out = 0;
-
        if (WARN_ON(!tp->sacked_out && tp->fackets_out))
                tp->fackets_out = 0;
 
        /* Now state machine starts.
         * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-       if (flag&FLAG_ECE)
+       if (flag & FLAG_ECE)
                tp->prior_ssthresh = 0;
 
        /* B. In all the states check for reneging SACKs. */
-       if (tp->sacked_out && tcp_check_sack_reneging(sk))
+       if (tcp_check_sack_reneging(sk, flag))
                return;
 
        /* C. Process data loss notification, provided it is valid. */
-       if ((flag&FLAG_DATA_LOST) &&
+       if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
            before(tp->snd_una, tp->high_seq) &&
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
                tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-               NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
        }
 
        /* D. Check consistency of the current state. */
@@ -2390,7 +2943,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
        /* E. Check state exit conditions. State can be terminated
         *    when high_seq is ACKed. */
        if (icsk->icsk_ca_state == TCP_CA_Open) {
-               BUG_TRAP(tp->retrans_out == 0);
+               WARN_ON(tp->retrans_out != 0);
                tp->retrans_stamp = 0;
        } else if (!before(tp->snd_una, tp->high_seq)) {
                switch (icsk->icsk_ca_state) {
@@ -2440,8 +2993,10 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                        do_lost = tcp_try_undo_partial(sk, pkts_acked);
                break;
        case TCP_CA_Loss:
-               if (flag&FLAG_DATA_ACKED)
+               if (flag & FLAG_DATA_ACKED)
                        icsk->icsk_retransmits = 0;
+               if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
+                       tcp_reset_reno_sack(tp);
                if (!tcp_try_undo_loss(sk)) {
                        tcp_moderate_cwnd(tp);
                        tcp_xmit_retransmit_queue(sk);
@@ -2480,9 +3035,11 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                /* Otherwise enter Recovery state */
 
                if (tcp_is_reno(tp))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+                       mib_idx = LINUX_MIB_TCPRENORECOVERY;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+                       mib_idx = LINUX_MIB_TCPSACKRECOVERY;
+
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                tp->high_seq = tp->snd_nxt;
                tp->prior_ssthresh = 0;
@@ -2490,7 +3047,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                tp->undo_retrans = tp->retrans_out;
 
                if (icsk->icsk_ca_state < TCP_CA_CWR) {
-                       if (!(flag&FLAG_ECE))
+                       if (!(flag & FLAG_ECE))
                                tp->prior_ssthresh = tcp_current_ssthresh(sk);
                        tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                        TCP_ECN_queue_cwr(tp);
@@ -2499,14 +3056,22 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
                tp->bytes_acked = 0;
                tp->snd_cwnd_cnt = 0;
                tcp_set_ca_state(sk, TCP_CA_Recovery);
+               fast_rexmit = 1;
        }
 
-       if (do_lost || tcp_head_timedout(sk))
-               tcp_update_scoreboard(sk);
+       if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
+               tcp_update_scoreboard(sk, fast_rexmit);
        tcp_cwnd_down(sk, flag);
        tcp_xmit_retransmit_queue(sk);
 }
 
+static void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+{
+       tcp_rtt_estimator(sk, seq_rtt);
+       tcp_set_rto(sk);
+       inet_csk(sk)->icsk_backoff = 0;
+}
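
tcp_valid_rtt_meas() funnels every valid sample through tcp_rtt_estimator() and tcp_set_rto(); conceptually these compute the classic Jacobson/RFC 6298 estimator. The sketch below uses the textbook floating-point form; the kernel keeps srtt and mdev fixed-point scaled in jiffies and clamps the result, which is omitted here:

        #include <stdio.h>

        struct rtt_est { double srtt, rttvar, rto; };

        static void rtt_measure(struct rtt_est *e, double r)
        {
                if (e->srtt == 0) {             /* first measurement */
                        e->srtt = r;
                        e->rttvar = r / 2;
                } else {
                        double err = e->srtt - r;

                        e->rttvar = 0.75 * e->rttvar +
                                    0.25 * (err < 0 ? -err : err);
                        e->srtt = 0.875 * e->srtt + 0.125 * r;
                }
                e->rto = e->srtt + 4 * e->rttvar;
        }

        int main(void)
        {
                struct rtt_est e = { 0 };
                double samples[4] = { 100, 120, 80, 110 };      /* ms */

                for (int i = 0; i < 4; i++)
                        rtt_measure(&e, samples[i]);
                printf("srtt=%.1fms rto=%.1fms\n", e.srtt, e.rto);
                return 0;
        }
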
+
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Supersedes RFC1323)
  */
@@ -2528,11 +3093,8 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
         * in window is lost... Voila.                          --ANK (010210)
         */
        struct tcp_sock *tp = tcp_sk(sk);
-       const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-       tcp_bound_rto(sk);
+
+       tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 }
 
 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
@@ -2549,10 +3111,7 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
        if (flag & FLAG_RETRANS_DATA_ACKED)
                return;
 
-       tcp_rtt_estimator(sk, seq_rtt);
-       tcp_set_rto(sk);
-       inet_csk(sk)->icsk_backoff = 0;
-       tcp_bound_rto(sk);
+       tcp_valid_rtt_meas(sk, seq_rtt);
 }
 
 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
@@ -2566,11 +3125,10 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
                tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack,
-                          u32 in_flight, int good)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
+       icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
        tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2584,7 +3142,8 @@ static void tcp_rearm_rto(struct sock *sk)
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
-               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                         inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
        }
 }
 
@@ -2613,7 +3172,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * is before the ack sequence we can discard it as it's confirmed to have
  * arrived at the other end.
  */
-static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
+static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+                              u32 prior_snd_una)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2621,67 +3181,58 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
        u32 now = tcp_time_stamp;
        int fully_acked = 1;
        int flag = 0;
-       int prior_packets = tp->packets_out;
+       u32 pkts_acked = 0;
+       u32 reord = tp->packets_out;
+       u32 prior_sacked = tp->sacked_out;
        s32 seq_rtt = -1;
+       s32 ca_seq_rtt = -1;
        ktime_t last_ackt = net_invalid_timestamp();
 
        while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-               u32 end_seq;
-               u32 packets_acked;
+               u32 acked_pcount;
                u8 sacked = scb->sacked;
 
+               /* Determine how many packets and what bytes were acked, TSO and otherwise */
                if (after(scb->end_seq, tp->snd_una)) {
                        if (tcp_skb_pcount(skb) == 1 ||
                            !after(tp->snd_una, scb->seq))
                                break;
 
-                       packets_acked = tcp_tso_acked(sk, skb);
-                       if (!packets_acked)
+                       acked_pcount = tcp_tso_acked(sk, skb);
+                       if (!acked_pcount)
                                break;
 
                        fully_acked = 0;
-                       end_seq = tp->snd_una;
                } else {
-                       packets_acked = tcp_skb_pcount(skb);
-                       end_seq = scb->end_seq;
+                       acked_pcount = tcp_skb_pcount(skb);
                }
 
-               /* MTU probing checks */
-               if (fully_acked && icsk->icsk_mtup.probe_size &&
-                   !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
-                       tcp_mtup_probe_success(sk, skb);
+               if (sacked & TCPCB_RETRANS) {
+                       if (sacked & TCPCB_SACKED_RETRANS)
+                               tp->retrans_out -= acked_pcount;
+                       flag |= FLAG_RETRANS_DATA_ACKED;
+                       ca_seq_rtt = -1;
+                       seq_rtt = -1;
+                       if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
+                               flag |= FLAG_NONHEAD_RETRANS_ACKED;
+               } else {
+                       ca_seq_rtt = now - scb->when;
+                       last_ackt = skb->tstamp;
+                       if (seq_rtt < 0)
+                               seq_rtt = ca_seq_rtt;
+                       if (!(sacked & TCPCB_SACKED_ACKED))
+                               reord = min(pkts_acked, reord);
                }
 
-               if (sacked) {
-                       if (sacked & TCPCB_RETRANS) {
-                               if (sacked & TCPCB_SACKED_RETRANS)
-                                       tp->retrans_out -= packets_acked;
-                               flag |= FLAG_RETRANS_DATA_ACKED;
-                               seq_rtt = -1;
-                               if ((flag & FLAG_DATA_ACKED) ||
-                                   (packets_acked > 1))
-                                       flag |= FLAG_NONHEAD_RETRANS_ACKED;
-                       } else if (seq_rtt < 0) {
-                               seq_rtt = now - scb->when;
-                               if (fully_acked)
-                                       last_ackt = skb->tstamp;
-                       }
+               if (sacked & TCPCB_SACKED_ACKED)
+                       tp->sacked_out -= acked_pcount;
+               if (sacked & TCPCB_LOST)
+                       tp->lost_out -= acked_pcount;
 
-                       if (sacked & TCPCB_SACKED_ACKED)
-                               tp->sacked_out -= packets_acked;
-                       if (sacked & TCPCB_LOST)
-                               tp->lost_out -= packets_acked;
-
-                       if ((sacked & TCPCB_URG) && tp->urg_mode &&
-                           !before(end_seq, tp->snd_up))
-                               tp->urg_mode = 0;
-               } else if (seq_rtt < 0) {
-                       seq_rtt = now - scb->when;
-                       if (fully_acked)
-                               last_ackt = skb->tstamp;
-               }
-               tp->packets_out -= packets_acked;
+               tp->packets_out -= acked_pcount;
+               pkts_acked += acked_pcount;
 
                /* Initial outgoing SYN's get put onto the write_queue
                 * just like anything else we transmit.  It is not
@@ -2701,24 +3252,47 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
                        break;
 
                tcp_unlink_write_queue(skb, sk);
-               sk_stream_free_skb(sk, skb);
-               tcp_clear_all_retrans_hints(tp);
+               sk_wmem_free_skb(sk, skb);
+               tp->scoreboard_skb_hint = NULL;
+               if (skb == tp->retransmit_skb_hint)
+                       tp->retransmit_skb_hint = NULL;
+               if (skb == tp->lost_skb_hint)
+                       tp->lost_skb_hint = NULL;
        }
 
+       if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
+               tp->snd_up = tp->snd_una;
+
+       if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
+               flag |= FLAG_SACK_RENEGING;
+
        if (flag & FLAG_ACKED) {
-               u32 pkts_acked = prior_packets - tp->packets_out;
                const struct tcp_congestion_ops *ca_ops
                        = inet_csk(sk)->icsk_ca_ops;
 
+               if (unlikely(icsk->icsk_mtup.probe_size &&
+                            !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+                       tcp_mtup_probe_success(sk);
+               }
+
                tcp_ack_update_rtt(sk, flag, seq_rtt);
                tcp_rearm_rto(sk);
 
-               tp->fackets_out -= min(pkts_acked, tp->fackets_out);
-               /* hint's skb might be NULL but we don't need to care */
-               tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
-                                              tp->fastpath_cnt_hint);
-               if (tcp_is_reno(tp))
+               if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
+               } else {
+                       int delta;
+
+                       /* Non-retransmitted hole got filled? That's reordering */
+                       if (reord < prior_fackets)
+                               tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+
+                       delta = tcp_is_fack(tp) ? pkts_acked :
+                                                 prior_sacked - tp->sacked_out;
+                       tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
+               }
+
+               tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
                if (ca_ops->pkts_acked) {
                        s32 rtt_us = -1;
@@ -2731,8 +3305,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
                                                 net_invalid_timestamp()))
                                        rtt_us = ktime_us_delta(ktime_get_real(),
                                                                last_ackt);
-                               else if (seq_rtt > 0)
-                                       rtt_us = jiffies_to_usecs(seq_rtt);
+                               else if (ca_seq_rtt > 0)
+                                       rtt_us = jiffies_to_usecs(ca_seq_rtt);
                        }
 
                        ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
@@ -2740,9 +3314,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
        }
 
 #if FASTRETRANS_DEBUG > 0
-       BUG_TRAP((int)tp->sacked_out >= 0);
-       BUG_TRAP((int)tp->lost_out >= 0);
-       BUG_TRAP((int)tp->retrans_out >= 0);
+       WARN_ON((int)tp->sacked_out < 0);
+       WARN_ON((int)tp->lost_out < 0);
+       WARN_ON((int)tp->retrans_out < 0);
        if (!tp->packets_out && tcp_is_sack(tp)) {
                icsk = inet_csk(sk);
                if (tp->lost_out) {
@@ -2762,7 +3336,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
                }
        }
 #endif
-       *seq_rtt_p = seq_rtt;
        return flag;
 }
 
@@ -2773,8 +3346,7 @@ static void tcp_ack_probe(struct sock *sk)
 
        /* Was it a usable window open? */
 
-       if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
-                  tp->snd_una + tp->snd_wnd)) {
+       if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
                icsk->icsk_backoff = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
                /* Socket must be waked up by subsequent tcp_data_snd_check().
@@ -2803,8 +3375,9 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
-static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
-                                       const u32 ack_seq, const u32 nwin)
+static inline int tcp_may_update_window(const struct tcp_sock *tp,
+                                       const u32 ack, const u32 ack_seq,
+                                       const u32 nwin)
 {
        return (after(ack, tp->snd_una) ||
                after(ack_seq, tp->snd_wl1) ||
@@ -2828,7 +3401,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
                flag |= FLAG_WIN_UPDATE;
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
 
                if (tp->snd_wnd != nwin) {
                        tp->snd_wnd = nwin;
@@ -2873,7 +3446,7 @@ static void tcp_ratehalving_spur_to_response(struct sock *sk)
 
 static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 {
-       if (flag&FLAG_ECE)
+       if (flag & FLAG_ECE)
                tcp_ratehalving_spur_to_response(sk);
        else
                tcp_undo_cwr(sk, 1);
@@ -2916,7 +3489,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
        tcp_verify_left_out(tp);
 
        /* Duplicate the behavior from Loss state (fastretrans_alert) */
-       if (flag&FLAG_DATA_ACKED)
+       if (flag & FLAG_DATA_ACKED)
                inet_csk(sk)->icsk_retransmits = 0;
 
        if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
@@ -2928,21 +3501,21 @@ static int tcp_process_frto(struct sock *sk, int flag)
                return 1;
        }
 
-       if (!IsSackFrto() || tcp_is_reno(tp)) {
+       if (!tcp_is_sackfrto(tp)) {
                /* RFC4138 shortcoming in step 2; should also have case c):
                 * ACK isn't duplicate nor advances window, e.g., opposite dir
                 * data, winupdate
                 */
-               if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
+               if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
                        return 1;
 
-               if (!(flag&FLAG_DATA_ACKED)) {
+               if (!(flag & FLAG_DATA_ACKED)) {
                        tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
                                            flag);
                        return 1;
                }
        } else {
-               if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+               if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
                        /* Prevent sending of new data. */
                        tp->snd_cwnd = min(tp->snd_cwnd,
                                           tcp_packets_in_flight(tp));
@@ -2950,10 +3523,12 @@ static int tcp_process_frto(struct sock *sk, int flag)
                }
 
                if ((tp->frto_counter >= 2) &&
-                   (!(flag&FLAG_FORWARD_PROGRESS) ||
-                    ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
+                   (!(flag & FLAG_FORWARD_PROGRESS) ||
+                    ((flag & FLAG_DATA_SACKED) &&
+                     !(flag & FLAG_ONLY_ORIG_SACKED)))) {
                        /* RFC4138 shortcoming (see comment above) */
-                       if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
+                       if (!(flag & FLAG_FORWARD_PROGRESS) &&
+                           (flag & FLAG_NOT_DUP))
                                return 1;
 
                        tcp_enter_frto_loss(sk, 3, flag);
@@ -2962,17 +3537,13 @@ static int tcp_process_frto(struct sock *sk, int flag)
        }
 
        if (tp->frto_counter == 1) {
-               /* Sending of the next skb must be allowed or no F-RTO */
-               if (!tcp_send_head(sk) ||
-                   after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
-                                    tp->snd_una + tp->snd_wnd)) {
-                       tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3),
-                                           flag);
-                       return 1;
-               }
-
+               /* tcp_may_send_now needs to see updated state */
                tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
                tp->frto_counter = 2;
+
+               if (!tcp_may_send_now(sk))
+                       tcp_enter_frto_loss(sk, 2, flag);
+
                return 1;
        } else {
                switch (sysctl_tcp_frto_response) {
@@ -2988,7 +3559,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
                }
                tp->frto_counter = 0;
                tp->undo_marker = 0;
-               NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
        }
        return 0;
 }
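
As a rough companion to the above: F-RTO (RFC 4138) responds to a retransmission timeout by sending new data instead of retransmitting further, then watches the next two ACKs. A toy model of that probe, with invented names and none of the SACK, ECN or window-limited cases the real code handles:

/* frto_counter: 1 = first new ACK awaited, 2 = verdict ACK awaited. */
struct frto_probe {
        int counter;
};

/* Returns 1 if the timeout is judged spurious, -1 if genuine loss is
 * confirmed, 0 while the probe is still in progress.
 */
static int frto_on_ack(struct frto_probe *p, int acks_new_data)
{
        if (p->counter == 1) {
                if (!acks_new_data)
                        return -1;      /* fall back to conventional recovery */
                p->counter = 2;         /* allow ~2 new segments and wait */
                return 0;
        }
        if (p->counter == 2) {
                p->counter = 0;
                return acks_new_data ? 1 : -1;
        }
        return 0;
}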
@@ -3002,19 +3573,22 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        u32 prior_in_flight;
-       s32 seq_rtt;
+       u32 prior_fackets;
        int prior_packets;
        int frto_cwnd = 0;
 
-       /* If the ack is newer than sent or older than previous acks
+       /* If the ack is older than previous acks
         * then we can probably ignore it.
         */
-       if (after(ack, tp->snd_nxt))
-               goto uninteresting_ack;
-
        if (before(ack, prior_snd_una))
                goto old_ack;
 
+       /* If the ack includes data we haven't sent yet, discard
+        * this segment (RFC 793 Section 3.9).
+        */
+       if (after(ack, tp->snd_nxt))
+               goto invalid_ack;
+
        if (after(ack, prior_snd_una))
                flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -3023,26 +3597,30 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                        tp->bytes_acked += ack - prior_snd_una;
                else if (icsk->icsk_ca_state == TCP_CA_Loss)
                        /* we assume just one segment left network */
-                       tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
+                       tp->bytes_acked += min(ack - prior_snd_una,
+                                              tp->mss_cache);
        }
 
-       if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+       prior_fackets = tp->fackets_out;
+       prior_in_flight = tcp_packets_in_flight(tp);
+
+       if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                /* Window is constant, pure forward advance.
                 * No more checks are required.
                 * Note, we use the fact that SND.UNA>=SND.WL2.
                 */
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
                tp->snd_una = ack;
                flag |= FLAG_WIN_UPDATE;
 
                tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-               NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
        } else {
                if (ack_seq != TCP_SKB_CB(skb)->end_seq)
                        flag |= FLAG_DATA;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
                flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3059,41 +3637,39 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
         * log. Something worked...
         */
        sk->sk_err_soft = 0;
+       icsk->icsk_probes_out = 0;
        tp->rcv_tstamp = tcp_time_stamp;
        prior_packets = tp->packets_out;
        if (!prior_packets)
                goto no_queue;
 
-       prior_in_flight = tcp_packets_in_flight(tp);
-
        /* See if we can take anything off of the retransmit queue. */
-       flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
+       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+       if (tp->frto_counter)
+               frto_cwnd = tcp_process_frto(sk, flag);
        /* Guarantee sacktag reordering detection against wrap-arounds */
        if (before(tp->frto_highmark, tp->snd_una))
                tp->frto_highmark = 0;
-       if (tp->frto_counter)
-               frto_cwnd = tcp_process_frto(sk, flag);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
                    tcp_may_raise_cwnd(sk, flag))
-                       tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-               tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
+                       tcp_cong_avoid(sk, ack, prior_in_flight);
+               tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
+                                     flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
-                       tcp_cong_avoid(sk, ack, prior_in_flight, 1);
+                       tcp_cong_avoid(sk, ack, prior_in_flight);
        }
 
-       if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
+       if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                dst_confirm(sk->sk_dst_cache);
 
        return 1;
 
 no_queue:
-       icsk->icsk_probes_out = 0;
-
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3102,138 +3678,188 @@ no_queue:
                tcp_ack_probe(sk);
        return 1;
 
+invalid_ack:
+       SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       return -1;
+
 old_ack:
-       if (TCP_SKB_CB(skb)->sacked)
+       if (TCP_SKB_CB(skb)->sacked) {
                tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+               if (icsk->icsk_ca_state == TCP_CA_Open)
+                       tcp_try_keep_open(sk);
+       }
 
-uninteresting_ack:
-       SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
        return 0;
 }
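
The triage at the top of tcp_ack() thus reduces to three cases, all decided with modulo-2^32 comparisons. A minimal sketch (classify_ack and seq_before are illustrative names, not kernel API):

#include <stdint.h>

enum ack_class { ACK_OLD, ACK_INVALID, ACK_NORMAL };

static int seq_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

static enum ack_class classify_ack(uint32_t ack, uint32_t snd_una,
                                   uint32_t snd_nxt)
{
        if (seq_before(ack, snd_una))
                return ACK_OLD;         /* may still carry fresh SACK blocks */
        if (seq_before(snd_nxt, ack))
                return ACK_INVALID;     /* acks unsent data: discard (RFC 793 3.9) */
        return ACK_NORMAL;
}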
 
-
 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
  */
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
+void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
+                      int estab)
 {
        unsigned char *ptr;
        struct tcphdr *th = tcp_hdr(skb);
-       int length=(th->doff*4)-sizeof(struct tcphdr);
+       int length = (th->doff * 4) - sizeof(struct tcphdr);
 
        ptr = (unsigned char *)(th + 1);
        opt_rx->saw_tstamp = 0;
 
        while (length > 0) {
-               int opcode=*ptr++;
+               int opcode = *ptr++;
                int opsize;
 
                switch (opcode) {
-                       case TCPOPT_EOL:
+               case TCPOPT_EOL:
+                       return;
+               case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
+                       length--;
+                       continue;
+               default:
+                       opsize = *ptr++;
+                       if (opsize < 2) /* "silly options" */
                                return;
-                       case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
-                               length--;
-                               continue;
-                       default:
-                               opsize=*ptr++;
-                               if (opsize < 2) /* "silly options" */
-                                       return;
-                               if (opsize > length)
-                                       return; /* don't parse partial options */
-                               switch (opcode) {
-                               case TCPOPT_MSS:
-                                       if (opsize==TCPOLEN_MSS && th->syn && !estab) {
-                                               u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
-                                               if (in_mss) {
-                                                       if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
-                                                               in_mss = opt_rx->user_mss;
-                                                       opt_rx->mss_clamp = in_mss;
-                                               }
-                                       }
-                                       break;
-                               case TCPOPT_WINDOW:
-                                       if (opsize==TCPOLEN_WINDOW && th->syn && !estab)
-                                               if (sysctl_tcp_window_scaling) {
-                                                       __u8 snd_wscale = *(__u8 *) ptr;
-                                                       opt_rx->wscale_ok = 1;
-                                                       if (snd_wscale > 14) {
-                                                               if (net_ratelimit())
-                                                                       printk(KERN_INFO "tcp_parse_options: Illegal window "
-                                                                              "scaling value %d >14 received.\n",
-                                                                              snd_wscale);
-                                                               snd_wscale = 14;
-                                                       }
-                                                       opt_rx->snd_wscale = snd_wscale;
-                                               }
-                                       break;
-                               case TCPOPT_TIMESTAMP:
-                                       if (opsize==TCPOLEN_TIMESTAMP) {
-                                               if ((estab && opt_rx->tstamp_ok) ||
-                                                   (!estab && sysctl_tcp_timestamps)) {
-                                                       opt_rx->saw_tstamp = 1;
-                                                       opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
-                                                       opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
-                                               }
+                       if (opsize > length)
+                               return; /* don't parse partial options */
+                       switch (opcode) {
+                       case TCPOPT_MSS:
+                               if (opsize == TCPOLEN_MSS && th->syn && !estab) {
+                                       u16 in_mss = get_unaligned_be16(ptr);
+                                       if (in_mss) {
+                                               if (opt_rx->user_mss &&
+                                                   opt_rx->user_mss < in_mss)
+                                                       in_mss = opt_rx->user_mss;
+                                               opt_rx->mss_clamp = in_mss;
                                        }
-                                       break;
-                               case TCPOPT_SACK_PERM:
-                                       if (opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
-                                               if (sysctl_tcp_sack) {
-                                                       opt_rx->sack_ok = 1;
-                                                       tcp_sack_reset(opt_rx);
-                                               }
+                               }
+                               break;
+                       case TCPOPT_WINDOW:
+                               if (opsize == TCPOLEN_WINDOW && th->syn &&
+                                   !estab && sysctl_tcp_window_scaling) {
+                                       __u8 snd_wscale = *(__u8 *)ptr;
+                                       opt_rx->wscale_ok = 1;
+                                       if (snd_wscale > 14) {
+                                               if (net_ratelimit())
+                                                       printk(KERN_INFO "tcp_parse_options: Illegal window "
+                                                              "scaling value %d >14 received.\n",
+                                                              snd_wscale);
+                                               snd_wscale = 14;
                                        }
-                                       break;
+                                       opt_rx->snd_wscale = snd_wscale;
+                               }
+                               break;
+                       case TCPOPT_TIMESTAMP:
+                               if ((opsize == TCPOLEN_TIMESTAMP) &&
+                                   ((estab && opt_rx->tstamp_ok) ||
+                                    (!estab && sysctl_tcp_timestamps))) {
+                                       opt_rx->saw_tstamp = 1;
+                                       opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+                                       opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
+                               }
+                               break;
+                       case TCPOPT_SACK_PERM:
+                               if (opsize == TCPOLEN_SACK_PERM && th->syn &&
+                                   !estab && sysctl_tcp_sack) {
+                                       opt_rx->sack_ok = 1;
+                                       tcp_sack_reset(opt_rx);
+                               }
+                               break;
 
-                               case TCPOPT_SACK:
-                                       if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
-                                          !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
-                                          opt_rx->sack_ok) {
-                                               TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
-                                       }
-                                       break;
+                       case TCPOPT_SACK:
+                               if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+                                  !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
+                                  opt_rx->sack_ok) {
+                                       TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
+                               }
+                               break;
 #ifdef CONFIG_TCP_MD5SIG
-                               case TCPOPT_MD5SIG:
-                                       /*
-                                        * The MD5 Hash has already been
-                                        * checked (see tcp_v{4,6}_do_rcv()).
-                                        */
-                                       break;
+                       case TCPOPT_MD5SIG:
+                               /*
+                                * The MD5 Hash has already been
+                                * checked (see tcp_v{4,6}_do_rcv()).
+                                */
+                               break;
 #endif
-                               }
+                       }
 
-                               ptr+=opsize-2;
-                               length-=opsize;
+                       ptr += opsize - 2;
+                       length -= opsize;
                }
        }
 }
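
tcp_parse_options() walks the standard kind/length TLV encoding: each non-NOP option is one kind byte, one length byte covering the whole option, then the value. As a sketch of the wire image it consumes, a hypothetical builder for a SYN carrying MSS and window scale (kinds 2 and 3; the buffer management is illustrative only):

#include <stddef.h>
#include <stdint.h>

static size_t build_syn_options(uint8_t *buf, uint16_t mss, uint8_t wscale)
{
        size_t off = 0;

        buf[off++] = 2;                 /* kind: MSS */
        buf[off++] = 4;                 /* length, including kind and length bytes */
        buf[off++] = mss >> 8;          /* value, big-endian */
        buf[off++] = mss & 0xff;

        buf[off++] = 1;                 /* NOP pad, so the next option aligns */
        buf[off++] = 3;                 /* kind: window scale */
        buf[off++] = 3;                 /* length */
        buf[off++] = wscale;            /* shift count; the parser caps it at 14 */

        return off;                     /* 8 bytes, a whole number of 32-bit words */
}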
 
+static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
+{
+       __be32 *ptr = (__be32 *)(th + 1);
+
+       if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+                         | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
+               tp->rx_opt.saw_tstamp = 1;
+               ++ptr;
+               tp->rx_opt.rcv_tsval = ntohl(*ptr);
+               ++ptr;
+               tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+               return 1;
+       }
+       return 0;
+}
+
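
The single 32-bit compare above works because a timestamps-only option block always starts with the same four bytes: NOP (1), NOP (1), TIMESTAMP (8), length 10. A self-check of that packed constant in host order (the kernel compares against htonl() of it):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint8_t prelude[4] = { 1, 1, 8, 10 };     /* NOP NOP TIMESTAMP 10 */
        uint32_t packed = (1u << 24) | (1u << 16) | (8u << 8) | 10u;
        uint32_t assembled = ((uint32_t)prelude[0] << 24) |
                             ((uint32_t)prelude[1] << 16) |
                             ((uint32_t)prelude[2] << 8) |
                              (uint32_t)prelude[3];

        assert(packed == assembled);
        return 0;
}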
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
 static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
                                  struct tcp_sock *tp)
 {
-       if (th->doff == sizeof(struct tcphdr)>>2) {
+       if (th->doff == sizeof(struct tcphdr) >> 2) {
                tp->rx_opt.saw_tstamp = 0;
                return 0;
        } else if (tp->rx_opt.tstamp_ok &&
                   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
-               __be32 *ptr = (__be32 *)(th + 1);
-               if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
-                                 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
-                       tp->rx_opt.saw_tstamp = 1;
-                       ++ptr;
-                       tp->rx_opt.rcv_tsval = ntohl(*ptr);
-                       ++ptr;
-                       tp->rx_opt.rcv_tsecr = ntohl(*ptr);
+               if (tcp_parse_aligned_timestamp(tp, th))
                        return 1;
+       }
+       tcp_parse_options(skb, &tp->rx_opt, 1);
+       return 1;
+}
+
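
The doff tests above, with numbers: doff counts 32-bit words, the bare TCP header is 20 bytes (doff 5), and the NOP-padded timestamp option is 12 bytes (3 words), so a timestamps-only segment has doff 8; anything else falls through to the slow parser.

#include <assert.h>

int main(void)
{
        int hdr_words = 20 >> 2;        /* sizeof(struct tcphdr) >> 2 */
        int ts_words = 12 >> 2;         /* TCPOLEN_TSTAMP_ALIGNED >> 2 */

        assert(hdr_words == 5 && hdr_words + ts_words == 8);
        return 0;
}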
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * Parse MD5 Signature option
+ */
+u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+{
+       int length = (th->doff << 2) - sizeof(*th);
+       u8 *ptr = (u8 *)(th + 1);
+
+       /* If the option area is too short to hold an MD5 option, bail out early */
+       if (length < TCPOLEN_MD5SIG)
+               return NULL;
+
+       while (length > 0) {
+               int opcode = *ptr++;
+               int opsize;
+
+               switch (opcode) {
+               case TCPOPT_EOL:
+                       return NULL;
+               case TCPOPT_NOP:
+                       length--;
+                       continue;
+               default:
+                       opsize = *ptr++;
+                       if (opsize < 2 || opsize > length)
+                               return NULL;
+                       if (opcode == TCPOPT_MD5SIG)
+                               return ptr;
                }
+               ptr += opsize - 2;
+               length -= opsize;
        }
-       tcp_parse_options(skb, &tp->rx_opt, 1);
-       return 1;
+       return NULL;
 }
+#endif
 
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
@@ -3251,8 +3877,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if (tcp_paws_check(&tp->rx_opt, 0))
                        tcp_store_ts_recent(tp);
        }
 }
@@ -3300,12 +3925,13 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
                (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 }
 
-static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb)
+static inline int tcp_paws_discard(const struct sock *sk,
+                                  const struct sk_buff *skb)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-               !tcp_disordered_ack(sk, skb));
+
+       return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
+              !tcp_disordered_ack(sk, skb);
 }
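
Assuming tcp_paws_check() keeps the logic of the removed lines, the PAWS core is: a timestamp may step backwards by at most a small grace window, and ts_recent may only be used to reject segments if it was refreshed within the last 24 days. A standalone sketch (paws_ok and its parameters are invented names):

#include <stdint.h>

#define PAWS_24DAYS     (60 * 60 * 24 * 24)     /* seconds */

static int paws_ok(uint32_t rcv_tsval, uint32_t ts_recent, int32_t grace,
                   uint32_t now_sec, uint32_t ts_recent_stamp)
{
        if ((int32_t)(rcv_tsval - ts_recent) >= -grace)
                return 1;       /* timestamp did not go backwards (much) */
        if (now_sec >= ts_recent_stamp + PAWS_24DAYS)
                return 1;       /* ts_recent too stale to reject on */
        return 0;
}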
 
 /* Check segment sequence number for validity.
@@ -3332,16 +3958,16 @@ static void tcp_reset(struct sock *sk)
 {
        /* We want the right error as BSD sees it (and indeed as we do). */
        switch (sk->sk_state) {
-               case TCP_SYN_SENT:
-                       sk->sk_err = ECONNREFUSED;
-                       break;
-               case TCP_CLOSE_WAIT:
-                       sk->sk_err = EPIPE;
-                       break;
-               case TCP_CLOSE:
-                       return;
-               default:
-                       sk->sk_err = ECONNRESET;
+       case TCP_SYN_SENT:
+               sk->sk_err = ECONNREFUSED;
+               break;
+       case TCP_CLOSE_WAIT:
+               sk->sk_err = EPIPE;
+               break;
+       case TCP_CLOSE:
+               return;
+       default:
+               sk->sk_err = ECONNRESET;
        }
 
        if (!sock_flag(sk, SOCK_DEAD))
@@ -3374,43 +4000,43 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
        sock_set_flag(sk, SOCK_DONE);
 
        switch (sk->sk_state) {
-               case TCP_SYN_RECV:
-               case TCP_ESTABLISHED:
-                       /* Move to CLOSE_WAIT */
-                       tcp_set_state(sk, TCP_CLOSE_WAIT);
-                       inet_csk(sk)->icsk_ack.pingpong = 1;
-                       break;
+       case TCP_SYN_RECV:
+       case TCP_ESTABLISHED:
+               /* Move to CLOSE_WAIT */
+               tcp_set_state(sk, TCP_CLOSE_WAIT);
+               inet_csk(sk)->icsk_ack.pingpong = 1;
+               break;
 
-               case TCP_CLOSE_WAIT:
-               case TCP_CLOSING:
-                       /* Received a retransmission of the FIN, do
-                        * nothing.
-                        */
-                       break;
-               case TCP_LAST_ACK:
-                       /* RFC793: Remain in the LAST-ACK state. */
-                       break;
+       case TCP_CLOSE_WAIT:
+       case TCP_CLOSING:
+               /* Received a retransmission of the FIN, do
+                * nothing.
+                */
+               break;
+       case TCP_LAST_ACK:
+               /* RFC793: Remain in the LAST-ACK state. */
+               break;
 
-               case TCP_FIN_WAIT1:
-                       /* This case occurs when a simultaneous close
-                        * happens, we must ack the received FIN and
-                        * enter the CLOSING state.
-                        */
-                       tcp_send_ack(sk);
-                       tcp_set_state(sk, TCP_CLOSING);
-                       break;
-               case TCP_FIN_WAIT2:
-                       /* Received a FIN -- send ACK and enter TIME_WAIT. */
-                       tcp_send_ack(sk);
-                       tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-                       break;
-               default:
-                       /* Only TCP_LISTEN and TCP_CLOSE are left, in these
-                        * cases we should never reach this piece of code.
-                        */
-                       printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
-                              __FUNCTION__, sk->sk_state);
-                       break;
+       case TCP_FIN_WAIT1:
+               /* This case occurs when a simultaneous close
+                * happens, we must ack the received FIN and
+                * enter the CLOSING state.
+                */
+               tcp_send_ack(sk);
+               tcp_set_state(sk, TCP_CLOSING);
+               break;
+       case TCP_FIN_WAIT2:
+               /* Received a FIN -- send ACK and enter TIME_WAIT. */
+               tcp_send_ack(sk);
+               tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+               break;
+       default:
+               /* Only TCP_LISTEN and TCP_CLOSE are left, in these
+                * cases we should never reach this piece of code.
+                */
+               printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
+                      __func__, sk->sk_state);
+               break;
        }
 
        /* It _is_ possible, that we have something out-of-order _after_ FIN.
@@ -3419,7 +4045,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
        __skb_queue_purge(&tp->out_of_order_queue);
        if (tcp_is_sack(tp))
                tcp_sack_reset(&tp->rx_opt);
-       sk_stream_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 
        if (!sock_flag(sk, SOCK_DEAD)) {
                sk->sk_state_change(sk);
@@ -3427,13 +4053,14 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
                /* Do not send POLL_HUP for half duplex close. */
                if (sk->sk_shutdown == SHUTDOWN_MASK ||
                    sk->sk_state == TCP_CLOSE)
-                       sk_wake_async(sk, 1, POLL_HUP);
+                       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
                else
-                       sk_wake_async(sk, 1, POLL_IN);
+                       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        }
 }
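
The switch above is the receiver's half of the close handshake. Restated as a compact transition function (toy state names mirroring the TCP_* constants; the no-change default covers retransmitted FINs and LAST-ACK):

enum toy_state { EST, FIN_W1, FIN_W2, CLOSE_W, CLOSING_S, TIME_W };

static enum toy_state on_fin(enum toy_state s)
{
        switch (s) {
        case EST:       return CLOSE_W;         /* passive close begins */
        case FIN_W1:    return CLOSING_S;       /* simultaneous close, ack the FIN */
        case FIN_W2:    return TIME_W;          /* active close completes, ack the FIN */
        default:        return s;               /* duplicate FIN or LAST-ACK: stay put */
        }
}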
 
-static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
+static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
+                                 u32 end_seq)
 {
        if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
                if (before(seq, sp->start_seq))
@@ -3445,25 +4072,32 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_se
        return 0;
 }
 
-static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
+               int mib_idx;
+
                if (before(seq, tp->rcv_nxt))
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+                       mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
                else
-                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+                       mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
+
+               NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
                tp->duplicate_sack[0].end_seq = end_seq;
-               tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok);
        }
 }
 
-static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!tp->rx_opt.dsack)
-               tcp_dsack_set(tp, seq, end_seq);
+               tcp_dsack_set(sk, seq, end_seq);
        else
                tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
 }
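
Together, tcp_dsack_set() and tcp_dsack_extend() keep one duplicate range per ACK (RFC 2883): the first duplicate is stored outright, and later ones are merged in only if they touch or overlap it, via tcp_sack_extend(). A standalone restatement of that merge test (dup_extend is an invented name):

#include <stdint.h>

struct dup_range { uint32_t start, end; };

static int dup_extend(struct dup_range *d, uint32_t seq, uint32_t end_seq)
{
        /* Reject disjoint ranges, as tcp_sack_extend() does. */
        if ((int32_t)(seq - d->end) > 0 || (int32_t)(d->start - end_seq) > 0)
                return 0;
        if ((int32_t)(seq - d->start) < 0)
                d->start = seq;         /* widen to the left */
        if ((int32_t)(end_seq - d->end) > 0)
                d->end = end_seq;       /* widen to the right */
        return 1;
}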
@@ -3474,7 +4108,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
            before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk);
 
                if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -3482,7 +4116,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
                        if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
                                end_seq = tp->rcv_nxt;
-                       tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
+                       tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
                }
        }
 
@@ -3496,12 +4130,12 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 {
        int this_sack;
        struct tcp_sack_block *sp = &tp->selective_acks[0];
-       struct tcp_sack_block *swalk = sp+1;
+       struct tcp_sack_block *swalk = sp + 1;
 
        /* See if the recent change to the first SACK eats into
         * or hits the sequence space of other SACK blocks, if so coalesce.
         */
-       for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) {
+       for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
                if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
                        int i;
 
@@ -3509,28 +4143,14 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         * Decrease num_sacks.
                         */
                        tp->rx_opt.num_sacks--;
-                       tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-                       for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
-                               sp[i] = sp[i+1];
+                       for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
+                               sp[i] = sp[i + 1];
                        continue;
                }
                this_sack++, swalk++;
        }
 }
 
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
-{
-       __u32 tmp;
-
-       tmp = sack1->start_seq;
-       sack1->start_seq = sack2->start_seq;
-       sack2->start_seq = tmp;
-
-       tmp = sack1->end_seq;
-       sack1->end_seq = sack2->end_seq;
-       sack2->end_seq = tmp;
-}
-
 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -3541,11 +4161,11 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
        if (!cur_sacks)
                goto new_sack;
 
-       for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
+       for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
                if (tcp_sack_extend(sp, seq, end_seq)) {
                        /* Rotate this_sack to the first one. */
-                       for (; this_sack>0; this_sack--, sp--)
-                               tcp_sack_swap(sp, sp-1);
+                       for (; this_sack > 0; this_sack--, sp--)
+                               swap(*sp, *(sp - 1));
                        if (cur_sacks > 1)
                                tcp_sack_maybe_coalesce(tp);
                        return;
@@ -3558,20 +4178,19 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
         *
         * If the sack array is full, forget about the last one.
         */
-       if (this_sack >= 4) {
+       if (this_sack >= TCP_NUM_SACKS) {
                this_sack--;
                tp->rx_opt.num_sacks--;
                sp--;
        }
        for (; this_sack > 0; this_sack--, sp--)
-               *sp = *(sp-1);
+               *sp = *(sp - 1);
 
 new_sack:
        /* Build the new head SACK, and we're done. */
        sp->start_seq = seq;
        sp->end_seq = end_seq;
        tp->rx_opt.num_sacks++;
-       tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
 }
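
RFC 2018 asks the receiver to report the most recently changed block first, which is why an extended block is rotated to the head with swap() above. A runnable toy of that rotation:

#include <stdio.h>

struct blk { unsigned int start, end; };

static void rotate_to_front(struct blk *sp, int idx)
{
        for (; idx > 0; idx--) {
                struct blk tmp = sp[idx - 1];

                sp[idx - 1] = sp[idx];
                sp[idx] = tmp;
        }
}

int main(void)
{
        struct blk sacks[3] = { {100, 200}, {300, 400}, {500, 600} };

        rotate_to_front(sacks, 2);      /* the 500..600 block was just extended */
        printf("head block: %u..%u\n", sacks[0].start, sacks[0].end);
        return 0;
}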
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -3585,17 +4204,16 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
        if (skb_queue_empty(&tp->out_of_order_queue)) {
                tp->rx_opt.num_sacks = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
                return;
        }
 
-       for (this_sack = 0; this_sack < num_sacks; ) {
+       for (this_sack = 0; this_sack < num_sacks;) {
                /* Check if the start of the sack is covered by RCV.NXT. */
                if (!before(tp->rcv_nxt, sp->start_seq)) {
                        int i;
 
                        /* RCV.NXT must cover all the block! */
-                       BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
+                       WARN_ON(before(tp->rcv_nxt, sp->end_seq));
 
                        /* Zap this SACK, by moving forward any other SACKS. */
                        for (i=this_sack+1; i < num_sacks; i++)
@@ -3606,10 +4224,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                this_sack++;
                sp++;
        }
-       if (num_sacks != tp->rx_opt.num_sacks) {
-               tp->rx_opt.num_sacks = num_sacks;
-               tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-       }
+       tp->rx_opt.num_sacks = num_sacks;
 }
 
 /* This one checks to see if we can put data from the
@@ -3629,7 +4244,7 @@ static void tcp_ofo_queue(struct sock *sk)
                        __u32 dsack = dsack_high;
                        if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
                                dsack_high = TCP_SKB_CB(skb)->end_seq;
-                       tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
+                       tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
                }
 
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
@@ -3650,8 +4265,28 @@ static void tcp_ofo_queue(struct sock *sk)
        }
 }
 
+static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
+static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+{
+       if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+           !sk_rmem_schedule(sk, size)) {
+
+               if (tcp_prune_queue(sk) < 0)
+                       return -1;
+
+               if (!sk_rmem_schedule(sk, size)) {
+                       if (!tcp_prune_ofo_queue(sk))
+                               return -1;
+
+                       if (!sk_rmem_schedule(sk, size))
+                               return -1;
+               }
+       }
+       return 0;
+}
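
The helper above escalates in three steps: charge the skb if the budget allows, collapse the queues, and only then sacrifice the whole out-of-order queue. A hedged restatement against an invented byte budget (try_charge and the prune_* stubs are stand-ins, not kernel functions):

static unsigned int rmem_used, rmem_limit = 65536;

static int try_charge(unsigned int size)
{
        if (rmem_used + size > rmem_limit)
                return 0;
        rmem_used += size;
        return 1;
}

static int prune_queues(void)    { return 0; }  /* tcp_prune_queue() stand-in */
static int prune_ofo_queue(void) { return 1; }  /* tcp_prune_ofo_queue() stand-in */

static int admit_segment(unsigned int size)
{
        if (try_charge(size))
                return 0;
        if (prune_queues() < 0)
                return -1;
        if (try_charge(size))
                return 0;
        if (!prune_ofo_queue())
                return -1;      /* nothing left to free, drop the segment */
        return try_charge(size) ? 0 : -1;
}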
+
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
        struct tcphdr *th = tcp_hdr(skb);
@@ -3661,15 +4296,11 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
                goto drop;
 
-       __skb_pull(skb, th->doff*4);
+       __skb_pull(skb, th->doff * 4);
 
        TCP_ECN_accept_cwr(tp, skb);
 
-       if (tp->rx_opt.dsack) {
-               tp->rx_opt.dsack = 0;
-               tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks,
-                                                   4 - tp->rx_opt.tstamp_ok);
-       }
+       tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
         *  Packets in sequence go to the receive queue.
@@ -3684,7 +4315,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
                    sock_owned_by_user(sk) && !tp->urg_data) {
                        int chunk = min_t(unsigned int, skb->len,
-                                                       tp->ucopy.len);
+                                         tp->ucopy.len);
 
                        __set_current_state(TASK_RUNNING);
 
@@ -3701,13 +4332,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                if (eaten <= 0) {
 queue_and_out:
                        if (eaten < 0 &&
-                           (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-                            !sk_stream_rmem_schedule(sk, skb))) {
-                               if (tcp_prune_queue(sk) < 0 ||
-                                   !sk_stream_rmem_schedule(sk, skb))
-                                       goto drop;
-                       }
-                       sk_stream_set_owner_r(skb, sk);
+                           tcp_try_rmem_schedule(sk, skb->truesize))
+                               goto drop;
+
+                       skb_set_owner_r(skb, sk);
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
@@ -3740,8 +4368,8 @@ queue_and_out:
 
        if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                /* A retransmit, 2nd most common case.  Force an immediate ack. */
-               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
-               tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+               tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
                tcp_enter_quickack_mode(sk);
@@ -3763,7 +4391,7 @@ drop:
                           tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
                           TCP_SKB_CB(skb)->end_seq);
 
-               tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
+               tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
 
                /* If window is closed, drop tail of packet. But after
                 * remembering D-SACK for its head made in previous line.
@@ -3775,12 +4403,8 @@ drop:
 
        TCP_ECN_check_ce(tp, skb);
 
-       if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-           !sk_stream_rmem_schedule(sk, skb)) {
-               if (tcp_prune_queue(sk) < 0 ||
-                   !sk_stream_rmem_schedule(sk, skb))
-                       goto drop;
-       }
+       if (tcp_try_rmem_schedule(sk, skb->truesize))
+               goto drop;
 
        /* Disable header prediction. */
        tp->pred_flags = 0;
@@ -3789,26 +4413,24 @@ drop:
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
-       sk_stream_set_owner_r(skb, sk);
+       skb_set_owner_r(skb, sk);
 
        if (!skb_peek(&tp->out_of_order_queue)) {
                /* Initial out of order segment, build 1 SACK. */
                if (tcp_is_sack(tp)) {
                        tp->rx_opt.num_sacks = 1;
-                       tp->rx_opt.dsack     = 0;
-                       tp->rx_opt.eff_sacks = 1;
                        tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
                        tp->selective_acks[0].end_seq =
                                                TCP_SKB_CB(skb)->end_seq;
                }
-               __skb_queue_head(&tp->out_of_order_queue,skb);
+               __skb_queue_head(&tp->out_of_order_queue, skb);
        } else {
-               struct sk_buff *skb1 = tp->out_of_order_queue.prev;
+               struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
                u32 seq = TCP_SKB_CB(skb)->seq;
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                if (seq == TCP_SKB_CB(skb1)->end_seq) {
-                       __skb_append(skb1, skb, &tp->out_of_order_queue);
+                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
                        if (!tp->rx_opt.num_sacks ||
                            tp->selective_acks[0].end_seq != seq)
@@ -3820,41 +4442,58 @@ drop:
                }
 
                /* Find place to insert this segment. */
-               do {
+               while (1) {
                        if (!after(TCP_SKB_CB(skb1)->seq, seq))
                                break;
-               } while ((skb1 = skb1->prev) !=
-                        (struct sk_buff*)&tp->out_of_order_queue);
+                       if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
+                               skb1 = NULL;
+                               break;
+                       }
+                       skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
+               }
 
                /* Do skb overlap to previous one? */
-               if (skb1 != (struct sk_buff*)&tp->out_of_order_queue &&
-                   before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+               if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
                        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                /* All the bits are present. Drop. */
                                __kfree_skb(skb);
-                               tcp_dsack_set(tp, seq, end_seq);
+                               tcp_dsack_set(sk, seq, end_seq);
                                goto add_sack;
                        }
                        if (after(seq, TCP_SKB_CB(skb1)->seq)) {
                                /* Partial overlap. */
-                               tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
+                               tcp_dsack_set(sk, seq,
+                                             TCP_SKB_CB(skb1)->end_seq);
                        } else {
-                               skb1 = skb1->prev;
+                               if (skb_queue_is_first(&tp->out_of_order_queue,
+                                                      skb1))
+                                       skb1 = NULL;
+                               else
+                                       skb1 = skb_queue_prev(
+                                               &tp->out_of_order_queue,
+                                               skb1);
                        }
                }
-               __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
+               if (!skb1)
+                       __skb_queue_head(&tp->out_of_order_queue, skb);
+               else
+                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
                /* And clean segments covered by new one as whole. */
-               while ((skb1 = skb->next) !=
-                      (struct sk_buff*)&tp->out_of_order_queue &&
-                      after(end_seq, TCP_SKB_CB(skb1)->seq)) {
-                      if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
-                              tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
-                              break;
-                      }
-                      __skb_unlink(skb1, &tp->out_of_order_queue);
-                      tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
-                      __kfree_skb(skb1);
+               while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
+                       skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+
+                       if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+                               break;
+                       if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+                               tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
+                                                end_seq);
+                               break;
+                       }
+                       __skb_unlink(skb1, &tp->out_of_order_queue);
+                       tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
+                                        TCP_SKB_CB(skb1)->end_seq);
+                       __kfree_skb(skb1);
                }
 
 add_sack:
@@ -3863,8 +4502,26 @@ add_sack:
        }
 }
 
+static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+                                       struct sk_buff_head *list)
+{
+       struct sk_buff *next = NULL;
+
+       if (!skb_queue_is_last(list, skb))
+               next = skb_queue_next(list, skb);
+
+       __skb_unlink(skb, list);
+       __kfree_skb(skb);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+
+       return next;
+}
+
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
+ *
+ * If tail is NULL, collapsing continues to the end of the list.
+ *
  * Segments with FIN/SYN are not collapsed (only because this
  * simplifies code)
  */
@@ -3873,19 +4530,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
             struct sk_buff *head, struct sk_buff *tail,
             u32 start, u32 end)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb, *n;
+       bool end_of_skbs;
 
        /* First, check that queue is collapsible and find
         * the point where collapsing can be useful. */
-       for (skb = head; skb != tail; ) {
+       skb = head;
+restart:
+       end_of_skbs = true;
+       skb_queue_walk_from_safe(list, skb, n) {
+               if (skb == tail)
+                       break;
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
-                       struct sk_buff *next = skb->next;
-                       __skb_unlink(skb, list);
-                       __kfree_skb(skb);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
-                       skb = next;
-                       continue;
+                       skb = tcp_collapse_one(sk, skb, list);
+                       if (!skb)
+                               break;
+                       goto restart;
                }
 
                /* The first skb to collapse is:
@@ -3895,16 +4556,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 */
                if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
-                    before(TCP_SKB_CB(skb)->seq, start) ||
-                    (skb->next != tail &&
-                     TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
+                    before(TCP_SKB_CB(skb)->seq, start))) {
+                       end_of_skbs = false;
                        break;
+               }
+
+               if (!skb_queue_is_last(list, skb)) {
+                       struct sk_buff *next = skb_queue_next(list, skb);
+                       if (next != tail &&
+                           TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
+                               end_of_skbs = false;
+                               break;
+                       }
+               }
 
                /* Decided to skip this, advance start seq. */
                start = TCP_SKB_CB(skb)->end_seq;
-               skb = skb->next;
        }
-       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
+       if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
@@ -3915,9 +4584,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                /* Too big header? This can happen with IPv6. */
                if (copy < 0)
                        return;
-               if (end-start < copy)
-                       copy = end-start;
-               nskb = alloc_skb(copy+header, GFP_ATOMIC);
+               if (end - start < copy)
+                       copy = end - start;
+               nskb = alloc_skb(copy + header, GFP_ATOMIC);
                if (!nskb)
                        return;
 
@@ -3930,8 +4599,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                memcpy(nskb->head, skb->head, header);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-               __skb_insert(nskb, skb->prev, skb, list);
-               sk_stream_set_owner_r(nskb, sk);
+               __skb_queue_before(list, skb, nskb);
+               skb_set_owner_r(nskb, sk);
 
                /* Copy data, releasing collapsed skbs. */
                while (copy > 0) {
@@ -3948,12 +4617,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                                start += size;
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
-                               struct sk_buff *next = skb->next;
-                               __skb_unlink(skb, list);
-                               __kfree_skb(skb);
-                               NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
-                               skb = next;
-                               if (skb == tail ||
+                               skb = tcp_collapse_one(sk, skb, list);
+                               if (!skb ||
+                                   skb == tail ||
                                    tcp_hdr(skb)->syn ||
                                    tcp_hdr(skb)->fin)
                                        return;
@@ -3980,17 +4646,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        head = skb;
 
        for (;;) {
-               skb = skb->next;
+               struct sk_buff *next = NULL;
+
+               if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
+                       next = skb_queue_next(&tp->out_of_order_queue, skb);
+               skb = next;
 
                /* Segment is terminated when we see gap or when
                 * we are at the end of all the queue. */
-               if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
+               if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
                        tcp_collapse(sk, &tp->out_of_order_queue,
                                     head, skb, start, end);
                        head = skb;
-                       if (skb == (struct sk_buff *)&tp->out_of_order_queue)
+                       if (!skb)
                                break;
                        /* Start new segment */
                        start = TCP_SKB_CB(skb)->seq;
@@ -4004,6 +4674,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        }
 }
 
+/*
+ * Purge the out-of-order queue.
+ * Return true if the queue was pruned.
+ */
+static int tcp_prune_ofo_queue(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int res = 0;
+
+       if (!skb_queue_empty(&tp->out_of_order_queue)) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+               __skb_queue_purge(&tp->out_of_order_queue);
+
+               /* Reset SACK state.  A conforming SACK implementation will
+        * do the same at a timeout-based retransmit.  When a connection
+        * is in a sad state like this, we care only about integrity
+        * of the connection, not performance.
+                */
+               if (tp->rx_opt.sack_ok)
+                       tcp_sack_reset(&tp->rx_opt);
+               sk_mem_reclaim(sk);
+               res = 1;
+       }
+       return res;
+}
+
 /* Reduce allocated memory if we can, trying to get
  * the socket within its memory limits again.
  *
@@ -4017,7 +4713,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-       NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
@@ -4025,11 +4721,12 @@ static int tcp_prune_queue(struct sock *sk)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
-       tcp_collapse(sk, &sk->sk_receive_queue,
-                    sk->sk_receive_queue.next,
-                    (struct sk_buff*)&sk->sk_receive_queue,
-                    tp->copied_seq, tp->rcv_nxt);
-       sk_stream_mem_reclaim(sk);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               tcp_collapse(sk, &sk->sk_receive_queue,
+                            skb_peek(&sk->sk_receive_queue),
+                            NULL,
+                            tp->copied_seq, tp->rcv_nxt);
+       sk_mem_reclaim(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                return 0;
@@ -4037,20 +4734,7 @@ static int tcp_prune_queue(struct sock *sk)
        /* Collapsing did not help, destructive actions follow.
         * This must not ever occur. */
 
-       /* First, purge the out_of_order queue. */
-       if (!skb_queue_empty(&tp->out_of_order_queue)) {
-               NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
-               __skb_queue_purge(&tp->out_of_order_queue);
-
-               /* Reset SACK state.  A conforming SACK implementation will
-                * do the same at a timeout based retransmit.  When a connection
-                * is in a sad state like this, we care only about integrity
-                * of the connection not performance.
-                */
-               if (tcp_is_sack(tp))
-                       tcp_sack_reset(&tp->rx_opt);
-               sk_stream_mem_reclaim(sk);
-       }
+       tcp_prune_ofo_queue(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                return 0;
@@ -4059,14 +4743,13 @@ static int tcp_prune_queue(struct sock *sk)
         * drop receive data on the floor.  It will get retransmitted
         * and hopefully then we'll have sufficient space.
         */
-       NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
        /* Massive buffer overcommit. */
        tp->pred_flags = 0;
        return -1;
 }
 
-
 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
  * As additional protections, we do not touch cwnd in retransmission phases,
  * and if application hit its sndbuf limit recently.
@@ -4126,10 +4809,10 @@ static void tcp_new_space(struct sock *sk)
 
        if (tcp_should_expand_sndbuf(sk)) {
                int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
-                       MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
-                   demanded = max_t(unsigned int, tp->snd_cwnd,
-                                                  tp->reordering + 1);
-               sndmem *= 2*demanded;
+                       MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+               int demanded = max_t(unsigned int, tp->snd_cwnd,
+                                    tp->reordering + 1);
+               sndmem *= 2 * demanded;
                if (sndmem > sk->sk_sndbuf)
                        sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
                tp->snd_cwnd_stamp = tcp_time_stamp;
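
Worked numbers for the sizing above, under assumed values (per-skb overheads vary by architecture and config): mss_clamp = 1460, MAX_TCP_HEADER ~ 320, sizeof(struct sk_buff) ~ 240, snd_cwnd = 10, reordering = 3.

#include <stdio.h>

int main(void)
{
        int per_skb = 1460 + 320 + 16 + 240;            /* payload + header room + skb */
        int demanded = 10 > (3 + 1) ? 10 : (3 + 1);     /* max(snd_cwnd, reordering + 1) */
        long target = (long)per_skb * 2 * demanded;

        printf("sndbuf target: %ld bytes\n", target);   /* ~40 KB with these inputs */
        return 0;
}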
@@ -4170,8 +4853,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
            /* We ACK each frame or... */
            tcp_in_quickack_mode(sk) ||
            /* We have out of order data. */
-           (ofo_possible &&
-            skb_peek(&tp->out_of_order_queue))) {
+           (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
                /* Then ack it now */
                tcp_send_ack(sk);
        } else {
@@ -4199,7 +4881,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
  *     either form (or just set the sysctl tcp_stdurg).
  */
 
-static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
+static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 ptr = ntohs(th->urg_ptr);
@@ -4248,8 +4930,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
         * buggy users.
         */
        if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
-           !sock_flag(sk, SOCK_URGINLINE) &&
-           tp->copied_seq != tp->rcv_nxt) {
+           !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
                tp->copied_seq++;
                if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
@@ -4258,8 +4939,8 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
                }
        }
 
-       tp->urg_data   = TCP_URG_NOTYET;
-       tp->urg_seq    = ptr;
+       tp->urg_data = TCP_URG_NOTYET;
+       tp->urg_seq = ptr;
 
        /* Disable header prediction. */
        tp->pred_flags = 0;
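Earlier in tcp_check_urg(), outside this hunk, the 16-bit urgent pointer is turned into an absolute sequence number; a sketch of that conventional computation, including the BSD off-by-one quirk that the tcp_stdurg sysctl mentioned above exists for:

	u32 ptr = ntohs(th->urg_ptr);

	if (ptr && !sysctl_tcp_stdurg)
		ptr--;			/* BSD interpretation: points one past
					 * the urgent byte */
	ptr += ntohl(th->seq);		/* make it an absolute sequence number */

	/* Ignore urgent data we have already read. */
	if (after(tp->copied_seq, ptr))
		return;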
@@ -4272,7 +4953,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
 
        /* Check if we get a new urgent pointer - normally not. */
        if (th->urg)
-               tcp_check_urg(sk,th);
+               tcp_check_urg(sk, th);
 
        /* Do we wait for any urgent data? - normally not... */
        if (tp->urg_data == TCP_URG_NOTYET) {
@@ -4314,7 +4995,8 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        return err;
 }
 
-static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static __sum16 __tcp_checksum_complete_user(struct sock *sk,
+                                           struct sk_buff *skb)
 {
        __sum16 result;
 
@@ -4328,14 +5010,16 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb
        return result;
 }
 
-static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_checksum_complete_user(struct sock *sk,
+                                            struct sk_buff *skb)
 {
        return !skb_csum_unnecessary(skb) &&
-               __tcp_checksum_complete_user(sk, skb);
+              __tcp_checksum_complete_user(sk, skb);
 }
 
 #ifdef CONFIG_NET_DMA
-static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+                                 int hlen)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int chunk = skb->len - hlen;
@@ -4346,12 +5030,14 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
                return 0;
 
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-               tp->ucopy.dma_chan = get_softnet_dma();
+               tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
        if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
                dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
-                       skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+                                                        skb, hlen,
+                                                        tp->ucopy.iov, chunk,
+                                                        tp->ucopy.pinned_list);
 
                if (dma_cookie < 0)
                        goto out;
@@ -4378,6 +5064,67 @@ out:
 }
 #endif /* CONFIG_NET_DMA */
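The switch from get_softnet_dma() to dma_find_channel(DMA_MEMCPY) drops per-call channel refcounting in favor of dmaengine's per-CPU channel table: the lookup takes no reference, so there is nothing to put afterwards. Roughly the consumer pattern, sketched from the calls visible in this hunk:

	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);	/* no ref taken */

	if (chan) {
		dma_cookie_t cookie = dma_skb_copy_datagram_iovec(chan, skb, hlen,
								  iov, chunk,
								  pinned_list);
		if (cookie >= 0)
			dma_async_memcpy_issue_pending(chan);	/* kick the engine;
								 * poll cookie later */
	}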
 
+/* Does PAWS and seqno based validation of an incoming segment; the
+ * header flags play a significant role here.
+ */
+static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+                                 struct tcphdr *th, int syn_inerr)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* RFC1323: H1. Apply PAWS check first. */
+       if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+           tcp_paws_discard(sk, skb)) {
+               if (!th->rst) {
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+                       tcp_send_dupack(sk, skb);
+                       goto discard;
+               }
+               /* Reset is accepted even if it did not pass PAWS. */
+       }
+
+       /* Step 1: check sequence number */
+       if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
+               /* RFC793, page 37: "In all states except SYN-SENT, all reset
+                * (RST) segments are validated by checking their SEQ-fields."
+                * And page 69: "If an incoming segment is not acceptable,
+                * an acknowledgment should be sent in reply (unless the RST
+                * bit is set, if so drop the segment and return)".
+                */
+               if (!th->rst)
+                       tcp_send_dupack(sk, skb);
+               goto discard;
+       }
+
+       /* Step 2: check RST bit */
+       if (th->rst) {
+               tcp_reset(sk);
+               goto discard;
+       }
+
+       /* ts_recent update must be made after we are sure that the packet
+        * is in window.
+        */
+       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
+       /* step 3: check security and precedence [ignored] */
+
+       /* step 4: Check for a SYN in window. */
+       if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+               if (syn_inerr)
+                       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
+               tcp_reset(sk);
+               return -1;
+       }
+
+       return 1;
+
+discard:
+       __kfree_skb(skb);
+       return 0;
+}
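tcp_validate_incoming() encodes three outcomes in its return value, and both callers below fold them into the old 0/1 convention with a single negation:

	/* Return contract, as visible at the call sites below:
	 *   1   segment passed PAWS/seq/RST/SYN checks; keep processing
	 *   0   segment consumed (dup-ACKed and freed); caller returns 0
	 *  -1   in-window SYN, connection was reset; caller returns 1
	 */
	res = tcp_validate_incoming(sk, skb, th, 1);
	if (res <= 0)
		return -res;	/* 0 stays 0, -1 becomes 1 */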
+
 /*
  *     TCP receive function for the ESTABLISHED state.
  *
@@ -4405,6 +5152,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        struct tcphdr *th, unsigned len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       int res;
 
        /*
         *      Header prediction.
@@ -4433,7 +5181,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         */
 
        if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
-               TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+           TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
+           !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
                int tcp_header_len = tp->tcp_header_len;
 
                /* Timestamp header prediction: tcp_header_len
@@ -4443,19 +5192,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                /* Check timestamp */
                if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
-                       __be32 *ptr = (__be32 *)(th + 1);
-
                        /* No? Slow path! */
-                       if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
-                                         | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
+                       if (!tcp_parse_aligned_timestamp(tp, th))
                                goto slow_path;
 
-                       tp->rx_opt.saw_tstamp = 1;
-                       ++ptr;
-                       tp->rx_opt.rcv_tsval = ntohl(*ptr);
-                       ++ptr;
-                       tp->rx_opt.rcv_tsecr = ntohl(*ptr);
-
                        /* If PAWS failed, check it more carefully in slow path */
                        if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
                                goto slow_path;
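The open-coded option scan removed above moves into tcp_parse_aligned_timestamp(); rebuilt from the deleted lines, the helper would look roughly like:

	/* Reconstructed from the removed lines; the committed helper may
	 * differ cosmetically.  Returns nonzero iff the header carries
	 * exactly the aligned NOP-NOP-TIMESTAMP option, filling tp->rx_opt
	 * on success.
	 */
	static int tcp_parse_aligned_timestamp(struct tcp_sock *tp,
					       struct tcphdr *th)
	{
		__be32 *ptr = (__be32 *)(th + 1);

		if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
			tp->rx_opt.saw_tstamp = 1;
			++ptr;
			tp->rx_opt.rcv_tsval = ntohl(*ptr);
			++ptr;
			tp->rx_opt.rcv_tsecr = ntohl(*ptr);
			return 1;
		}
		return 0;
	}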
@@ -4487,7 +5227,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tcp_data_snd_check(sk);
                                return 0;
                        } else { /* Header too small */
-                               TCP_INC_STATS_BH(TCP_MIB_INERRS);
+                               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
                                goto discard;
                        }
                } else {
@@ -4502,7 +5242,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                        eaten = 1;
                                }
 #endif
-                               if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+                               if (tp->ucopy.task == current &&
+                                   sock_owned_by_user(sk) && !copied_early) {
                                        __set_current_state(TASK_RUNNING);
 
                                        if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
@@ -4523,7 +5264,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                                        __skb_pull(skb, tcp_header_len);
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                                       NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+                                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
                                }
                                if (copied_early)
                                        tcp_cleanup_rbuf(sk, skb->len);
@@ -4546,12 +5287,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                if ((int)skb->truesize > sk->sk_forward_alloc)
                                        goto step5;
 
-                               NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                                /* Bulk data transfer: receiver */
-                               __skb_pull(skb,tcp_header_len);
+                               __skb_pull(skb, tcp_header_len);
                                __skb_queue_tail(&sk->sk_receive_queue, skb);
-                               sk_stream_set_owner_r(skb, sk);
+                               skb_set_owner_r(skb, sk);
                                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                        }
 
@@ -4565,7 +5306,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                        goto no_ack;
                        }
 
-                       __tcp_ack_snd_check(sk, 0);
+                       if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
+                               __tcp_ack_snd_check(sk, 0);
 no_ack:
 #ifdef CONFIG_NET_DMA
                        if (copied_early)
@@ -4581,59 +5323,20 @@ no_ack:
        }
 
 slow_path:
-       if (len < (th->doff<<2) || tcp_checksum_complete_user(sk, skb))
+       if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
                goto csum_error;
 
        /*
-        * RFC1323: H1. Apply PAWS check first.
-        */
-       if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-           tcp_paws_discard(sk, skb)) {
-               if (!th->rst) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
-                       tcp_send_dupack(sk, skb);
-                       goto discard;
-               }
-               /* Resets are accepted even if PAWS failed.
-
-                  ts_recent update must be made after we are sure
-                  that the packet is in window.
-                */
-       }
-
-       /*
         *      Standard slow path.
         */
 
-       if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
-               /* RFC793, page 37: "In all states except SYN-SENT, all reset
-                * (RST) segments are validated by checking their SEQ-fields."
-                * And page 69: "If an incoming segment is not acceptable,
-                * an acknowledgment should be sent in reply (unless the RST bit
-                * is set, if so drop the segment and return)".
-                */
-               if (!th->rst)
-                       tcp_send_dupack(sk, skb);
-               goto discard;
-       }
-
-       if (th->rst) {
-               tcp_reset(sk);
-               goto discard;
-       }
-
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
-       if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               TCP_INC_STATS_BH(TCP_MIB_INERRS);
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
-               tcp_reset(sk);
-               return 1;
-       }
+       res = tcp_validate_incoming(sk, skb, th, 1);
+       if (res <= 0)
+               return -res;
 
 step5:
-       if (th->ack)
-               tcp_ack(sk, skb, FLAG_SLOWPATH);
+       if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+               goto discard;
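This gives tcp_ack()'s return value teeth on the slow path; tcp_rcv_state_process() below derives its old "acceptable" boolean from the same value with > 0. The convention assumed here (tcp_ack() itself is outside the hunk):

	/* Assumed return convention of tcp_ack() after this change:
	 *   > 0  ACK processed and advanced state
	 *   = 0  old/duplicate ACK, treated as a no-op
	 *   < 0  ACK for data never sent; caller drops the segment
	 */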
 
        tcp_rcv_rtt_measure_ts(sk, skb);
 
@@ -4648,7 +5351,7 @@ step5:
        return 0;
 
 csum_error:
-       TCP_INC_STATS_BH(TCP_MIB_INERRS);
+       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
        __kfree_skb(skb);
@@ -4682,7 +5385,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
                             tcp_time_stamp)) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
                        goto reset_and_undo;
                }
 
@@ -4731,7 +5434,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
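tcp_init_wl() loses its redundant ack_seq argument here; the helper (it lives in a header, not in this file) presumably reduces to recording the sequence used for the snd_wl1 window-update check:

	/* Presumed shape of the simplified helper. */
	static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
	{
		tp->snd_wl1 = seq;
	}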
@@ -4788,7 +5491,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                if (!sock_flag(sk, SOCK_DEAD)) {
                        sk->sk_state_change(sk);
-                       sk_wake_async(sk, 0, POLL_OUT);
+                       sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
                }
 
                if (sk->sk_write_pending ||
@@ -4831,7 +5534,8 @@ discard:
        }
 
        /* PAWS check. */
-       if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0))
+       if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
+           tcp_paws_reject(&tp->rx_opt, 0))
                goto discard_and_undo;
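tcp_paws_check() becomes tcp_paws_reject() here, inverting the sense of the test so the call site reads naturally. A sketch of the presumed semantics, with the usual RST escape hatch for a peer that rebooted and lost its timestamp clock:

	/* Presumed shape of the renamed helper: nonzero means "reject under
	 * PAWS".  TCP_PAWS_MSL bounds how long a stale ts_recent may veto
	 * a RST.
	 */
	static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
					  int rst)
	{
		if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= 0)
			return 0;	/* timestamp did not go backwards: accept */
		if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
			return 0;	/* ts_recent is stale: let the RST through */
		return 1;
	}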
 
        if (th->syn) {
@@ -4866,7 +5570,6 @@ discard:
                tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
 
-
                tcp_send_synack(sk);
 #if 0
                /* Note, we could accept data and URG from this segment.
@@ -4898,7 +5601,6 @@ reset_and_undo:
        return 1;
 }
 
-
 /*
  *     This function implements the receiving procedure of RFC 793 for
  *     all states except ESTABLISHED and TIME_WAIT.
@@ -4912,6 +5614,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int queued = 0;
+       int res;
 
        tp->rx_opt.saw_tstamp = 0;
 
@@ -4964,46 +5667,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                return 0;
        }
 
-       if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
-           tcp_paws_discard(sk, skb)) {
-               if (!th->rst) {
-                       NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
-                       tcp_send_dupack(sk, skb);
-                       goto discard;
-               }
-               /* Reset is accepted even if it did not pass PAWS. */
-       }
-
-       /* step 1: check sequence number */
-       if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
-               if (!th->rst)
-                       tcp_send_dupack(sk, skb);
-               goto discard;
-       }
-
-       /* step 2: check RST bit */
-       if (th->rst) {
-               tcp_reset(sk);
-               goto discard;
-       }
-
-       tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
-       /* step 3: check security and precedence [ignored] */
-
-       /*      step 4:
-        *
-        *      Check for a SYN in window.
-        */
-       if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
-               tcp_reset(sk);
-               return 1;
-       }
+       res = tcp_validate_incoming(sk, skb, th, 0);
+       if (res <= 0)
+               return -res;
 
        /* step 5: check the ACK field */
        if (th->ack) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
@@ -5018,22 +5688,21 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                 * are not woken up, because sk->sk_sleep ==
                                 * NULL and sk->sk_socket == NULL.
                                 */
-                               if (sk->sk_socket) {
-                                       sk_wake_async(sk,0,POLL_OUT);
-                               }
+                               if (sk->sk_socket)
+                                       sk_wake_async(sk,
+                                                     SOCK_WAKE_IO, POLL_OUT);
 
                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) <<
                                              tp->rx_opt.snd_wscale;
-                               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
-                                           TCP_SKB_CB(skb)->seq);
+                               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                                /* tcp_ack considers this ACK a duplicate
                                 * and does not calculate rtt.
                                 * Fix it at least with timestamps.
                                 */
-                               if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-                                   !tp->srtt)
+                               if (tp->rx_opt.saw_tstamp &&
+                                   tp->rx_opt.rcv_tsecr && !tp->srtt)
                                        tcp_ack_saw_tstamp(sk, 0);
 
                                if (tp->rx_opt.tstamp_ok)
@@ -5078,7 +5747,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                            (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                                             after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
                                                tcp_done(sk);
-                                               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+                                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                                return 1;
                                        }
 
@@ -5138,7 +5807,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
                        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                            after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-                               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
                                tcp_reset(sk);
                                return 1;
                        }
@@ -5165,7 +5834,11 @@ discard:
 
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 EXPORT_SYMBOL(sysctl_tcp_reordering);
+EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 EXPORT_SYMBOL(tcp_parse_options);
+#ifdef CONFIG_TCP_MD5SIG
+EXPORT_SYMBOL(tcp_parse_md5sig_option);
+#endif
 EXPORT_SYMBOL(tcp_rcv_established);
 EXPORT_SYMBOL(tcp_rcv_state_process);
 EXPORT_SYMBOL(tcp_initialize_rcv_mss);