diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f0cca4..fec8a7a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -50,9 +50,9 @@
  *             Andi Kleen:             Make sure we never ack data there is not
  *                                     enough room for. Also make this condition
  *                                     a fatal error if it might still happen.
- *             Andi Kleen:             Add tcp_measure_rcv_mss to make 
+ *             Andi Kleen:             Add tcp_measure_rcv_mss to make
  *                                     connections with MSS<min(MTU,ann. MSS)
- *                                     work without delayed acks. 
+ *                                     work without delayed acks.
  *             Andi Kleen:             Process packets with PSH set in the
  *                                     fast path.
  *             J Hadi Salim:           ECN support
@@ -63,7 +63,6 @@
  *             Pasi Sarolahti:         F-RTO for dealing with spurious RTOs
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
-
-int sysctl_tcp_timestamps = 1;
-int sysctl_tcp_window_scaling = 1;
-int sysctl_tcp_sack = 1;
-int sysctl_tcp_fack = 1;
-int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
-int sysctl_tcp_ecn;
-int sysctl_tcp_dsack = 1;
-int sysctl_tcp_app_win = 31;
-int sysctl_tcp_adv_win_scale = 2;
-
-int sysctl_tcp_stdurg;
-int sysctl_tcp_rfc1337;
-int sysctl_tcp_max_orphans = NR_FILE;
-int sysctl_tcp_frto;
-int sysctl_tcp_nometrics_save;
-
-int sysctl_tcp_moderate_rcvbuf = 1;
-int sysctl_tcp_abc = 1;
+#include <net/netdma.h>
+
+int sysctl_tcp_timestamps __read_mostly = 1;
+int sysctl_tcp_window_scaling __read_mostly = 1;
+int sysctl_tcp_sack __read_mostly = 1;
+int sysctl_tcp_fack __read_mostly = 1;
+int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
+int sysctl_tcp_ecn __read_mostly;
+int sysctl_tcp_dsack __read_mostly = 1;
+int sysctl_tcp_app_win __read_mostly = 31;
+int sysctl_tcp_adv_win_scale __read_mostly = 2;
+
+int sysctl_tcp_stdurg __read_mostly;
+int sysctl_tcp_rfc1337 __read_mostly;
+int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
+int sysctl_tcp_frto __read_mostly;
+int sysctl_tcp_frto_response __read_mostly;
+int sysctl_tcp_nometrics_save __read_mostly;
+
+int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
+int sysctl_tcp_abc __read_mostly;
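/* Aside: __read_mostly places these sysctls in the kernel's read-mostly
 * data section, keeping hot, frequently written variables from sharing
 * (and bouncing) cache lines with values that change only via sysctl. */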
 
 #define FLAG_DATA              0x01 /* Incoming frame contained data.          */
 #define FLAG_WIN_UPDATE                0x02 /* Incoming ACK was a window update.       */
@@ -100,6 +101,7 @@ int sysctl_tcp_abc = 1;
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
 #define FLAG_DATA_LOST         0x80 /* SACK detected data lossage.             */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
+#define FLAG_ONLY_ORIG_SACKED  0x200 /* SACKs only non-rexmit sent before RTO */
 
 #define FLAG_ACKED             (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP           (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -110,24 +112,26 @@ int sysctl_tcp_abc = 1;
 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
 #define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
 
+#define IsSackFrto() (sysctl_tcp_frto == 0x2)
+
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 
-/* Adapt the MSS value used to make delayed ack decision to the 
+/* Adapt the MSS value used to make delayed ack decision to the
  * real world.
- */ 
+ */
 static void tcp_measure_rcv_mss(struct sock *sk,
                                const struct sk_buff *skb)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       const unsigned int lss = icsk->icsk_ack.last_seg_size; 
+       const unsigned int lss = icsk->icsk_ack.last_seg_size;
        unsigned int len;
 
-       icsk->icsk_ack.last_seg_size = 0; 
+       icsk->icsk_ack.last_seg_size = 0;
 
        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
         */
-       len = skb->len;
+       len = skb_shinfo(skb)->gso_size ?: skb->len;
        if (len >= icsk->icsk_ack.rcv_mss) {
                icsk->icsk_ack.rcv_mss = len;
        } else {
@@ -136,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                 *
                 * "len" is invariant segment length, including TCP header.
                 */
-               len += skb->data - skb->h.raw;
+               len += skb->data - skb_transport_header(skb);
                if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
                    /* If PSH is not set, packet should be
                     * full sized, provided peer TCP is not badly broken.
@@ -144,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                     * to handle super-low mtu links fairly.
                     */
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-                    !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+                    !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
                        /* Subtract also invariant (if peer is RFC compliant),
                         * tcp header plus fixed timestamp option length.
                         * Resulting "len" is MSS free of SACK jitter.
@@ -156,6 +160,8 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                                return;
                        }
                }
+               if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
+                       icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
                icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
        }
 }
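The gso_size line above relies on the GNU C binary conditional: "a ?: b"
evaluates to a when a is nonzero (evaluating a only once) and to b otherwise,
so a GSO aggregate reports the peer's segment size rather than the combined
skb length. A minimal standalone sketch of the idiom:

	static unsigned int rcv_seg_len(struct sk_buff *skb)
	{
		/* gso_size when the skb is a GSO aggregate, else raw length */
		return skb_shinfo(skb)->gso_size ?: skb->len;
	}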
@@ -229,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
-                            const struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(skb->truesize)/2;
        int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
@@ -246,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
        return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+static void tcp_grow_window(struct sock *sk,
                            struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
@@ -261,7 +269,7 @@ static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
                if (tcp_win_from_space(skb->truesize) <= skb->len)
                        incr = 2*tp->advmss;
                else
-                       incr = __tcp_grow_window(sk, tp, skb);
+                       incr = __tcp_grow_window(sk, skb);
 
                if (incr) {
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
@@ -324,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk)
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
-static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
+static void tcp_clamp_window(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        icsk->icsk_ack.quick = 0;
@@ -438,15 +447,15 @@ void tcp_rcv_space_adjust(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int time;
        int space;
-       
+
        if (tp->rcvq_space.time == 0)
                goto new_measure;
-       
+
        time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) ||
            tp->rcv_rtt_est.rtt == 0)
                return;
-       
+
        space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
 
        space = max(tp->rcvq_space.space, space);
@@ -481,7 +490,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                        }
                }
        }
-       
+
 new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
        tp->rcvq_space.time = tcp_time_stamp;
@@ -497,8 +506,9 @@ new_measure:
  * each ACK we send, he increments snd_cwnd and transmits more of his
  * queue.  -DaveM
  */
-static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now;
 
@@ -507,7 +517,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
        tcp_measure_rcv_mss(sk, skb);
 
        tcp_rcv_rtt_measure(tp);
-       
+
        now = tcp_time_stamp;
 
        if (!icsk->icsk_ack.ato) {
@@ -539,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
        TCP_ECN_check_ce(tp, skb);
 
        if (skb->len >= 128)
-               tcp_grow_window(sk, tp, skb);
+               tcp_grow_window(sk, skb);
 }
 
 /* Called to compute a smoothed rtt estimate. The data fed to this
@@ -559,7 +569,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
        /*      The following amusing code comes from Jacobson's
         *      article in SIGCOMM '88.  Note that rtt and mdev
         *      are scaled versions of rtt and mean deviation.
-        *      This is designed to be as fast as possible 
+        *      This is designed to be as fast as possible
         *      m stands for "measurement".
         *
         *      On a 1990 paper the rto value is changed to:
@@ -572,7 +582,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
         * does not matter how to _calculate_ it. Seems, it was trap
         * that VJ failed to avoid. 8)
         */
-       if(m == 0)
+       if (m == 0)
                m = 1;
        if (tp->srtt != 0) {
                m -= (tp->srtt >> 3);   /* m is now error in rtt est */
@@ -757,15 +767,17 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 }
 
 /* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk)
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       const struct inet_connection_sock *icsk = inet_csk(sk);
 
        tp->prior_ssthresh = 0;
        tp->bytes_acked = 0;
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+       if (icsk->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+               if (set_ssthresh)
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tp->snd_cwnd = min(tp->snd_cwnd,
                                   tcp_packets_in_flight(tp) + 1U);
                tp->snd_cwnd_cnt = 0;
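Both flavours of the new set_ssthresh argument appear later in this diff:
ECN-triggered CWR keeps the old behaviour, while the rate-halving
spurious-RTO response leaves ssthresh untouched:

	tcp_enter_cwr(sk, 1);	/* reduce ssthresh as before (e.g. on FLAG_ECE) */
	tcp_enter_cwr(sk, 0);	/* rate-halve cwnd only, ssthresh unchanged */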
@@ -932,30 +944,61 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
-       struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
+       unsigned char *ptr = (skb_transport_header(ack_skb) +
+                             TCP_SKB_CB(ack_skb)->sacked);
+       struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+       struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
        int reord = tp->packets_out;
        int prior_fackets;
        u32 lost_retrans = 0;
        int flag = 0;
-       int dup_sack = 0;
+       int found_dup_sack = 0;
+       int cached_fack_count;
        int i;
+       int first_sack_index;
 
        if (!tp->sacked_out)
                tp->fackets_out = 0;
        prior_fackets = tp->fackets_out;
 
+       /* Check for D-SACK. */
+       if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+               found_dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+       } else if (num_sacks > 1 &&
+                       !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
+                       !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+               found_dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+       }
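The two branches above implement the RFC 2883 D-SACK rules; as a standalone
predicate (a sketch, with sequence numbers already in host byte order and
before()/after() as in the kernel):

	static int is_dsack(u32 ack_seq, const struct tcp_sack_block *sp,
			    int num_sacks)
	{
		if (before(sp[0].start_seq, ack_seq))
			return 1;	/* first block covers already-ACKed data */
		return num_sacks > 1 &&	/* first block nested inside second */
		       !after(sp[0].end_seq, sp[1].end_seq) &&
		       !before(sp[0].start_seq, sp[1].start_seq);
	}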
+
+       /* D-SACK for already forgotten data...
+        * Do dumb counting. */
+       if (found_dup_sack &&
+                       !after(ntohl(sp[0].end_seq), prior_snd_una) &&
+                       after(ntohl(sp[0].end_seq), tp->undo_marker))
+               tp->undo_retrans--;
+
+       /* Eliminate too old ACKs, but take into
+        * account more or less fresh ones, they can
+        * contain valid SACK info.
+        */
+       if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+               return 0;
+
        /* SACK fastpath:
         * if the only SACK change is the increase of the end_seq of
         * the first block then only apply that SACK block
         * and use retrans queue hinting otherwise slowpath */
        flag = 1;
-       for (i = 0; i< num_sacks; i++) {
-               __u32 start_seq = ntohl(sp[i].start_seq);
-               __u32 end_seq =  ntohl(sp[i].end_seq);
+       for (i = 0; i < num_sacks; i++) {
+               __be32 start_seq = sp[i].start_seq;
+               __be32 end_seq = sp[i].end_seq;
 
-               if (i == 0){
+               if (i == 0) {
                        if (tp->recv_sack_cache[i].start_seq != start_seq)
                                flag = 0;
                } else {
@@ -965,39 +1008,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                }
                tp->recv_sack_cache[i].start_seq = start_seq;
                tp->recv_sack_cache[i].end_seq = end_seq;
-
-               /* Check for D-SACK. */
-               if (i == 0) {
-                       u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
-
-                       if (before(start_seq, ack)) {
-                               dup_sack = 1;
-                               tp->rx_opt.sack_ok |= 4;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
-                       } else if (num_sacks > 1 &&
-                                  !after(end_seq, ntohl(sp[1].end_seq)) &&
-                                  !before(start_seq, ntohl(sp[1].start_seq))) {
-                               dup_sack = 1;
-                               tp->rx_opt.sack_ok |= 4;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
-                       }
-
-                       /* D-SACK for already forgotten data...
-                        * Do dumb counting. */
-                       if (dup_sack &&
-                           !after(end_seq, prior_snd_una) &&
-                           after(end_seq, tp->undo_marker))
-                               tp->undo_retrans--;
-
-                       /* Eliminate too old ACKs, but take into
-                        * account more or less fresh ones, they can
-                        * contain valid SACK info.
-                        */
-                       if (before(ack, prior_snd_una - tp->max_window))
-                               return 0;
-               }
+       }
+       /* Clear the rest of the cached SACK blocks so they won't match mistakenly. */
+       for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
+               tp->recv_sack_cache[i].start_seq = 0;
+               tp->recv_sack_cache[i].end_seq = 0;
        }
 
+       first_sack_index = 0;
        if (flag)
                num_sacks = 1;
        else {
@@ -1009,10 +1027,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                        for (j = 0; j < i; j++){
                                if (after(ntohl(sp[j].start_seq),
                                          ntohl(sp[j+1].start_seq))){
-                                       sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq);
-                                       sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq);
-                                       sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq);
-                                       sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq);
+                                       struct tcp_sack_block_wire tmp;
+
+                                       tmp = sp[j];
+                                       sp[j] = sp[j+1];
+                                       sp[j+1] = tmp;
+
+                                       /* Track where the first SACK block goes to */
+                                       if (j == first_sack_index)
+                                               first_sack_index = j+1;
                                }
 
                        }
@@ -1022,31 +1045,41 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        /* clear flag as used for different purpose in following code */
        flag = 0;
 
+       /* Use SACK fastpath hint if valid */
+       cached_skb = tp->fastpath_skb_hint;
+       cached_fack_count = tp->fastpath_cnt_hint;
+       if (!cached_skb) {
+               cached_skb = tcp_write_queue_head(sk);
+               cached_fack_count = 0;
+       }
+
        for (i=0; i<num_sacks; i++, sp++) {
                struct sk_buff *skb;
                __u32 start_seq = ntohl(sp->start_seq);
                __u32 end_seq = ntohl(sp->end_seq);
                int fack_count;
+               int dup_sack = (found_dup_sack && (i == first_sack_index));
 
-               /* Use SACK fastpath hint if valid */
-               if (tp->fastpath_skb_hint) {
-                       skb = tp->fastpath_skb_hint;
-                       fack_count = tp->fastpath_cnt_hint;
-               } else {
-                       skb = sk->sk_write_queue.next;
-                       fack_count = 0;
-               }
+               skb = cached_skb;
+               fack_count = cached_fack_count;
 
                /* Event "B" in the comment above. */
                if (after(end_seq, tp->high_seq))
                        flag |= FLAG_DATA_LOST;
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
                        int in_sack, pcount;
                        u8 sacked;
 
-                       tp->fastpath_skb_hint = skb;
-                       tp->fastpath_cnt_hint = fack_count;
+                       if (skb == tcp_send_head(sk))
+                               break;
+
+                       cached_skb = skb;
+                       cached_fack_count = fack_count;
+                       if (i == first_sack_index) {
+                               tp->fastpath_skb_hint = skb;
+                               tp->fastpath_cnt_hint = fack_count;
+                       }
 
                        /* The retransmission queue is always in order, so
                         * we can short-circuit the walk early.
@@ -1072,7 +1105,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                else
                                        pkt_len = (end_seq -
                                                   TCP_SKB_CB(skb)->seq);
-                               if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+                               if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
                                        break;
                                pcount = tcp_skb_pcount(skb);
                        }
@@ -1141,6 +1174,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                                /* clear lost hint */
                                                tp->retransmit_skb_hint = NULL;
                                        }
+                                       /* SACK enhanced F-RTO detection.
+                                        * Set flag if and only if non-rexmitted
+                                        * segments below frto_highmark are
+                                        * SACKed (RFC4138; Appendix B).
+                                        * Clearing is correct due to the in-order walk
+                                        */
+                                       if (after(end_seq, tp->frto_highmark)) {
+                                               flag &= ~FLAG_ONLY_ORIG_SACKED;
+                                       } else {
+                                               if (!(sacked & TCPCB_RETRANS))
+                                                       flag |= FLAG_ONLY_ORIG_SACKED;
+                                       }
                                }
 
                                TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
@@ -1177,7 +1222,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
                struct sk_buff *skb;
 
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
                                break;
                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
@@ -1206,7 +1253,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 
        tp->left_out = tp->sacked_out + tp->lost_out;
 
-       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss)
+       if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
+           (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
                tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
 
 #if FASTRETRANS_DEBUG > 0
@@ -1218,9 +1266,49 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        return flag;
 }
 
-/* RTO occurred, but do not yet enter loss state. Instead, transmit two new
- * segments to see from the next ACKs whether any data was really missing.
- * If the RTO was spurious, new ACKs should arrive.
+/* F-RTO can only be used if TCP has never retransmitted anything other
+ * than the head (the SACK enhanced variant from Appendix B of RFC4138 is
+ * more robust here)
+ */
+int tcp_use_frto(struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (!sysctl_tcp_frto)
+               return 0;
+
+       if (IsSackFrto())
+               return 1;
+
+       /* Avoid expensive walking of rexmit queue if possible */
+       if (tp->retrans_out > 1)
+               return 0;
+
+       skb = tcp_write_queue_head(sk);
+       skb = tcp_write_queue_next(sk, skb);    /* Skips head */
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+               if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
+                       return 0;
+               /* Short-circuit when first non-SACKed skb has been checked */
+               if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED))
+                       break;
+       }
+       return 1;
+}
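sysctl_tcp_frto selects the variant: 0 disables F-RTO, any other value
enables the basic RFC4138 algorithm, and 2 (IsSackFrto) picks the
SACK-enhanced flavour, which can skip the queue walk above. The caller is
the RTO timer; roughly (a sketch of the tcp_timer.c side, not part of this
file):

	if (tcp_use_frto(sk))
		tcp_enter_frto(sk);	/* defer Loss, probe for spurious RTO */
	else
		tcp_enter_loss(sk, 0);	/* conventional go-back-N recovery */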
+
+/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
+ * recovery a bit and use heuristics in tcp_process_frto() to detect if
+ * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
+ * keep retrans_out counting accurate (with SACK F-RTO, segments other
+ * than the head may still have that bit set); TCPCB_LOST and remaining
+ * SACKED_RETRANS bits are handled if the Loss state is really to be
+ * entered (in tcp_enter_frto_loss).
+ *
+ * Do like tcp_enter_loss() would; when RTO expires the second time it
+ * does:
+ *  "Reduce ssthresh if it has not yet been made inside this window."
  */
 void tcp_enter_frto(struct sock *sk)
 {
@@ -1228,39 +1316,69 @@ void tcp_enter_frto(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
-       tp->frto_counter = 1;
-
-       if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
-            tp->snd_una == tp->high_seq ||
-            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+       if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
+           tp->snd_una == tp->high_seq ||
+           ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
+            !icsk->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               /* Our state is too optimistic in ssthresh() call because cwnd
+                * is not reduced until tcp_enter_frto_loss() when previous FRTO
+                * recovery has not yet completed. Pattern would be this: RTO,
+                * Cumulative ACK, RTO (2xRTO for the same segment does not end
+                * up here twice).
+                * RFC4138 should be more specific on what to do, even though
+                * RTO is quite unlikely to occur after the first Cumulative ACK
+                * due to back-off and complexity of triggering events ...
+                */
+               if (tp->frto_counter) {
+                       u32 stored_cwnd;
+                       stored_cwnd = tp->snd_cwnd;
+                       tp->snd_cwnd = 2;
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+                       tp->snd_cwnd = stored_cwnd;
+               } else {
+                       tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+               }
+               /* ... in theory, cong.control module could do "any tricks" in
+                * ssthresh(), which means that ca_state, lost bits and lost_out
+                * counter would have to be faked before the call occurs. We
+                * consider that too expensive, unlikely and hacky, so modules
+                * using these in ssthresh() must deal these incompatibility
+                * using these in ssthresh() must deal with these incompatibility
+                * issues if they receive CA_EVENT_FRTO and frto_counter != 0
                tcp_ca_event(sk, CA_EVENT_FRTO);
        }
 
-       /* Have to clear retransmission markers here to keep the bookkeeping
-        * in shape, even though we are not yet in Loss state.
-        * If something was really lost, it is eventually caught up
-        * in tcp_enter_frto_loss.
-        */
-       tp->retrans_out = 0;
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
-               TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
+       skb = tcp_write_queue_head(sk);
+       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+               tp->retrans_out -= tcp_skb_pcount(skb);
        }
        tcp_sync_left_out(tp);
 
-       tcp_set_ca_state(sk, TCP_CA_Open);
-       tp->frto_highmark = tp->snd_nxt;
+       /* Earlier loss recovery underway (see RFC4138; Appendix B).
+        * The last condition is necessary at least in the tp->frto_counter case.
+        */
+       if (IsSackFrto() && (tp->frto_counter ||
+           ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
+           after(tp->high_seq, tp->snd_una)) {
+               tp->frto_highmark = tp->high_seq;
+       } else {
+               tp->frto_highmark = tp->snd_nxt;
+       }
+       tcp_set_ca_state(sk, TCP_CA_Disorder);
+       tp->high_seq = tp->snd_nxt;
+       tp->frto_counter = 1;
 }
 
 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
  * which indicates that we should follow the traditional RTO recovery,
  * i.e. mark everything lost and do go-back-N retransmission.
  */
-static void tcp_enter_frto_loss(struct sock *sk)
+static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1269,10 +1387,25 @@ static void tcp_enter_frto_loss(struct sock *sk)
        tp->sacked_out = 0;
        tp->lost_out = 0;
        tp->fackets_out = 0;
+       tp->retrans_out = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
-               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+               /*
+                * Count the retransmission made on RTO correctly (only when
+                * we are still waiting for the first ACK and did not get it)...
+                */
+               if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
+                       /* For some reason this R-bit might get cleared? */
+                       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+                               tp->retrans_out += tcp_skb_pcount(skb);
+                       /* ...enter this if branch just for the first segment */
+                       flag |= FLAG_DATA_ACKED;
+               } else {
+                       TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+               }
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
 
                        /* Do not mark those segments lost that were
@@ -1290,7 +1423,7 @@ static void tcp_enter_frto_loss(struct sock *sk)
        }
        tcp_sync_left_out(tp);
 
-       tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1;
+       tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
        tp->snd_cwnd_cnt = 0;
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->undo_marker = 0;
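(The allowed_segments argument bounds how many new segments may go out
beyond what is in flight; the callers in tcp_process_frto() below pass 2 or
3, or 0 to forbid new data entirely, depending on frto_counter and on
whether the RTO retransmission was itself ACKed.)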
@@ -1348,7 +1481,9 @@ void tcp_enter_loss(struct sock *sk, int how)
        if (!how)
                tp->undo_marker = tp->snd_una;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
                        tp->undo_marker = 0;
@@ -1369,6 +1504,8 @@ void tcp_enter_loss(struct sock *sk, int how)
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        TCP_ECN_queue_cwr(tp);
+       /* Abort FRTO algorithm if one is in progress */
+       tp->frto_counter = 0;
 
        clear_all_retrans_hints(tp);
 }
@@ -1383,14 +1520,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
         * receiver _host_ is heavily congested (or buggy).
         * Do processing similar to RTO timeout.
         */
-       if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
+       if ((skb = tcp_write_queue_head(sk)) != NULL &&
            (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                struct inet_connection_sock *icsk = inet_csk(sk);
                NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
-               tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
+               tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          icsk->icsk_rto, TCP_RTO_MAX);
                return 1;
@@ -1408,10 +1545,12 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
        return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
-static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
+static inline int tcp_head_timedout(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        return tp->packets_out &&
-              tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
+              tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1507,10 +1646,15 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
+static int tcp_time_to_recover(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out;
 
+       /* Do not perform any recovery during FRTO algorithm */
+       if (tp->frto_counter)
+               return 0;
+
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
                return 1;
@@ -1522,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
        /* Trick#3 : when we use RFC2988 timer restart, fast
         * retransmit can be triggered by timeout of queue head.
         */
-       if (tcp_head_timedout(sk, tp))
+       if (tcp_head_timedout(sk))
                return 1;
 
        /* Trick#4: It is still not OK... But will it be useful to delay
@@ -1531,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
        packets_out = tp->packets_out;
        if (packets_out <= tp->reordering &&
            tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
-           !tcp_may_send_now(sk, tp)) {
+           !tcp_may_send_now(sk)) {
                /* We have nothing to send. This connection is limited
                 * either by receiver window or by application.
                 */
@@ -1571,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
                if (acked-1 >= tp->sacked_out)
@@ -1591,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 }
 
 /* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
+static void tcp_mark_head_lost(struct sock *sk,
                               int packets, u32 high_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int cnt;
 
@@ -1602,11 +1749,13 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
        } else {
-               skb = sk->sk_write_queue.next;
+               skb = tcp_write_queue_head(sk);
                cnt = 0;
        }
 
-       sk_stream_for_retrans_queue_from(skb, sk) {
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                /* TODO: do this better */
                /* this is not the most efficient way to do this... */
                tp->lost_skb_hint = skb;
@@ -1620,12 +1769,11 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
                        /* clear xmit_retransmit_queue hints
                         *  if this is beyond hint */
-                       if(tp->retransmit_skb_hint != NULL &&
-                          before(TCP_SKB_CB(skb)->seq,
-                                 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
-
+                       if (tp->retransmit_skb_hint != NULL &&
+                           before(TCP_SKB_CB(skb)->seq,
+                                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                                tp->retransmit_skb_hint = NULL;
-                       }
+
                }
        }
        tcp_sync_left_out(tp);
@@ -1633,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
+static void tcp_update_scoreboard(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (IsFack(tp)) {
                int lost = tp->fackets_out - tp->reordering;
                if (lost <= 0)
                        lost = 1;
-               tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
+               tcp_mark_head_lost(sk, lost, tp->high_seq);
        } else {
-               tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
+               tcp_mark_head_lost(sk, 1, tp->high_seq);
        }
 
        /* New heuristics: it is possible only after we switched
@@ -1649,13 +1799,15 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
         * Hence, we can detect timed out packets during fast
         * retransmit without falling to slow start.
         */
-       if (tcp_head_timedout(sk, tp)) {
+       if (!IsReno(tp) && tcp_head_timedout(sk)) {
                struct sk_buff *skb;
 
                skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : sk->sk_write_queue.next;
+                       : tcp_write_queue_head(sk);
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (!tcp_skb_timedout(sk, skb))
                                break;
 
@@ -1688,17 +1840,26 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+/* Lower bound on congestion window is slow start threshold
+ * unless congestion avoidance choice decides to override it.
+ */
+static inline u32 tcp_cwnd_min(const struct sock *sk)
+{
+       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+
+       return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
+}
+
 /* Decrease cwnd each second ack. */
 static void tcp_cwnd_down(struct sock *sk)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int decr = tp->snd_cwnd_cnt + 1;
 
        tp->snd_cwnd_cnt = decr&1;
        decr >>= 1;
 
-       if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
+       if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
                tp->snd_cwnd -= decr;
 
        tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
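With decr = (snd_cwnd_cnt + 1) >> 1 and the carry kept in snd_cwnd_cnt, the
window shrinks by one segment on every second ACK, i.e. it roughly halves
across a window's worth of ACKs. A short trace, starting from snd_cwnd = 10
and snd_cwnd_cnt = 0:

	/* ACK #:     1   2   3   4   5   6
	 * decr:      0   1   0   1   0   1
	 * snd_cwnd: 10   9   9   8   8   7
	 */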
@@ -1718,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 /* Undo procedures. */
 
 #if FASTRETRANS_DEBUG > 1
-static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
+static void DBGUNDO(struct sock *sk, const char *msg)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
+
        printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
               msg,
               NIPQUAD(inet->daddr), ntohs(inet->dport),
@@ -1766,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_recovery(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_may_undo(tp)) {
                /* Happy end! We did not retransmit anything
                 * or our original transmission succeeded.
                 */
-               DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+               DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
                tcp_undo_cwr(sk, 1);
                if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                        NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
@@ -1792,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
+static void tcp_try_undo_dsack(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tp->undo_marker && !tp->undo_retrans) {
-               DBGUNDO(sk, tp, "D-SACK");
+               DBGUNDO(sk, "D-SACK");
                tcp_undo_cwr(sk, 1);
                tp->undo_marker = 0;
                NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
@@ -1804,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
 
 /* Undo during fast recovery after partial ACK. */
 
-static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
-                               int acked)
+static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        /* Partial ACK arrived. Force Hoe's retransmit. */
        int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
 
@@ -1819,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 
                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
-               DBGUNDO(sk, tp, "Hoe");
+               DBGUNDO(sk, "Hoe");
                tcp_undo_cwr(sk, 0);
                NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
@@ -1833,17 +2000,21 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_loss(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (tcp_may_undo(tp)) {
                struct sk_buff *skb;
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                }
 
                clear_all_retrans_hints(tp);
 
-               DBGUNDO(sk, tp, "partial loss");
+               DBGUNDO(sk, "partial loss");
                tp->lost_out = 0;
                tp->left_out = tp->sacked_out;
                tcp_undo_cwr(sk, 1);
@@ -1865,15 +2036,17 @@ static inline void tcp_complete_cwr(struct sock *sk)
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
-static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
-       tp->left_out = tp->sacked_out;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tcp_sync_left_out(tp);
 
        if (tp->retrans_out == 0)
                tp->retrans_stamp = 0;
 
        if (flag&FLAG_ECE)
-               tcp_enter_cwr(sk);
+               tcp_enter_cwr(sk, 1);
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                int state = TCP_CA_Open;
@@ -1942,11 +2115,11 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
         * 1. Reno does not count dupacks (sacked_out) automatically. */
        if (!tp->packets_out)
                tp->sacked_out = 0;
-        /* 2. SACK counts snd_fack in packets inaccurately. */
+       /* 2. SACK counts snd_fack in packets inaccurately. */
        if (tp->sacked_out == 0)
                tp->fackets_out = 0;
 
-        /* Now state machine starts.
+       /* Now state machine starts.
         * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
        if (flag&FLAG_ECE)
                tp->prior_ssthresh = 0;
@@ -1960,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
            before(tp->snd_una, tp->high_seq) &&
            icsk->icsk_ca_state != TCP_CA_Open &&
            tp->fackets_out > tp->reordering) {
-               tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
+               tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq);
                NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
        }
 
@@ -1970,14 +2143,13 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
        /* E. Check state exit conditions. State can be terminated
         *    when high_seq is ACKed. */
        if (icsk->icsk_ca_state == TCP_CA_Open) {
-               if (!sysctl_tcp_frto)
-                       BUG_TRAP(tp->retrans_out == 0);
+               BUG_TRAP(tp->retrans_out == 0);
                tp->retrans_stamp = 0;
        } else if (!before(tp->snd_una, tp->high_seq)) {
                switch (icsk->icsk_ca_state) {
                case TCP_CA_Loss:
                        icsk->icsk_retransmits = 0;
-                       if (tcp_try_undo_recovery(sk, tp))
+                       if (tcp_try_undo_recovery(sk))
                                return;
                        break;
 
@@ -1991,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                        break;
 
                case TCP_CA_Disorder:
-                       tcp_try_undo_dsack(sk, tp);
+                       tcp_try_undo_dsack(sk);
                        if (!tp->undo_marker ||
                            /* For SACK case do not Open to allow to undo
                             * catching for all duplicate ACKs. */
@@ -2004,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                case TCP_CA_Recovery:
                        if (IsReno(tp))
                                tcp_reset_reno_sack(tp);
-                       if (tcp_try_undo_recovery(sk, tp))
+                       if (tcp_try_undo_recovery(sk))
                                return;
                        tcp_complete_cwr(sk);
                        break;
@@ -2020,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                } else {
                        int acked = prior_packets - tp->packets_out;
                        if (IsReno(tp))
-                               tcp_remove_reno_sacks(sk, tp, acked);
-                       is_dupack = tcp_try_undo_partial(sk, tp, acked);
+                               tcp_remove_reno_sacks(sk, acked);
+                       is_dupack = tcp_try_undo_partial(sk, acked);
                }
                break;
        case TCP_CA_Loss:
                if (flag&FLAG_DATA_ACKED)
                        icsk->icsk_retransmits = 0;
-               if (!tcp_try_undo_loss(sk, tp)) {
+               if (!tcp_try_undo_loss(sk)) {
                        tcp_moderate_cwnd(tp);
                        tcp_xmit_retransmit_queue(sk);
                        return;
@@ -2044,10 +2216,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                }
 
                if (icsk->icsk_ca_state == TCP_CA_Disorder)
-                       tcp_try_undo_dsack(sk, tp);
+                       tcp_try_undo_dsack(sk);
 
-               if (!tcp_time_to_recover(sk, tp)) {
-                       tcp_try_to_open(sk, tp, flag);
+               if (!tcp_time_to_recover(sk)) {
+                       tcp_try_to_open(sk, flag);
                        return;
                }
 
@@ -2086,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                tcp_set_ca_state(sk, TCP_CA_Recovery);
        }
 
-       if (is_dupack || tcp_head_timedout(sk, tp))
-               tcp_update_scoreboard(sk, tp);
+       if (is_dupack || tcp_head_timedout(sk))
+               tcp_update_scoreboard(sk);
        tcp_cwnd_down(sk);
        tcp_xmit_retransmit_queue(sk);
 }
@@ -2151,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
                tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
                           u32 in_flight, int good)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+       icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
        tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
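Judging from the wrapper above, the congestion-ops hook itself drops its RTT
argument in this series; modules now implement (signature inferred from the
call here, not quoted from the header):

	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good);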
 
@@ -2163,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  * RFC2988 recommends to restart timer to now+rto.
  */
 
-static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
        } else {
@@ -2176,7 +2350,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
                         __u32 now, __s32 *seq_rtt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 
+       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
        __u32 seq = tp->snd_una;
        __u32 packets_acked;
        int acked = 0;
@@ -2228,15 +2402,6 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
        return acked;
 }
 
-static u32 tcp_usrtt(const struct sk_buff *skb)
-{
-       struct timeval tv, now;
-
-       do_gettimeofday(&now);
-       skb_get_timestamp(skb, &tv);
-       return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
-}
-
 /* Remove acknowledged frames from the retransmission queue. */
 static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 {
@@ -2245,14 +2410,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
        struct sk_buff *skb;
        __u32 now = tcp_time_stamp;
        int acked = 0;
+       int prior_packets = tp->packets_out;
        __s32 seq_rtt = -1;
-       u32 pkts_acked = 0;
-       void (*rtt_sample)(struct sock *sk, u32 usrtt)
-               = icsk->icsk_ca_ops->rtt_sample;
+       ktime_t last_ackt = net_invalid_timestamp();
 
-       while ((skb = skb_peek(&sk->sk_write_queue)) &&
-              skb != sk->sk_send_head) {
-               struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 
+       while ((skb = tcp_write_queue_head(sk)) &&
+              skb != tcp_send_head(sk)) {
+               struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                __u8 sacked = scb->sacked;
 
                /* If our packet is before the ack sequence we can
@@ -2276,7 +2440,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                 */
                if (!(scb->flags & TCPCB_FLAG_SYN)) {
                        acked |= FLAG_DATA_ACKED;
-                       ++pkts_acked;
                } else {
                        acked |= FLAG_SYN_ACKED;
                        tp->retrans_stamp = 0;
@@ -2291,14 +2454,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
                if (sacked) {
                        if (sacked & TCPCB_RETRANS) {
-                               if(sacked & TCPCB_SACKED_RETRANS)
+                               if (sacked & TCPCB_SACKED_RETRANS)
                                        tp->retrans_out -= tcp_skb_pcount(skb);
                                acked |= FLAG_RETRANS_DATA_ACKED;
                                seq_rtt = -1;
                        } else if (seq_rtt < 0) {
                                seq_rtt = now - scb->when;
-                               if (rtt_sample)
-                                       (*rtt_sample)(sk, tcp_usrtt(skb));
+                               last_ackt = skb->tstamp;
                        }
                        if (sacked & TCPCB_SACKED_ACKED)
                                tp->sacked_out -= tcp_skb_pcount(skb);
@@ -2311,22 +2473,29 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                        }
                } else if (seq_rtt < 0) {
                        seq_rtt = now - scb->when;
-                       if (rtt_sample)
-                               (*rtt_sample)(sk, tcp_usrtt(skb));
+                       last_ackt = skb->tstamp;
                }
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
-               __skb_unlink(skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(skb, sk);
                sk_stream_free_skb(sk, skb);
                clear_all_retrans_hints(tp);
        }
 
        if (acked&FLAG_ACKED) {
+               u32 pkts_acked = prior_packets - tp->packets_out;
+               const struct tcp_congestion_ops *ca_ops
+                       = inet_csk(sk)->icsk_ca_ops;
+
                tcp_ack_update_rtt(sk, acked, seq_rtt);
-               tcp_ack_packets_out(sk, tp);
+               tcp_ack_packets_out(sk);
 
-               if (icsk->icsk_ca_ops->pkts_acked)
-                       icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
+               /* Is the ACK triggering packet unambiguous? */
+               if (acked & FLAG_RETRANS_DATA_ACKED)
+                       last_ackt = net_invalid_timestamp();
+
+               if (ca_ops->pkts_acked)
+                       ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
        }
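Invalidating last_ackt when retransmitted data was ACKed is Karn's rule: an
ACK covering a retransmission is ambiguous, since it may acknowledge either
transmission, so pkts_acked() receives net_invalid_timestamp() rather than a
misleading RTT sample.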
 
 #if FASTRETRANS_DEBUG > 0
@@ -2363,7 +2532,7 @@ static void tcp_ack_probe(struct sock *sk)
 
        /* Was it a usable window open? */
 
-       if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+       if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
                   tp->snd_una + tp->snd_wnd)) {
                icsk->icsk_backoff = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
@@ -2406,13 +2575,14 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
-                                struct sk_buff *skb, u32 ack, u32 ack_seq)
+static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+                                u32 ack_seq)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        int flag = 0;
-       u32 nwin = ntohs(skb->h.th->window);
+       u32 nwin = ntohs(tcp_hdr(skb)->window);
 
-       if (likely(!skb->h.th->syn))
+       if (likely(!tcp_hdr(skb)->syn))
                nwin <<= tp->rx_opt.snd_wscale;
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
@@ -2426,7 +2596,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
                         * fast path is recovered for sending TCP.
                         */
                        tp->pred_flags = 0;
-                       tcp_fast_path_check(sk, tp);
+                       tcp_fast_path_check(sk);
 
                        if (nwin > tp->max_window) {
                                tp->max_window = nwin;
@@ -2440,39 +2610,140 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
        return flag;
 }
 
-static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
+/* A very conservative spurious RTO response algorithm: reduce cwnd and
+ * continue in congestion avoidance.
+ */
+static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
+{
+       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+       tp->snd_cwnd_cnt = 0;
+       TCP_ECN_queue_cwr(tp);
+       tcp_moderate_cwnd(tp);
+}
+
+/* A conservative spurious RTO response algorithm: reduce cwnd using
+ * rate halving and continue in congestion avoidance.
+ */
+static void tcp_ratehalving_spur_to_response(struct sock *sk)
+{
+       tcp_enter_cwr(sk, 0);
+}
+
+static void tcp_undo_spur_to_response(struct sock *sk, int flag)
+{
+       if (flag&FLAG_ECE)
+               tcp_ratehalving_spur_to_response(sk);
+       else
+               tcp_undo_cwr(sk, 1);
+}
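This series also adds sysctl_tcp_frto_response (declared near the top of
this file) to pick among the three responses above once F-RTO declares the
RTO spurious. A plausible dispatcher, with the value-to-response mapping
assumed rather than taken from this hunk:

	static void tcp_spurious_rto_response(struct sock *sk, int flag)
	{
		switch (sysctl_tcp_frto_response) {
		case 2:		/* assumed: undo the cwnd reduction outright */
			tcp_undo_spur_to_response(sk, flag);
			break;
		case 1:		/* assumed: very conservative clamp */
			tcp_conservative_spur_to_response(tcp_sk(sk));
			break;
		default:	/* rate halving as the safe default */
			tcp_ratehalving_spur_to_response(sk);
		}
	}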
+
+/* F-RTO spurious RTO detection algorithm (RFC4138)
+ *
+ * F-RTO is in effect during the two new ACKs following an RTO (well, almost,
+ * see inline comments). State (ACK number) is kept in frto_counter. When an
+ * ACK advances the window (but not to or beyond the highest sequence sent
+ * before the RTO):
+ *   On First ACK,  send two new segments out.
+ *   On Second ACK, RTO was likely spurious. Do spurious response (response
+ *                  algorithm is not part of the F-RTO detection algorithm
+ *                  given in RFC4138 but can be selected separately).
+ * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
+ * and TCP falls back to conventional RTO recovery. F-RTO allows overriding
+ * of Nagle; this is done using frto_counter states 2 and 3: when a new data
+ * segment of any size is sent during F-RTO, state 2 is upgraded to 3.
+ *
+ * Rationale: if the RTO was spurious, new ACKs should arrive from the
+ * original window even after we transmit two new data segments.
+ *
+ * SACK version:
+ *   on first step, wait until first cumulative ACK arrives, then move to
+ *   the second step. In second step, the next ACK decides.
+ *
+ * F-RTO is implemented (mainly) in four functions:
+ *   - tcp_use_frto() is used to determine if TCP is can use F-RTO
+ *   - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
+ *     called when tcp_use_frto() showed green light
+ *   - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
+ *   - tcp_enter_frto_loss() is called if there is not enough evidence
+ *     to prove that the RTO is indeed spurious. It transfers the control
+ *     from F-RTO to the conventional RTO recovery
+ */
+static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       
+
        tcp_sync_left_out(tp);
-       
-       if (tp->snd_una == prior_snd_una ||
-           !before(tp->snd_una, tp->frto_highmark)) {
-               /* RTO was caused by loss, start retransmitting in
-                * go-back-N slow start
+
+       /* Duplicate the behavior from Loss state (fastretrans_alert) */
+       if (flag&FLAG_DATA_ACKED)
+               inet_csk(sk)->icsk_retransmits = 0;
+
+       if (!before(tp->snd_una, tp->frto_highmark)) {
+               tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
+               return 1;
+       }
+
+       if (!IsSackFrto() || IsReno(tp)) {
+               /* RFC4138 shortcoming in step 2; it should also have case c):
+                * the ACK neither is a duplicate nor advances the window,
+                * e.g., opposite-direction data or a window update
                 */
-               tcp_enter_frto_loss(sk);
-               return;
+               if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
+                   !(flag&FLAG_FORWARD_PROGRESS))
+                       return 1;
+
+               if (!(flag&FLAG_DATA_ACKED)) {
+                       tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
+                                           flag);
+                       return 1;
+               }
+       } else {
+               if (!(flag&FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       /* Prevent sending of new data. */
+                       tp->snd_cwnd = min(tp->snd_cwnd,
+                                          tcp_packets_in_flight(tp));
+                       return 1;
+               }
+
+               if ((tp->frto_counter >= 2) &&
+                   (!(flag&FLAG_FORWARD_PROGRESS) ||
+                    ((flag&FLAG_DATA_SACKED) && !(flag&FLAG_ONLY_ORIG_SACKED)))) {
+                       /* RFC4138 shortcoming (see comment above) */
+                       if (!(flag&FLAG_FORWARD_PROGRESS) && (flag&FLAG_NOT_DUP))
+                               return 1;
+
+                       tcp_enter_frto_loss(sk, 3, flag);
+                       return 1;
+               }
        }
 
        if (tp->frto_counter == 1) {
-               /* First ACK after RTO advances the window: allow two new
-                * segments out.
-                */
+               /* Sending of the next skb must be allowed, or no F-RTO */
+               if (!tcp_send_head(sk) ||
+                   after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
+                                    tp->snd_una + tp->snd_wnd)) {
+                       tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3),
+                                           flag);
+                       return 1;
+               }
+
                tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
+               tp->frto_counter = 2;
+               return 1;
        } else {
-               /* Also the second ACK after RTO advances the window.
-                * The RTO was likely spurious. Reduce cwnd and continue
-                * in congestion avoidance
-                */
-               tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-               tcp_moderate_cwnd(tp);
+               switch (sysctl_tcp_frto_response) {
+               case 2:
+                       tcp_undo_spur_to_response(sk, flag);
+                       break;
+               case 1:
+                       tcp_conservative_spur_to_response(tp);
+                       break;
+               default:
+                       tcp_ratehalving_spur_to_response(sk);
+                       break;
+               }
+               tp->frto_counter = 0;
        }
-
-       /* F-RTO affects on two new ACKs following RTO.
-        * At latest on third ACK the TCP behavior is back to normal.
-        */
-       tp->frto_counter = (tp->frto_counter + 1) % 3;
+       return 0;
 }
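
A toy restatement of the state machine above, for orientation only: the names
are invented, and the kernel specifics (the SACK variant, the Nagle-override
state 3, cwnd manipulation) are ignored.

	/* frto_counter walkthrough for the non-SACK case: 1 = waiting for
	 * the first new ACK after RTO, 2 = two new segments sent, waiting
	 * for the second new ACK. Returns 1 when the RTO is judged
	 * spurious. */
	static int frto_toy_step(int *counter, int ack_advances,
				 int ack_at_or_past_highmark)
	{
		if (ack_at_or_past_highmark || !ack_advances) {
			*counter = 0;	/* fall back to conventional recovery */
			return 0;
		}
		if (*counter == 1) {
			*counter = 2;	/* first new ACK: two new segments go out */
			return 0;
		}
		*counter = 0;		/* second new ACK: RTO was spurious */
		return 1;
	}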
 
 /* This routine deals with incoming acks, but not outgoing ones. */
@@ -2486,6 +2757,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        u32 prior_in_flight;
        s32 seq_rtt;
        int prior_packets;
+       int frto_cwnd = 0;
 
        /* If the ack is newer than sent or older than previous acks
         * then we can probably ignore it.
@@ -2496,8 +2768,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        if (before(ack, prior_snd_una))
                goto old_ack;
 
-       if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR)
-               tp->bytes_acked += ack - prior_snd_una;
+       if (sysctl_tcp_abc) {
+               if (icsk->icsk_ca_state < TCP_CA_CWR)
+                       tp->bytes_acked += ack - prior_snd_una;
+               else if (icsk->icsk_ca_state == TCP_CA_Loss)
+                       /* we assume just one segment left the network */
+                       tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
+       }
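
For context on the bytes_acked bookkeeping: Appropriate Byte Counting
(RFC 3465) grows cwnd by the number of bytes actually acknowledged rather
than by the number of ACKs, and the clamp added above keeps one large
cumulative ACK in Loss state from being credited as more than a single
segment. A generic sketch of the byte-counting idea, not the kernel's
congestion-avoidance code (which lives in tcp_cong.c):

	/* RFC 3465 slow start, sketched: accumulate acked bytes and convert
	 * each full MSS of credit into one segment of cwnd growth, capped at
	 * L = 2 segments per ACK. All names here are made up. */
	static void abc_slow_start(u32 *cwnd, u32 *bytes_acked, u32 acked,
				   u32 mss)
	{
		u32 grow;

		*bytes_acked += acked;
		grow = *bytes_acked / mss;
		if (grow > 2)		/* RFC 3465 recommends L <= 2 */
			grow = 2;
		*bytes_acked -= grow * mss;
		*cwnd += grow;
	}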
 
        if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                /* Window is constant, pure forward advance.
@@ -2517,12 +2794,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                else
                        NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
 
-               flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
+               flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
-               if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+               if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                        flag |= FLAG_ECE;
 
                tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
@@ -2543,16 +2820,17 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
        if (tp->frto_counter)
-               tcp_process_frto(sk, prior_snd_una);
+               frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                /* Advance CWND, if state allows this. */
-               if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
-                       tcp_cong_avoid(sk, ack,  seq_rtt, prior_in_flight, 0);
+               if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
+                   tcp_may_raise_cwnd(sk, flag))
+                       tcp_cong_avoid(sk, ack, prior_in_flight, 0);
                tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
        } else {
-               if ((flag & FLAG_DATA_ACKED))
-                       tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+               if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
+                       tcp_cong_avoid(sk, ack, prior_in_flight, 1);
        }
 
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
@@ -2567,7 +2845,7 @@ no_queue:
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
         */
-       if (sk->sk_send_head)
+       if (tcp_send_head(sk))
                tcp_ack_probe(sk);
        return 1;
 
@@ -2588,14 +2866,14 @@ uninteresting_ack:
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
        unsigned char *ptr;
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length=(th->doff*4)-sizeof(struct tcphdr);
 
        ptr = (unsigned char *)(th + 1);
        opt_rx->saw_tstamp = 0;
 
-       while(length>0) {
-               int opcode=*ptr++;
+       while (length > 0) {
+               int opcode = *ptr++;
                int opsize;
 
                switch (opcode) {
@@ -2610,10 +2888,10 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        return;
                                if (opsize > length)
                                        return; /* don't parse partial options */
-                               switch(opcode) {
+                               switch (opcode) {
                                case TCPOPT_MSS:
-                                       if(opsize==TCPOLEN_MSS && th->syn && !estab) {
-                                               u16 in_mss = ntohs(get_unaligned((__u16 *)ptr));
+                                       if (opsize==TCPOLEN_MSS && th->syn && !estab) {
+                                               u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
                                                if (in_mss) {
                                                        if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
                                                                in_mss = opt_rx->user_mss;
@@ -2622,12 +2900,12 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_WINDOW:
-                                       if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
+                                       if (opsize==TCPOLEN_WINDOW && th->syn && !estab)
                                                if (sysctl_tcp_window_scaling) {
                                                        __u8 snd_wscale = *(__u8 *) ptr;
                                                        opt_rx->wscale_ok = 1;
                                                        if (snd_wscale > 14) {
-                                                               if(net_ratelimit())
+                                                               if (net_ratelimit())
                                                                        printk(KERN_INFO "tcp_parse_options: Illegal window "
                                                                               "scaling value %d >14 received.\n",
                                                                               snd_wscale);
@@ -2637,17 +2915,17 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                                }
                                        break;
                                case TCPOPT_TIMESTAMP:
-                                       if(opsize==TCPOLEN_TIMESTAMP) {
+                                       if (opsize==TCPOLEN_TIMESTAMP) {
                                                if ((estab && opt_rx->tstamp_ok) ||
                                                    (!estab && sysctl_tcp_timestamps)) {
                                                        opt_rx->saw_tstamp = 1;
-                                                       opt_rx->rcv_tsval = ntohl(get_unaligned((__u32 *)ptr));
-                                                       opt_rx->rcv_tsecr = ntohl(get_unaligned((__u32 *)(ptr+4)));
+                                                       opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
+                                                       opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
                                                }
                                        }
                                        break;
                                case TCPOPT_SACK_PERM:
-                                       if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
                                                if (sysctl_tcp_sack) {
                                                        opt_rx->sack_ok = 1;
                                                        tcp_sack_reset(opt_rx);
@@ -2656,15 +2934,25 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        break;
 
                                case TCPOPT_SACK:
-                                       if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+                                       if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
                                           !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
                                           opt_rx->sack_ok) {
                                                TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
                                        }
-                               };
-                               ptr+=opsize-2;
-                               length-=opsize;
-               };
+                                       break;
+#ifdef CONFIG_TCP_MD5SIG
+                               case TCPOPT_MD5SIG:
+                                       /*
+                                        * The MD5 Hash has already been
+                                        * checked (see tcp_v{4,6}_do_rcv()).
+                                        */
+                                       break;
+#endif
+                               }
+
+                               ptr += opsize - 2;
+                               length -= opsize;
+               }
        }
 }
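
A self-contained sketch of the kind/length walk the parser above performs,
minus the per-option handling: single-byte options (EOL, NOP) carry no length
octet, while every other option is kind, length, payload, with the length
counting all of it.

	/* Walk TCP options the way tcp_parse_options() does. Malformed
	 * lengths abort the walk. */
	static void walk_tcp_options(const unsigned char *ptr, int length)
	{
		while (length > 0) {
			int opcode = *ptr++;
			int opsize;

			switch (opcode) {
			case TCPOPT_EOL:	/* end of option list */
				return;
			case TCPOPT_NOP:	/* one-byte padding */
				length--;
				continue;
			default:
				opsize = *ptr++;
				if (opsize < 2)		/* "silly options" */
					return;
				if (opsize > length)
					return;	/* don't parse partial options */
				/* payload: opsize - 2 bytes at ptr */
				ptr += opsize - 2;
				length -= opsize;
			}
		}
	}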
 
@@ -2679,8 +2967,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
                return 0;
        } else if (tp->rx_opt.tstamp_ok &&
                   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
-               __u32 *ptr = (__u32 *)(th + 1);
-               if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+               __be32 *ptr = (__be32 *)(th + 1);
+               if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                                  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
                        tp->rx_opt.saw_tstamp = 1;
                        ++ptr;
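
The magic word compared above is the RFC 1323 "aligned timestamp" layout
spelled out as one 32-bit quantity; with TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8
and TCPOLEN_TIMESTAMP = 10 it works out as below. Using htonl() on the
constant, rather than ntohl() on the packet word, lets the compare run
directly against wire order, which is what the __be32 change is about.

	/* NOP, NOP, kind 8 (timestamp), length 10: the only option block the
	 * fast path accepts. 0x0101080a in host order, before htonl(). */
	u32 ts_word = (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		      (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP;
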
@@ -2697,7 +2985,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
        tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
 }
 
 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
@@ -2710,8 +2998,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
+                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
                        tcp_store_ts_recent(tp);
        }
 }
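
The (s32) cast is the usual wrap-safe ordering trick for 32-bit timestamps; a
minimal illustration with made-up values:

	/* "Is rcv_tsval at least as new as ts_recent?" across the 32-bit
	 * wrap. A plain unsigned compare gets this wrong; the signed
	 * difference stays small and positive. */
	u32 ts_recent = 0xfffffff0;	/* just before wrapping */
	u32 rcv_tsval = 0x00000010;	/* 0x20 ticks later, past the wrap */

	if ((s32)(rcv_tsval - ts_recent) >= 0)	/* (s32)0x20 = 32 >= 0: true */
		ts_recent = rcv_tsval;		/* accepted as newer */
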
@@ -2742,7 +3030,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        u32 seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -2763,7 +3051,7 @@ static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
+               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
                !tcp_disordered_ack(sk, skb));
 }
 
@@ -2870,7 +3158,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
                        printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
                               __FUNCTION__, sk->sk_state);
                        break;
-       };
+       }
 
        /* It _is_ possible that we have something out-of-order _after_ FIN.
         * Probably we should reset in this case. For now, drop them.
@@ -2969,7 +3257,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         */
                        tp->rx_opt.num_sacks--;
                        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-                       for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
+                       for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i+1];
                        continue;
                }
@@ -3022,7 +3310,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.num_sacks--;
                sp--;
        }
-       for(; this_sack > 0; this_sack--, sp--)
+       for (; this_sack > 0; this_sack--, sp--)
                *sp = *(sp-1);
 
 new_sack:
@@ -3048,7 +3336,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                return;
        }
 
-       for(this_sack = 0; this_sack < num_sacks; ) {
+       for (this_sack = 0; this_sack < num_sacks; ) {
                /* Check if the start of the sack is covered by RCV.NXT. */
                if (!before(tp->rcv_nxt, sp->start_seq)) {
                        int i;
@@ -3104,8 +3392,8 @@ static void tcp_ofo_queue(struct sock *sk)
                __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->h.th->fin)
-                       tcp_fin(skb, sk, skb->h.th);
+               if (tcp_hdr(skb)->fin)
+                       tcp_fin(skb, sk, tcp_hdr(skb));
        }
 }
 
@@ -3113,7 +3401,7 @@ static int tcp_prune_queue(struct sock *sk);
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct tcp_sock *tp = tcp_sk(sk);
        int eaten = -1;
 
@@ -3170,9 +3458,9 @@ queue_and_out:
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->len)
-                       tcp_event_data_recv(sk, tp, skb);
-               if(th->fin)
+               if (skb->len)
+                       tcp_event_data_recv(sk, skb);
+               if (th->fin)
                        tcp_fin(skb, sk, th);
 
                if (!skb_queue_empty(&tp->out_of_order_queue)) {
@@ -3188,7 +3476,7 @@ queue_and_out:
                if (tp->rx_opt.num_sacks)
                        tcp_sack_remove(tp);
 
-               tcp_fast_path_check(sk, tp);
+               tcp_fast_path_check(sk);
 
                if (eaten > 0)
                        __kfree_skb(skb);
@@ -3223,7 +3511,7 @@ drop:
                           TCP_SKB_CB(skb)->end_seq);
 
                tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
-               
+
                /* If window is closed, drop the tail of the packet. But do so
                 * only after remembering a D-SACK for its head, as done on the
                 * previous line.
                 */
@@ -3302,7 +3590,7 @@ drop:
                        }
                }
                __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
-               
+
                /* And clean segments covered by new one as whole. */
                while ((skb1 = skb->next) !=
                       (struct sk_buff*)&tp->out_of_order_queue &&
@@ -3352,7 +3640,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 * - bloated or contains data before "start" or
                 *   overlaps to the next one.
                 */
-               if (!skb->h.th->syn && !skb->h.th->fin &&
+               if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
                     before(TCP_SKB_CB(skb)->seq, start) ||
                     (skb->next != tail &&
@@ -3363,7 +3651,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                start = TCP_SKB_CB(skb)->end_seq;
                skb = skb->next;
        }
-       if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
@@ -3379,11 +3667,14 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                nskb = alloc_skb(copy+header, GFP_ATOMIC);
                if (!nskb)
                        return;
+
+               skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
+               skb_set_network_header(nskb, (skb_network_header(skb) -
+                                             skb->head));
+               skb_set_transport_header(nskb, (skb_transport_header(skb) -
+                                               skb->head));
                skb_reserve(nskb, header);
                memcpy(nskb->head, skb->head, header);
-               nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head);
-               nskb->h.raw = nskb->head + (skb->h.raw-skb->head);
-               nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
                __skb_insert(nskb, skb->prev, skb, list);
@@ -3409,7 +3700,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                                __kfree_skb(skb);
                                NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
-                               if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+                               if (skb == tail ||
+                                   tcp_hdr(skb)->syn ||
+                                   tcp_hdr(skb)->fin)
                                        return;
                        }
                }
@@ -3467,14 +3760,14 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  */
 static int tcp_prune_queue(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk); 
+       struct tcp_sock *tp = tcp_sk(sk);
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
        NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-               tcp_clamp_window(sk, tp);
+               tcp_clamp_window(sk);
        else if (tcp_memory_pressure)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
@@ -3532,7 +3825,8 @@ void tcp_cwnd_application_limited(struct sock *sk)
        if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
            sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                /* Limited by application or receiver window. */
-               u32 win_used = max(tp->snd_cwnd_used, 2U);
+               u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+               u32 win_used = max(tp->snd_cwnd_used, init_win);
                if (win_used < tp->snd_cwnd) {
                        tp->snd_ssthresh = tcp_current_ssthresh(sk);
                        tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
@@ -3542,8 +3836,10 @@ void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
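
The effect of this change, with assumed numbers: an application-limited
connection still returns half of its unused cwnd at each check, but the floor
on "used" is now the path's initial window rather than a flat 2 segments, so
cwnd no longer decays toward nothing.

	/* Illustrative values only: cwnd 40, 6 segments actually used,
	 * tcp_init_cwnd() assumed to return 10 for this route. */
	u32 snd_cwnd = 40, snd_cwnd_used = 6, init_win = 10;
	u32 win_used = max(snd_cwnd_used, init_win);	/* 10 rather than 6 */

	snd_cwnd = (snd_cwnd + win_used) >> 1;		/* (40 + 10) / 2 = 25 */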
 
-static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        /* If the user specified a specific send buffer setting, do
         * not modify it.
         */
@@ -3575,8 +3871,8 @@ static void tcp_new_space(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_should_expand_sndbuf(sk, tp)) {
-               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+       if (tcp_should_expand_sndbuf(sk)) {
+               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                        MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
                    demanded = max_t(unsigned int, tp->snd_cwnd,
                                                   tp->reordering + 1);
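
The two quantities computed here: sndmem is the worst-case memory cost of one
queued segment (clamped payload plus maximal headers plus struct sk_buff
bookkeeping), and demanded is how many such segments the sender must be able
to queue, i.e. cwnd padded out to the reordering degree. The continuation
(not shown in this hunk) scales the per-segment cost by the demanded count
before raising sk_sndbuf. An assumed instance:

	/* Hypothetical numbers: 1460-byte MSS, cwnd of 20, reordering of 3. */
	int sndmem = 1460 + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
	int demanded = max(20U, 3U + 1);	/* cwnd dominates: 20 segments */
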
@@ -3599,9 +3895,9 @@ static void tcp_check_space(struct sock *sk)
        }
 }
 
-static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk)
 {
-       tcp_push_pending_frames(sk, tp);
+       tcp_push_pending_frames(sk);
        tcp_check_space(sk);
 }
 
@@ -3649,7 +3945,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
  *     For 1003.1g we should support a new option TCP_STDURG to permit
  *     either form (or just set the sysctl tcp_stdurg).
  */
+
 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -3730,7 +4026,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
                u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
                          th->syn;
 
-               /* Is the urgent pointer pointing into this packet? */   
+               /* Is the urgent pointer pointing into this packet? */
                if (ptr < skb->len) {
                        u8 tmp;
                        if (skb_copy_bits(skb, ptr, &tmp, 1))
@@ -3749,7 +4045,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        int err;
 
        local_bh_enable();
-       if (skb->ip_summed==CHECKSUM_UNNECESSARY)
+       if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
        else
                err = skb_copy_and_csum_datagram_iovec(skb, hlen,
@@ -3765,9 +4061,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        return err;
 }
 
-static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
-       int result;
+       __sum16 result;
 
        if (sock_owned_by_user(sk)) {
                local_bh_enable();
@@ -3781,31 +4077,75 @@ static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 
 static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
-       return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+       return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete_user(sk, skb);
 }
 
+#ifdef CONFIG_NET_DMA
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int chunk = skb->len - hlen;
+       int dma_cookie;
+       int copied_early = 0;
+
+       if (tp->ucopy.wakeup)
+               return 0;
+
+       if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+               tp->ucopy.dma_chan = get_softnet_dma();
+
+       if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
+
+               dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
+                       skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+
+               if (dma_cookie < 0)
+                       goto out;
+
+               tp->ucopy.dma_cookie = dma_cookie;
+               copied_early = 1;
+
+               tp->ucopy.len -= chunk;
+               tp->copied_seq += chunk;
+               tcp_rcv_space_adjust(sk);
+
+               if ((tp->ucopy.len == 0) ||
+                   (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
+                   (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
+                       tp->ucopy.wakeup = 1;
+                       sk->sk_data_ready(sk, 0);
+               }
+       } else if (chunk > 0) {
+               tp->ucopy.wakeup = 1;
+               sk->sk_data_ready(sk, 0);
+       }
+out:
+       return copied_early;
+}
+#endif /* CONFIG_NET_DMA */
+
 /*
- *     TCP receive function for the ESTABLISHED state. 
+ *     TCP receive function for the ESTABLISHED state.
  *
- *     It is split into a fast path and a slow path. The fast path is 
+ *     It is split into a fast path and a slow path. The fast path is
  *     disabled when:
  *     - A zero window was announced from us - zero window probing
- *        is only handled properly in the slow path. 
+ *        is only handled properly in the slow path.
  *     - Out of order segments arrived.
  *     - Urgent data is expected.
  *     - There is no buffer space left
  *     - Unexpected TCP flags/window values/header lengths are received
- *       (detected by checking the TCP header against pred_flags) 
+ *       (detected by checking the TCP header against pred_flags)
  *     - Data is sent in both directions. Fast path only supports pure senders
  *       or pure receivers (this means either the sequence number or the ack
  *       value must stay constant)
  *     - Unexpected TCP option.
  *
- *     When these conditions are not satisfied it drops into a standard 
+ *     When these conditions are not satisfied it drops into a standard
  *     receive procedure patterned after RFC793 to handle all cases.
  *     The first three cases are guaranteed by proper pred_flags setting,
- *     the rest is checked inline. Fast processing is turned on in 
+ *     the rest is checked inline. Fast processing is turned on in
  *     tcp_data_queue when everything is OK.
  */
 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
@@ -3815,15 +4155,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
        /*
         *      Header prediction.
-        *      The code loosely follows the one in the famous 
+        *      The code loosely follows the one in the famous
         *      "30 instruction TCP receive" Van Jacobson mail.
-        *      
-        *      Van's trick is to deposit buffers into socket queue 
+        *
+        *      Van's trick is to deposit buffers into socket queue
         *      on a device interrupt, to call tcp_recv function
         *      on the receive process context and checksum and copy
         *      the buffer to user space. smart...
         *
-        *      Our current scheme is not silly either but we take the 
+        *      Our current scheme is not silly either but we take the
         *      extra cost of the net_bh soft interrupt processing...
         *      We do checksum and copy also but from device to kernel.
         */
@@ -3834,7 +4174,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         *      if header_prediction is to be made
         *      'S' will always be tp->tcp_header_len >> 2
         *      '?' will be 0 for the fast path, otherwise pred_flags is 0 to
-        *  turn it off (when there are holes in the receive 
+        *  turn it off (when there are holes in the receive
         *       space for instance)
         *      PSH flag is ignored.
         */
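
For reference, pred_flags is built elsewhere (include/net/tcp.h in this era)
so that a single word compare validates the data offset, the flag byte, and
the expected window at once; a paraphrase of that helper, shown for shape
rather than as the exact source:

	/* Data offset lands in the top bits (hence << 26), ACK is the only
	 * expected flag, and the advertised window fills the low 16 bits.
	 * Any header deviating from this word takes the slow path. */
	static inline void fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
	{
		tp->pred_flags = htonl((tp->tcp_header_len << 26) |
				       ntohl(TCP_FLAG_ACK) |
				       snd_wnd);
	}
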
@@ -3850,15 +4190,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                /* Check timestamp */
                if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
-                       __u32 *ptr = (__u32 *)(th + 1);
+                       __be32 *ptr = (__be32 *)(th + 1);
 
                        /* No? Slow path! */
-                       if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+                       if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                                          | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
                                goto slow_path;
 
                        tp->rx_opt.saw_tstamp = 1;
-                       ++ptr; 
+                       ++ptr;
                        tp->rx_opt.rcv_tsval = ntohl(*ptr);
                        ++ptr;
                        tp->rx_opt.rcv_tsecr = ntohl(*ptr);
@@ -3886,14 +4226,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                    tp->rcv_nxt == tp->rcv_wup)
                                        tcp_store_ts_recent(tp);
 
-                               tcp_rcv_rtt_measure_ts(sk, skb);
-
                                /* We know that such packets are checksummed
                                 * on entry.
                                 */
                                tcp_ack(sk, skb, 0);
-                               __kfree_skb(skb); 
-                               tcp_data_snd_check(sk, tp);
+                               __kfree_skb(skb);
+                               tcp_data_snd_check(sk);
                                return 0;
                        } else { /* Header too small */
                                TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -3901,14 +4239,23 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        }
                } else {
                        int eaten = 0;
+                       int copied_early = 0;
 
-                       if (tp->ucopy.task == current &&
-                           tp->copied_seq == tp->rcv_nxt &&
-                           len - tcp_header_len <= tp->ucopy.len &&
-                           sock_owned_by_user(sk)) {
-                               __set_current_state(TASK_RUNNING);
+                       if (tp->copied_seq == tp->rcv_nxt &&
+                           len - tcp_header_len <= tp->ucopy.len) {
+#ifdef CONFIG_NET_DMA
+                               if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+                                       copied_early = 1;
+                                       eaten = 1;
+                               }
+#endif
+                               if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+                                       __set_current_state(TASK_RUNNING);
 
-                               if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
+                                       if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
+                                               eaten = 1;
+                               }
+                               if (eaten) {
                                        /* Predicted packet is in window by definition.
                                         * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                                         * Hence, check seq<=rcv_wup reduces to:
@@ -3924,8 +4271,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                        __skb_pull(skb, tcp_header_len);
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                                        NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
-                                       eaten = 1;
                                }
+                               if (copied_early)
+                                       tcp_cleanup_rbuf(sk, skb->len);
                        }
                        if (!eaten) {
                                if (tcp_checksum_complete_user(sk, skb))
@@ -3954,18 +4302,23 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                        }
 
-                       tcp_event_data_recv(sk, tp, skb);
+                       tcp_event_data_recv(sk, skb);
 
                        if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
                                /* Well, only one small jumplet in fast path... */
                                tcp_ack(sk, skb, FLAG_DATA);
-                               tcp_data_snd_check(sk, tp);
+                               tcp_data_snd_check(sk);
                                if (!inet_csk_ack_scheduled(sk))
                                        goto no_ack;
                        }
 
                        __tcp_ack_snd_check(sk, 0);
 no_ack:
+#ifdef CONFIG_NET_DMA
+                       if (copied_early)
+                               __skb_queue_tail(&sk->sk_async_wait_queue, skb);
+                       else
+#endif
                        if (eaten)
                                __kfree_skb(skb);
                        else
@@ -4011,7 +4364,7 @@ slow_path:
                goto discard;
        }
 
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4026,7 +4379,7 @@ slow_path:
        }
 
 step5:
-       if(th->ack)
+       if (th->ack)
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
        tcp_rcv_rtt_measure_ts(sk, skb);
@@ -4037,7 +4390,7 @@ step5:
        /* step 7: process the segment text */
        tcp_data_queue(sk, skb);
 
-       tcp_data_snd_check(sk, tp);
+       tcp_data_snd_check(sk);
        tcp_ack_snd_check(sk);
        return 0;
 
@@ -4111,8 +4464,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 */
 
                TCP_ECN_rcv_synack(tp, th);
-               if (tp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
 
                tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
                tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -4155,9 +4506,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
-               mb();
+               smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
 
+               security_inet_conn_established(sk, skb);
+
                /* Make sure socket is routed, for correct metrics.  */
                icsk->icsk_af_ops->rebuild_header(sk);
 
@@ -4255,8 +4608,6 @@ discard:
                tp->max_window = tp->snd_wnd;
 
                TCP_ECN_rcv_syn(tp, th);
-               if (tp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
 
                tcp_mtup_init(sk);
                tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -4297,11 +4648,11 @@ reset_and_undo:
 
 /*
  *     This function implements the receiving procedure of RFC 793 for
- *     all states except ESTABLISHED and TIME_WAIT. 
+ *     all states except ESTABLISHED and TIME_WAIT.
  *     It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
  *     address independent.
  */
-       
+
 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                          struct tcphdr *th, unsigned len)
 {
@@ -4316,33 +4667,35 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                goto discard;
 
        case TCP_LISTEN:
-               if(th->ack)
+               if (th->ack)
                        return 1;
 
-               if(th->rst)
+               if (th->rst)
                        goto discard;
 
-               if(th->syn) {
+               if (th->syn) {
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
-                       /* Now we have several options: In theory there is 
-                        * nothing else in the frame. KA9Q has an option to 
+                       /* Now we have several options: In theory there is
+                        * nothing else in the frame. KA9Q has an option to
                         * send data with the syn, BSD accepts data with the
-                        * syn up to the [to be] advertised window and 
-                        * Solaris 2.1 gives you a protocol error. For now 
-                        * we just ignore it, that fits the spec precisely 
+                        * syn up to the [to be] advertised window and
+                        * Solaris 2.1 gives you a protocol error. For now
+                        * we just ignore it, that fits the spec precisely
                         * and avoids incompatibilities. It would be nice in
                         * future to drop through and process the data.
                         *
-                        * Now that TTCP is starting to be used we ought to 
+                        * Now that TTCP is starting to be used we ought to
                         * queue this data.
                         * But, this leaves one open to an easy denial of
-                        * service attack, and SYN cookies can't defend
+                        * service attack, and SYN cookies can't defend
                         * against this problem. So, we drop the data
-                        * in the interest of security over speed.
+                        * in the interest of security over speed unless
+                        * it's still in use.
                         */
-                       goto discard;
+                       kfree_skb(skb);
+                       return 0;
                }
                goto discard;
 
@@ -4354,7 +4707,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                /* Do step6 onward by hand. */
                tcp_urg(sk, skb, th);
                __kfree_skb(skb);
-               tcp_data_snd_check(sk, tp);
+               tcp_data_snd_check(sk);
                return 0;
        }
 
@@ -4376,7 +4729,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        /* step 2: check RST bit */
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4399,11 +4752,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (th->ack) {
                int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-               switch(sk->sk_state) {
+               switch (sk->sk_state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
-                               mb();
+                               smp_mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->sk_state_change(sk);
 
@@ -4526,7 +4879,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
                /* RFC 793 says to queue data in these states,
-                * RFC 1122 says we MUST send a reset. 
+                * RFC 1122 says we MUST send a reset.
                 * BSD 4.4 also does reset.
                 */
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
@@ -4538,7 +4891,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        }
                }
                /* Fall through */
-       case TCP_ESTABLISHED: 
+       case TCP_ESTABLISHED:
                tcp_data_queue(sk, skb);
                queued = 1;
                break;
@@ -4546,11 +4899,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* tcp_data could move socket to TIME-WAIT */
        if (sk->sk_state != TCP_CLOSE) {
-               tcp_data_snd_check(sk, tp);
+               tcp_data_snd_check(sk);
                tcp_ack_snd_check(sk);
        }
 
-       if (!queued) { 
+       if (!queued) {
 discard:
                __kfree_skb(skb);
        }