[NET] IPV4: Fix whitespace errors.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40a26b7..1a14191 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -50,9 +50,9 @@
  *             Andi Kleen:             Make sure we never ack data there is not
  *                                     enough room for. Also make this condition
  *                                     a fatal error if it might still happen.
- *             Andi Kleen:             Add tcp_measure_rcv_mss to make 
+ *             Andi Kleen:             Add tcp_measure_rcv_mss to make
  *                                     connections with MSS<min(MTU,ann. MSS)
- *                                     work without delayed acks. 
+ *                                     work without delayed acks.
  *             Andi Kleen:             Process packets with PSH set in the
  *                                     fast path.
  *             J Hadi Salim:           ECN support
@@ -63,7 +63,6 @@
  *             Pasi Sarolahti:         F-RTO for dealing with spurious RTOs
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <net/inet_common.h>
 #include <linux/ipsec.h>
 #include <asm/unaligned.h>
-
-int sysctl_tcp_timestamps = 1;
-int sysctl_tcp_window_scaling = 1;
-int sysctl_tcp_sack = 1;
-int sysctl_tcp_fack = 1;
-int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
-int sysctl_tcp_ecn;
-int sysctl_tcp_dsack = 1;
-int sysctl_tcp_app_win = 31;
-int sysctl_tcp_adv_win_scale = 2;
-
-int sysctl_tcp_stdurg;
-int sysctl_tcp_rfc1337;
-int sysctl_tcp_max_orphans = NR_FILE;
-int sysctl_tcp_frto;
-int sysctl_tcp_nometrics_save;
-
-int sysctl_tcp_moderate_rcvbuf = 1;
-int sysctl_tcp_abc = 1;
+#include <net/netdma.h>
+
+int sysctl_tcp_timestamps __read_mostly = 1;
+int sysctl_tcp_window_scaling __read_mostly = 1;
+int sysctl_tcp_sack __read_mostly = 1;
+int sysctl_tcp_fack __read_mostly = 1;
+int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
+int sysctl_tcp_ecn __read_mostly;
+int sysctl_tcp_dsack __read_mostly = 1;
+int sysctl_tcp_app_win __read_mostly = 31;
+int sysctl_tcp_adv_win_scale __read_mostly = 2;
+
+int sysctl_tcp_stdurg __read_mostly;
+int sysctl_tcp_rfc1337 __read_mostly;
+int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
+int sysctl_tcp_frto __read_mostly;
+int sysctl_tcp_nometrics_save __read_mostly;
+
+int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
+int sysctl_tcp_abc __read_mostly;
 
 #define FLAG_DATA              0x01 /* Incoming frame contained data.          */
 #define FLAG_WIN_UPDATE                0x02 /* Incoming ACK was a window update.       */
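Besides dropping the long-dead <linux/config.h> include and pulling in <net/netdma.h>, the hunk above annotates every sysctl with __read_mostly (and quietly changes the sysctl_tcp_abc default from 1 to 0). In this kernel the macro expands to __attribute__((__section__(".data.read_mostly"))), which groups rarely-written globals into one section so hot-path reads of these flags never share a cache line with write-heavy data. A minimal userspace sketch of the same mechanism, with a made-up section name:

    /* Sketch: grouping read-mostly globals with a section attribute, as
     * the kernel's __read_mostly does. The section name is hypothetical,
     * for illustration; any ELF section name works with GCC/Clang. */
    #include <stdio.h>

    #define demo_read_mostly __attribute__((__section__(".demo.read_mostly")))

    /* Read on every segment, written only when an admin flips a sysctl. */
    static int demo_sysctl_tcp_sack demo_read_mostly = 1;

    int main(void)
    {
        printf("sack enabled: %d\n", demo_sysctl_tcp_sack);
        return 0;
    }
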
@@ -112,22 +112,22 @@ int sysctl_tcp_abc = 1;
 
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 
-/* Adapt the MSS value used to make delayed ack decision to the 
+/* Adapt the MSS value used to make delayed ack decision to the
  * real world.
- */ 
-static inline void tcp_measure_rcv_mss(struct sock *sk,
-                                      const struct sk_buff *skb)
+ */
+static void tcp_measure_rcv_mss(struct sock *sk,
+                               const struct sk_buff *skb)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       const unsigned int lss = icsk->icsk_ack.last_seg_size; 
+       const unsigned int lss = icsk->icsk_ack.last_seg_size;
        unsigned int len;
 
-       icsk->icsk_ack.last_seg_size = 0; 
+       icsk->icsk_ack.last_seg_size = 0;
 
        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
         */
-       len = skb->len;
+       len = skb_shinfo(skb)->gso_size ?: skb->len;
        if (len >= icsk->icsk_ack.rcv_mss) {
                icsk->icsk_ack.rcv_mss = len;
        } else {
@@ -156,6 +156,8 @@ static inline void tcp_measure_rcv_mss(struct sock *sk,
                                return;
                        }
                }
+               if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
+                       icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
                icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
        }
 }
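Two behavioral changes ride along with the whitespace cleanups above: the MSS sample now prefers gso_size when the skb is an aggregated super-packet, and a second flag level (ICSK_ACK_PUSHED2) records that a push was already pending. The `?:` form is GNU C's binary conditional: `a ?: b` yields a if a is nonzero, else b. A sketch of the sampling rule:

    /* Sketch: why gso_size is the right MSS sample for a GSO/LRO
     * aggregate - the total skb length can span many segments. */
    #include <stdio.h>

    static unsigned int rcv_mss_sample(unsigned int gso_size,
                                       unsigned int skb_len)
    {
        return gso_size ?: skb_len;     /* GNU "elvis" operator, as above */
    }

    int main(void)
    {
        printf("%u\n", rcv_mss_sample(1448, 65160)); /* aggregate -> 1448 */
        printf("%u\n", rcv_mss_sample(0, 512));      /* plain skb -> 512 */
        return 0;
    }
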
@@ -246,8 +248,8 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
        return 0;
 }
 
-static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
-                                  struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+                           struct sk_buff *skb)
 {
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
@@ -341,6 +343,26 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
 }
 
+
+/* Initialize RCV_MSS value.
+ * RCV_MSS is our guess about the MSS used by the peer.
+ * We have no direct information about the MSS.
+ * It's better to underestimate RCV_MSS than to overestimate it.
+ * Overestimations make us ACK less frequently than needed.
+ * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
+ */
+void tcp_initialize_rcv_mss(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
+
+       hint = min(hint, tp->rcv_wnd/2);
+       hint = min(hint, TCP_MIN_RCVMSS);
+       hint = max(hint, TCP_MIN_MSS);
+
+       inet_csk(sk)->icsk_ack.rcv_mss = hint;
+}
+
 /* Receiver "autotuning" code.
  *
  * The algorithm for RTT estimation w/o timestamps is based on
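The new tcp_initialize_rcv_mss() clamps its guess from three sides, and because max() runs last the floor always wins. A standalone sketch, assuming this era's constants TCP_MIN_RCVMSS = 536 and TCP_MIN_MSS = 88 from include/net/tcp.h:

    /* Sketch of the clamping order in tcp_initialize_rcv_mss(). */
    #include <stdio.h>

    #define TCP_MIN_RCVMSS 536U     /* assumed kernel constant */
    #define TCP_MIN_MSS     88U     /* assumed kernel constant */

    static unsigned int initial_rcv_mss(unsigned int advmss,
                                        unsigned int mss_cache,
                                        unsigned int rcv_wnd)
    {
        unsigned int hint = advmss < mss_cache ? advmss : mss_cache;

        if (hint > rcv_wnd / 2)
            hint = rcv_wnd / 2;
        if (hint > TCP_MIN_RCVMSS)
            hint = TCP_MIN_RCVMSS;
        if (hint < TCP_MIN_MSS)
            hint = TCP_MIN_MSS;
        return hint;
    }

    int main(void)
    {
        /* A normal 1460-byte MSS with a wide-open window settles at 536:
         * deliberately an underestimate, per the comment above. */
        printf("%u\n", initial_rcv_mss(1460, 1460, 65535));
        return 0;
    }
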
@@ -367,7 +389,7 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
                 * are stalled on filesystem I/O.
                 *
                 * Also, since we are only going for a minimum in the
-                * non-timestamp case, we do not smoother things out
+                * non-timestamp case, we do not smooth things out
                 * else with timestamps disabled convergence takes too
                 * long.
                 */
@@ -418,15 +440,15 @@ void tcp_rcv_space_adjust(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int time;
        int space;
-       
+
        if (tp->rcvq_space.time == 0)
                goto new_measure;
-       
+
        time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) ||
            tp->rcv_rtt_est.rtt == 0)
                return;
-       
+
        space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
 
        space = max(tp->rcvq_space.space, space);
@@ -436,7 +458,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
                tp->rcvq_space.space = space;
 
-               if (sysctl_tcp_moderate_rcvbuf) {
+               if (sysctl_tcp_moderate_rcvbuf &&
+                   !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                        int new_clamp = space;
 
                        /* Receive space grows, normalize in order to
@@ -460,7 +483,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                        }
                }
        }
-       
+
 new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
        tp->rcvq_space.time = tcp_time_stamp;
@@ -486,7 +509,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
        tcp_measure_rcv_mss(sk, skb);
 
        tcp_rcv_rtt_measure(tp);
-       
+
        now = tcp_time_stamp;
 
        if (!icsk->icsk_ack.ato) {
@@ -538,7 +561,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
        /*      The following amusing code comes from Jacobson's
         *      article in SIGCOMM '88.  Note that rtt and mdev
         *      are scaled versions of rtt and mean deviation.
-        *      This is designed to be as fast as possible 
+        *      This is designed to be as fast as possible
         *      m stands for "measurement".
         *
         *      On a 1990 paper the rto value is changed to:
@@ -546,7 +569,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
         *
         * Funny. This algorithm seems to be very broken.
         * These formulae increase RTO, when it should be decreased, increase
-        * too slowly, when it should be increased fastly, decrease too fastly
+        * too slowly, when it should be increased quickly, decrease too quickly
         * etc. I guess in BSD RTO takes ONE value, so that it absolutely
         * does not matter how to _calculate_ it. Seems it was a trap
         * that VJ failed to avoid. 8)
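For reference, the Jacobson '88 estimator the comment describes, written unscaled (the kernel keeps srtt shifted left by 3 and mdev by 2, so the gains become integer shifts):

    \begin{aligned}
    \mathit{err}  &= m - \mathit{srtt} \\
    \mathit{srtt} &\leftarrow \mathit{srtt} + g\,\mathit{err},
                   &\quad g &= 1/8 \\
    \mathit{mdev} &\leftarrow \mathit{mdev} + h\,(\lvert\mathit{err}\rvert - \mathit{mdev}),
                   &\quad h &= 1/4 \\
    \mathit{RTO}  &= \mathit{srtt} + 4\,\mathit{mdev}
    \end{aligned}
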
@@ -735,6 +758,27 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
+/* Set slow start threshold and cwnd without falling back into slow start */
+void tcp_enter_cwr(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tp->prior_ssthresh = 0;
+       tp->bytes_acked = 0;
+       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+               tp->undo_marker = 0;
+               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+               tp->snd_cwnd = min(tp->snd_cwnd,
+                                  tcp_packets_in_flight(tp) + 1U);
+               tp->snd_cwnd_cnt = 0;
+               tp->high_seq = tp->snd_nxt;
+               tp->snd_cwnd_stamp = tcp_time_stamp;
+               TCP_ECN_queue_cwr(tp);
+
+               tcp_set_ca_state(sk, TCP_CA_CWR);
+       }
+}
+
 /* Initialize metrics on socket. */
 
 static void tcp_init_metrics(struct sock *sk)
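The new tcp_enter_cwr() deliberately pulls snd_cwnd down to one more than the packets actually in flight, so entering CWR can never itself trigger a burst. A toy illustration of that clamp:

    /* Sketch: the cwnd clamp on entering CWR, as in tcp_enter_cwr(). */
    #include <stdio.h>

    static unsigned int cwr_cwnd(unsigned int snd_cwnd, unsigned int in_flight)
    {
        return snd_cwnd < in_flight + 1 ? snd_cwnd : in_flight + 1;
    }

    int main(void)
    {
        /* cwnd of 40 but only 10 packets outstanding: resume at 11, so
         * at most one new segment goes out per arriving ACK. */
        printf("%u\n", cwr_cwnd(40, 10));
        return 0;
    }
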
@@ -891,29 +935,59 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
-       struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
+       struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+       struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
        int reord = tp->packets_out;
        int prior_fackets;
        u32 lost_retrans = 0;
        int flag = 0;
        int dup_sack = 0;
+       int cached_fack_count;
        int i;
+       int first_sack_index;
 
        if (!tp->sacked_out)
                tp->fackets_out = 0;
        prior_fackets = tp->fackets_out;
 
+       /* Check for D-SACK. */
+       if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+               dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+       } else if (num_sacks > 1 &&
+                       !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
+                       !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+               dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+       }
+
+       /* D-SACK for already forgotten data...
+        * Do dumb counting. */
+       if (dup_sack &&
+                       !after(ntohl(sp[0].end_seq), prior_snd_una) &&
+                       after(ntohl(sp[0].end_seq), tp->undo_marker))
+               tp->undo_retrans--;
+
+       /* Eliminate too old ACKs, but take into
+        * account more or less fresh ones, they can
+        * contain valid SACK info.
+        */
+       if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+               return 0;
+
        /* SACK fastpath:
         * if the only SACK change is the increase of the end_seq of
         * the first block then only apply that SACK block
         * and use retrans queue hinting otherwise slowpath */
        flag = 1;
-       for (i = 0; i< num_sacks; i++) {
-               __u32 start_seq = ntohl(sp[i].start_seq);
-               __u32 end_seq =  ntohl(sp[i].end_seq);
+       for (i = 0; i < num_sacks; i++) {
+               __be32 start_seq = sp[i].start_seq;
+               __be32 end_seq = sp[i].end_seq;
 
-               if (i == 0){
+               if (i == 0) {
                        if (tp->recv_sack_cache[i].start_seq != start_seq)
                                flag = 0;
                } else {
@@ -923,39 +997,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                }
                tp->recv_sack_cache[i].start_seq = start_seq;
                tp->recv_sack_cache[i].end_seq = end_seq;
-
-               /* Check for D-SACK. */
-               if (i == 0) {
-                       u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
-
-                       if (before(start_seq, ack)) {
-                               dup_sack = 1;
-                               tp->rx_opt.sack_ok |= 4;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
-                       } else if (num_sacks > 1 &&
-                                  !after(end_seq, ntohl(sp[1].end_seq)) &&
-                                  !before(start_seq, ntohl(sp[1].start_seq))) {
-                               dup_sack = 1;
-                               tp->rx_opt.sack_ok |= 4;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
-                       }
-
-                       /* D-SACK for already forgotten data...
-                        * Do dumb counting. */
-                       if (dup_sack &&
-                           !after(end_seq, prior_snd_una) &&
-                           after(end_seq, tp->undo_marker))
-                               tp->undo_retrans--;
-
-                       /* Eliminate too old ACKs, but take into
-                        * account more or less fresh ones, they can
-                        * contain valid SACK info.
-                        */
-                       if (before(ack, prior_snd_una - tp->max_window))
-                               return 0;
-               }
+       }
+       /* Clear the rest of the cache sack blocks so they won't match mistakenly. */
+       for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
+               tp->recv_sack_cache[i].start_seq = 0;
+               tp->recv_sack_cache[i].end_seq = 0;
        }
 
+       first_sack_index = 0;
        if (flag)
                num_sacks = 1;
        else {
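The D-SACK detection hoisted to the top of tcp_sacktag_write_queue() implements RFC 2883: the first SACK block reports a duplicate if it lies below the cumulative ACK, or if it nests inside the second block. A self-contained sketch using the same wrap-safe sequence compares as the kernel's before()/after():

    /* Sketch of the RFC 2883 D-SACK test moved above. */
    #include <stdint.h>
    #include <stdio.h>

    static int seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;    /* mod-2^32, as in before() */
    }

    static int seq_after(uint32_t a, uint32_t b)
    {
        return seq_before(b, a);
    }

    static int is_dsack(uint32_t s0, uint32_t e0, uint32_t s1, uint32_t e1,
                        int num_sacks, uint32_t ack_seq)
    {
        if (seq_before(s0, ack_seq))
            return 1;                   /* covers already-acked data */
        return num_sacks > 1 &&
               !seq_after(e0, e1) && !seq_before(s0, s1); /* inside 2nd */
    }

    int main(void)
    {
        printf("%d\n", is_dsack(1000, 1500, 0, 0, 1, 2000));    /* 1 */
        printf("%d\n", is_dsack(3000, 3500, 0, 0, 1, 2000));    /* 0 */
        return 0;
    }
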
@@ -967,10 +1016,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                        for (j = 0; j < i; j++){
                                if (after(ntohl(sp[j].start_seq),
                                          ntohl(sp[j+1].start_seq))){
-                                       sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq);
-                                       sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq);
-                                       sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq);
-                                       sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq);
+                                       struct tcp_sack_block_wire tmp;
+
+                                       tmp = sp[j];
+                                       sp[j] = sp[j+1];
+                                       sp[j+1] = tmp;
+
+                                       /* Track where the first SACK block goes to */
+                                       if (j == first_sack_index)
+                                               first_sack_index = j+1;
                                }
 
                        }
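Because blocks are now swapped as whole structs, the loop can also follow where the wire-order first block travels during the bubble pass; first_sack_index chases it forward so that, later, the fastpath hint is stored only while processing that block. A toy trace of the tracking, sorting start sequences only:

    /* Sketch: tracking the original first element through a bubble
     * sort, mirroring the first_sack_index bookkeeping above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int sp[] = { 30, 10, 20 };     /* start_seq, wire order */
        int n = 3, first_sack_index = 0;

        for (int i = n - 1; i > 0; i--)
            for (int j = 0; j < i; j++)
                if (sp[j] > sp[j + 1]) {
                    unsigned int tmp = sp[j];

                    sp[j] = sp[j + 1];
                    sp[j + 1] = tmp;
                    if (j == first_sack_index)
                        first_sack_index = j + 1;
                }

        /* The block that was first on the wire (30) now sits at [2]. */
        printf("first block now at index %d\n", first_sack_index);
        return 0;
    }
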
@@ -980,20 +1034,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        /* clear flag as used for different purpose in following code */
        flag = 0;
 
+       /* Use SACK fastpath hint if valid */
+       cached_skb = tp->fastpath_skb_hint;
+       cached_fack_count = tp->fastpath_cnt_hint;
+       if (!cached_skb) {
+               cached_skb = sk->sk_write_queue.next;
+               cached_fack_count = 0;
+       }
+
        for (i=0; i<num_sacks; i++, sp++) {
                struct sk_buff *skb;
                __u32 start_seq = ntohl(sp->start_seq);
                __u32 end_seq = ntohl(sp->end_seq);
                int fack_count;
 
-               /* Use SACK fastpath hint if valid */
-               if (tp->fastpath_skb_hint) {
-                       skb = tp->fastpath_skb_hint;
-                       fack_count = tp->fastpath_cnt_hint;
-               } else {
-                       skb = sk->sk_write_queue.next;
-                       fack_count = 0;
-               }
+               skb = cached_skb;
+               fack_count = cached_fack_count;
 
                /* Event "B" in the comment above. */
                if (after(end_seq, tp->high_seq))
@@ -1003,8 +1059,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                        int in_sack, pcount;
                        u8 sacked;
 
-                       tp->fastpath_skb_hint = skb;
-                       tp->fastpath_cnt_hint = fack_count;
+                       cached_skb = skb;
+                       cached_fack_count = fack_count;
+                       if (i == first_sack_index) {
+                               tp->fastpath_skb_hint = skb;
+                               tp->fastpath_cnt_hint = fack_count;
+                       }
 
                        /* The retransmission queue is always in order, so
                         * we can short-circuit the walk early.
@@ -1030,7 +1090,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                                else
                                        pkt_len = (end_seq -
                                                   TCP_SKB_CB(skb)->seq);
-                               if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+                               if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size))
                                        break;
                                pcount = tcp_skb_pcount(skb);
                        }
@@ -1189,8 +1249,8 @@ void tcp_enter_frto(struct sock *sk)
        tp->frto_counter = 1;
 
        if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
-            tp->snd_una == tp->high_seq ||
-            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+           tp->snd_una == tp->high_seq ||
+           (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_FRTO);
@@ -1607,7 +1667,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
         * Hence, we can detect timed out packets during fast
         * retransmit without falling to slow start.
         */
-       if (tcp_head_timedout(sk, tp)) {
+       if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
                struct sk_buff *skb;
 
                skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1646,17 +1706,26 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+/* Lower bound on congestion window is the slow start threshold
+ * unless the congestion avoidance choice decides to override it.
+ */
+static inline u32 tcp_cwnd_min(const struct sock *sk)
+{
+       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+
+       return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
+}
+
 /* Decrease cwnd each second ack. */
 static void tcp_cwnd_down(struct sock *sk)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int decr = tp->snd_cwnd_cnt + 1;
 
        tp->snd_cwnd_cnt = decr&1;
        decr >>= 1;
 
-       if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
+       if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
                tp->snd_cwnd -= decr;
 
        tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
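tcp_cwnd_down() keeps a parity bit in snd_cwnd_cnt, so decr is 1 on every second call and the window shrinks by one segment per two ACKs until it reaches the tcp_cwnd_min() bound. A runnable trace, assuming a floor of 5:

    /* Sketch: the decrement-every-second-ACK logic of tcp_cwnd_down(). */
    #include <stdio.h>

    int main(void)
    {
        unsigned int snd_cwnd = 10, snd_cwnd_cnt = 0, cwnd_min = 5;

        for (int ack = 1; ack <= 6; ack++) {
            unsigned int decr = snd_cwnd_cnt + 1;

            snd_cwnd_cnt = decr & 1;    /* carry the parity bit over */
            decr >>= 1;
            if (decr && snd_cwnd > cwnd_min)
                snd_cwnd -= decr;
            printf("ack %d: cwnd %u\n", ack, snd_cwnd);
        }
        return 0;   /* prints 10 9 9 8 8 7 */
    }
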
@@ -1849,6 +1918,34 @@ static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
        }
 }
 
+static void tcp_mtup_probe_failed(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
+       icsk->icsk_mtup.probe_size = 0;
+}
+
+static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       /* FIXME: breaks with very large cwnd */
+       tp->prior_ssthresh = tcp_current_ssthresh(sk);
+       tp->snd_cwnd = tp->snd_cwnd *
+                      tcp_mss_to_mtu(sk, tp->mss_cache) /
+                      icsk->icsk_mtup.probe_size;
+       tp->snd_cwnd_cnt = 0;
+       tp->snd_cwnd_stamp = tcp_time_stamp;
+       tp->rcv_ssthresh = tcp_current_ssthresh(sk);
+
+       icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
+       icsk->icsk_mtup.probe_size = 0;
+       tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+}
+
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
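On probe success the congestion window, which is counted in segments, is rescaled by the ratio of the old effective MTU to the probe MTU, so the volume of bytes in flight stays roughly unchanged as the segment size grows. Worked numbers:

    /* Sketch of the cwnd rescale in tcp_mtup_probe_success(). */
    #include <stdio.h>

    int main(void)
    {
        unsigned int snd_cwnd   = 20;   /* segments */
        unsigned int cur_mtu    = 1500; /* tcp_mss_to_mtu(sk, mss_cache) */
        unsigned int probe_size = 3000; /* MTU proven by the probe */

        snd_cwnd = snd_cwnd * cur_mtu / probe_size;
        /* 20 segments of ~1500 bytes become 10 segments of ~3000. */
        printf("cwnd after success: %u\n", snd_cwnd);   /* 10 */
        return 0;
    }
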
@@ -1872,11 +1969,11 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
         * 1. Reno does not count dupacks (sacked_out) automatically. */
        if (!tp->packets_out)
                tp->sacked_out = 0;
-        /* 2. SACK counts snd_fack in packets inaccurately. */
+       /* 2. SACK counts snd_fack in packets inaccurately. */
        if (tp->sacked_out == 0)
                tp->fackets_out = 0;
 
-        /* Now state machine starts.
+       /* Now state machine starts.
         * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
        if (flag&FLAG_ECE)
                tp->prior_ssthresh = 0;
@@ -1981,6 +2078,17 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
                        return;
                }
 
+               /* MTU probe failure: don't reduce cwnd */
+               if (icsk->icsk_ca_state < TCP_CA_CWR &&
+                   icsk->icsk_mtup.probe_size &&
+                   tp->snd_una == tp->mtu_probe.probe_seq_start) {
+                       tcp_mtup_probe_failed(sk);
+                       /* Restores the reduction we did in tcp_mtup_probe() */
+                       tp->snd_cwnd++;
+                       tcp_simple_retransmit(sk);
+                       return;
+               }
+
                /* Otherwise enter Recovery state */
 
                if (IsReno(tp))
@@ -2070,8 +2178,8 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
                tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
-                                 u32 in_flight, int good)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+                          u32 in_flight, int good)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
@@ -2082,7 +2190,7 @@ static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  * RFC2988 recommends to restart timer to now+rto.
  */
 
-static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
 {
        if (!tp->packets_out) {
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
@@ -2095,7 +2203,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
                         __u32 now, __s32 *seq_rtt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 
+       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
        __u32 seq = tp->snd_una;
        __u32 packets_acked;
        int acked = 0;
@@ -2147,13 +2255,12 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
        return acked;
 }
 
-static inline u32 tcp_usrtt(const struct sk_buff *skb)
+static u32 tcp_usrtt(struct timeval *tv)
 {
-       struct timeval tv, now;
+       struct timeval now;
 
        do_gettimeofday(&now);
-       skb_get_timestamp(skb, &tv);
-       return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
+       return (now.tv_sec - tv->tv_sec) * 1000000 + (now.tv_usec - tv->tv_usec);
 }
 
 /* Remove acknowledged frames from the retransmission queue. */
@@ -2168,10 +2275,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
        u32 pkts_acked = 0;
        void (*rtt_sample)(struct sock *sk, u32 usrtt)
                = icsk->icsk_ca_ops->rtt_sample;
+       struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
 
        while ((skb = skb_peek(&sk->sk_write_queue)) &&
               skb != sk->sk_send_head) {
-               struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 
+               struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                __u8 sacked = scb->sacked;
 
                /* If our packet is before the ack sequence we can
@@ -2201,6 +2309,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                        tp->retrans_stamp = 0;
                }
 
+               /* MTU probing checks */
+               if (icsk->icsk_mtup.probe_size) {
+                       if (!after(tp->mtu_probe.probe_seq_end, TCP_SKB_CB(skb)->end_seq)) {
+                               tcp_mtup_probe_success(sk, skb);
+                       }
+               }
+
                if (sacked) {
                        if (sacked & TCPCB_RETRANS) {
                                if(sacked & TCPCB_SACKED_RETRANS)
@@ -2209,8 +2324,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                                seq_rtt = -1;
                        } else if (seq_rtt < 0) {
                                seq_rtt = now - scb->when;
-                               if (rtt_sample)
-                                       (*rtt_sample)(sk, tcp_usrtt(skb));
+                               skb_get_timestamp(skb, &tv);
                        }
                        if (sacked & TCPCB_SACKED_ACKED)
                                tp->sacked_out -= tcp_skb_pcount(skb);
@@ -2223,8 +2337,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                        }
                } else if (seq_rtt < 0) {
                        seq_rtt = now - scb->when;
-                       if (rtt_sample)
-                               (*rtt_sample)(sk, tcp_usrtt(skb));
+                       skb_get_timestamp(skb, &tv);
                }
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
@@ -2236,6 +2349,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
        if (acked&FLAG_ACKED) {
                tcp_ack_update_rtt(sk, acked, seq_rtt);
                tcp_ack_packets_out(sk, tp);
+               if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
+                       (*rtt_sample)(sk, tcp_usrtt(&tv));
 
                if (icsk->icsk_ca_ops->pkts_acked)
                        icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked);
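Rather than invoking the congestion module's rtt_sample callback for every acked skb inside the loop, the patch now just snapshots the skb timestamp during the walk and issues a single call afterwards, and only when no retransmitted data was ACKed (Karn's rule: an RTT measured across a retransmission is ambiguous). The microsecond arithmetic of the reworked tcp_usrtt(), stand-alone:

    /* Sketch: tcp_usrtt() computed from a stored send timestamp. */
    #include <stdio.h>
    #include <sys/time.h>

    static unsigned int usrtt(const struct timeval *sent)
    {
        struct timeval now;

        gettimeofday(&now, NULL);
        return (now.tv_sec - sent->tv_sec) * 1000000 +
               (now.tv_usec - sent->tv_usec);
    }

    int main(void)
    {
        struct timeval sent;

        gettimeofday(&sent, NULL);
        /* ...send a segment, wait for its ACK... */
        printf("rtt ~%u us\n", usrtt(&sent));
        return 0;
    }
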
@@ -2342,7 +2457,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 
                        if (nwin > tp->max_window) {
                                tp->max_window = nwin;
-                               tcp_sync_mss(sk, tp->pmtu_cookie);
+                               tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
                        }
                }
        }
@@ -2355,9 +2470,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       
+
        tcp_sync_left_out(tp);
-       
+
        if (tp->snd_una == prior_snd_una ||
            !before(tp->snd_una, tp->frto_highmark)) {
                /* RTO was caused by loss, start retransmitting in
@@ -2408,8 +2523,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        if (before(ack, prior_snd_una))
                goto old_ack;
 
-       if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR)
-               tp->bytes_acked += ack - prior_snd_una;
+       if (sysctl_tcp_abc) {
+               if (icsk->icsk_ca_state < TCP_CA_CWR)
+                       tp->bytes_acked += ack - prior_snd_una;
+               else if (icsk->icsk_ca_state == TCP_CA_Loss)
+                       /* we assume just one segment left the network */
+                       tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
+       }
 
        if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
                /* Window is constant, pure forward advance.
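This extends Appropriate Byte Counting (RFC 3465) into the Loss state: below CWR every newly acked byte earns cwnd credit, while in Loss the credit is capped at one mss_cache per ACK, since at most one segment is presumed to have left the network per ACK during recovery. A sketch with a simplified state enum (the kernel orders Open < Disorder < CWR < Recovery < Loss):

    /* Sketch of the ABC crediting added above; states simplified. */
    #include <stdio.h>

    enum ca_state { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

    static unsigned int abc_credit(enum ca_state st,
                                   unsigned int newly_acked,
                                   unsigned int mss)
    {
        if (st < CA_CWR)
            return newly_acked;                 /* full byte credit */
        if (st == CA_LOSS)
            return newly_acked < mss ? newly_acked : mss;
        return 0;                               /* CWR/Recovery: none */
    }

    int main(void)
    {
        printf("%u\n", abc_credit(CA_OPEN, 4380, 1460));    /* 4380 */
        printf("%u\n", abc_credit(CA_LOSS, 4380, 1460));    /* 1460 */
        return 0;
    }
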
@@ -2507,7 +2627,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
        opt_rx->saw_tstamp = 0;
 
        while(length>0) {
-               int opcode=*ptr++;
+               int opcode=*ptr++;
                int opsize;
 
                switch (opcode) {
@@ -2522,10 +2642,10 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        return;
                                if (opsize > length)
                                        return; /* don't parse partial options */
-                               switch(opcode) {
+                               switch(opcode) {
                                case TCPOPT_MSS:
                                        if(opsize==TCPOLEN_MSS && th->syn && !estab) {
-                                               u16 in_mss = ntohs(get_unaligned((__u16 *)ptr));
+                                               u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
                                                if (in_mss) {
                                                        if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
                                                                in_mss = opt_rx->user_mss;
@@ -2553,8 +2673,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                                if ((estab && opt_rx->tstamp_ok) ||
                                                    (!estab && sysctl_tcp_timestamps)) {
                                                        opt_rx->saw_tstamp = 1;
-                                                       opt_rx->rcv_tsval = ntohl(get_unaligned((__u32 *)ptr));
-                                                       opt_rx->rcv_tsecr = ntohl(get_unaligned((__u32 *)(ptr+4)));
+                                                       opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
+                                                       opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
                                                }
                                        }
                                        break;
@@ -2573,26 +2693,34 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                           opt_rx->sack_ok) {
                                                TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
                                        }
-                               };
-                               ptr+=opsize-2;
-                               length-=opsize;
-               };
+#ifdef CONFIG_TCP_MD5SIG
+                               case TCPOPT_MD5SIG:
+                                       /*
+                                        * The MD5 Hash has already been
+                                        * checked (see tcp_v{4,6}_do_rcv()).
+                                        */
+                                       break;
+#endif
+                               };
+                               ptr+=opsize-2;
+                               length-=opsize;
+               };
        }
 }
 
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
-                                        struct tcp_sock *tp)
+static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
+                                 struct tcp_sock *tp)
 {
        if (th->doff == sizeof(struct tcphdr)>>2) {
                tp->rx_opt.saw_tstamp = 0;
                return 0;
        } else if (tp->rx_opt.tstamp_ok &&
                   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
-               __u32 *ptr = (__u32 *)(th + 1);
-               if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+               __be32 *ptr = (__be32 *)(th + 1);
+               if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                                  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
                        tp->rx_opt.saw_tstamp = 1;
                        ++ptr;
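The ntohl-to-htonl change is the endian-correct way to build the predicted option prefix NOP, NOP, TIMESTAMP, length 10 - that is, 0x0101080a in host order, converted once and compared against the raw wire word. Demonstration:

    /* Sketch: the one-compare timestamp fast path test. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TCPOPT_NOP        1
    #define TCPOPT_TIMESTAMP  8
    #define TCPOLEN_TIMESTAMP 10

    int main(void)
    {
        uint32_t predicted = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
        /* The first option word of a well-formed timestamped segment: */
        uint8_t wire[4] = { TCPOPT_NOP, TCPOPT_NOP,
                            TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP };
        uint32_t word;

        memcpy(&word, wire, sizeof(word));
        printf("fast path taken: %d\n", word == predicted);  /* 1 */
        return 0;
    }
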
@@ -2804,8 +2932,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
        }
 }
 
-static __inline__ int
-tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
+static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
 {
        if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
                if (before(seq, sp->start_seq))
@@ -2817,7 +2944,7 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
        return 0;
 }
 
-static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
        if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
                if (before(seq, tp->rcv_nxt))
@@ -2832,7 +2959,7 @@ static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
        }
 }
 
-static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
        if (!tp->rx_opt.dsack)
                tcp_dsack_set(tp, seq, end_seq);
@@ -2890,7 +3017,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
        }
 }
 
-static __inline__ void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
+static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
 {
        __u32 tmp;
 
@@ -3136,7 +3263,7 @@ drop:
                           TCP_SKB_CB(skb)->end_seq);
 
                tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
-               
+
                /* If window is closed, drop tail of packet. But after
                 * remembering D-SACK for its head made in previous line.
                 */
@@ -3215,7 +3342,7 @@ drop:
                        }
                }
                __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
-               
+
                /* And clean segments covered by new one as whole. */
                while ((skb1 = skb->next) !=
                       (struct sk_buff*)&tp->out_of_order_queue &&
@@ -3307,7 +3434,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                        int offset = start - TCP_SKB_CB(skb)->seq;
                        int size = TCP_SKB_CB(skb)->end_seq - start;
 
-                       if (offset < 0) BUG();
+                       BUG_ON(offset < 0);
                        if (size > 0) {
                                size = min(copy, size);
                                if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
@@ -3380,7 +3507,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
  */
 static int tcp_prune_queue(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk); 
+       struct tcp_sock *tp = tcp_sk(sk);
 
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
@@ -3445,7 +3572,8 @@ void tcp_cwnd_application_limited(struct sock *sk)
        if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
            sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                /* Limited by application or receiver window. */
-               u32 win_used = max(tp->snd_cwnd_used, 2U);
+               u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+               u32 win_used = max(tp->snd_cwnd_used, init_win);
                if (win_used < tp->snd_cwnd) {
                        tp->snd_ssthresh = tcp_current_ssthresh(sk);
                        tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
@@ -3455,7 +3583,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static inline int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
 {
        /* If the user specified a specific send buffer setting, do
         * not modify it.
@@ -3489,7 +3617,7 @@ static void tcp_new_space(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_should_expand_sndbuf(sk, tp)) {
-               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+               int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                        MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
                    demanded = max_t(unsigned int, tp->snd_cwnd,
                                                   tp->reordering + 1);
@@ -3502,7 +3630,7 @@ static void tcp_new_space(struct sock *sk)
        sk->sk_write_space(sk);
 }
 
-static inline void tcp_check_space(struct sock *sk)
+static void tcp_check_space(struct sock *sk)
 {
        if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
                sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
@@ -3512,7 +3640,7 @@ static inline void tcp_check_space(struct sock *sk)
        }
 }
 
-static __inline__ void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
 {
        tcp_push_pending_frames(sk, tp);
        tcp_check_space(sk);
@@ -3544,7 +3672,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
        }
 }
 
-static __inline__ void tcp_ack_snd_check(struct sock *sk)
+static inline void tcp_ack_snd_check(struct sock *sk)
 {
        if (!inet_csk_ack_scheduled(sk)) {
                /* We sent a data segment already. */
@@ -3562,7 +3690,7 @@ static __inline__ void tcp_ack_snd_check(struct sock *sk)
  *     For 1003.1g we should support a new option TCP_STDURG to permit
  *     either form (or just set the sysctl tcp_stdurg).
  */
+
 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -3643,7 +3771,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
                u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
                          th->syn;
 
-               /* Is the urgent pointer pointing into this packet? */   
+               /* Is the urgent pointer pointing into this packet? */
                if (ptr < skb->len) {
                        u8 tmp;
                        if (skb_copy_bits(skb, ptr, &tmp, 1))
@@ -3678,9 +3806,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        return err;
 }
 
-static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
-       int result;
+       __sum16 result;
 
        if (sock_owned_by_user(sk)) {
                local_bh_enable();
@@ -3692,34 +3820,77 @@ static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
        return result;
 }
 
-static __inline__ int
-tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
        return skb->ip_summed != CHECKSUM_UNNECESSARY &&
                __tcp_checksum_complete_user(sk, skb);
 }
 
+#ifdef CONFIG_NET_DMA
+static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int chunk = skb->len - hlen;
+       int dma_cookie;
+       int copied_early = 0;
+
+       if (tp->ucopy.wakeup)
+               return 0;
+
+       if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+               tp->ucopy.dma_chan = get_softnet_dma();
+
+       if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) {
+
+               dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
+                       skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list);
+
+               if (dma_cookie < 0)
+                       goto out;
+
+               tp->ucopy.dma_cookie = dma_cookie;
+               copied_early = 1;
+
+               tp->ucopy.len -= chunk;
+               tp->copied_seq += chunk;
+               tcp_rcv_space_adjust(sk);
+
+               if ((tp->ucopy.len == 0) ||
+                   (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+                   (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
+                       tp->ucopy.wakeup = 1;
+                       sk->sk_data_ready(sk, 0);
+               }
+       } else if (chunk > 0) {
+               tp->ucopy.wakeup = 1;
+               sk->sk_data_ready(sk, 0);
+       }
+out:
+       return copied_early;
+}
+#endif /* CONFIG_NET_DMA */
+
 /*
- *     TCP receive function for the ESTABLISHED state. 
+ *     TCP receive function for the ESTABLISHED state.
  *
- *     It is split into a fast path and a slow path. The fast path is 
+ *     It is split into a fast path and a slow path. The fast path is
  *     disabled when:
  *     - A zero window was announced from us - zero window probing
- *        is only handled properly in the slow path. 
+ *        is only handled properly in the slow path.
  *     - Out of order segments arrived.
  *     - Urgent data is expected.
  *     - There is no buffer space left
  *     - Unexpected TCP flags/window values/header lengths are received
- *       (detected by checking the TCP header against pred_flags) 
+ *       (detected by checking the TCP header against pred_flags)
  *     - Data is sent in both directions. Fast path only supports pure senders
  *       or pure receivers (this means either the sequence number or the ack
  *       value must stay constant)
  *     - Unexpected TCP option.
  *
- *     When these conditions are not satisfied it drops into a standard 
+ *     When these conditions are not satisfied it drops into a standard
  *     receive procedure patterned after RFC793 to handle all cases.
  *     The first three cases are guaranteed by proper pred_flags setting,
- *     the rest is checked inline. Fast processing is turned on in 
+ *     the rest is checked inline. Fast processing is turned on in
  *     tcp_data_queue when everything is OK.
  */
 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
@@ -3729,15 +3900,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
        /*
         *      Header prediction.
-        *      The code loosely follows the one in the famous 
+        *      The code loosely follows the one in the famous
         *      "30 instruction TCP receive" Van Jacobson mail.
-        *      
-        *      Van's trick is to deposit buffers into socket queue 
+        *
+        *      Van's trick is to deposit buffers into socket queue
         *      on a device interrupt, to call tcp_recv function
         *      on the receive process context and checksum and copy
         *      the buffer to user space. smart...
         *
-        *      Our current scheme is not silly either but we take the 
+        *      Our current scheme is not silly either but we take the
         *      extra cost of the net_bh soft interrupt processing...
         *      We do checksum and copy also but from device to kernel.
         */
@@ -3748,7 +3919,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         *      if header_prediction is to be made
         *      'S' will always be tp->tcp_header_len >> 2
         *      '?' will be 0 for the fast path, otherwise pred_flags is 0 to
-        *  turn it off (when there are holes in the receive 
+        *  turn it off (when there are holes in the receive
         *       space for instance)
         *      PSH flag is ignored.
         */
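pred_flags packs the expected data offset, the ACK bit, and the send window into one 32-bit word, so header prediction is a single compare against the fourth word of the incoming TCP header. A sketch of the packing, leaving out the final byte-order conversion the kernel applies:

    /* Sketch: building pred_flags as in __tcp_fast_path_on().
     * tcp_header_len is in bytes; << 26 drops doff (len / 4) into the
     * top four bits, where the wire format keeps the data offset. */
    #include <stdint.h>
    #include <stdio.h>

    #define TCP_FLAG_ACK 0x00100000U    /* host-order value assumed */

    int main(void)
    {
        uint32_t tcp_header_len = 32;   /* 20 + 12 for aligned timestamps */
        uint32_t snd_wnd = 0xffff;
        uint32_t pred = (tcp_header_len << 26) | TCP_FLAG_ACK | snd_wnd;

        printf("pred_flags = 0x%08x\n", pred);  /* 0x8010ffff: doff 8 */
        return 0;
    }
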
@@ -3764,15 +3935,15 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                /* Check timestamp */
                if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
-                       __u32 *ptr = (__u32 *)(th + 1);
+                       __be32 *ptr = (__be32 *)(th + 1);
 
                        /* No? Slow path! */
-                       if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+                       if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                                          | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
                                goto slow_path;
 
                        tp->rx_opt.saw_tstamp = 1;
-                       ++ptr; 
+                       ++ptr;
                        tp->rx_opt.rcv_tsval = ntohl(*ptr);
                        ++ptr;
                        tp->rx_opt.rcv_tsecr = ntohl(*ptr);
@@ -3800,13 +3971,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                    tp->rcv_nxt == tp->rcv_wup)
                                        tcp_store_ts_recent(tp);
 
-                               tcp_rcv_rtt_measure_ts(sk, skb);
-
                                /* We know that such packets are checksummed
                                 * on entry.
                                 */
                                tcp_ack(sk, skb, 0);
-                               __kfree_skb(skb); 
+                               __kfree_skb(skb);
                                tcp_data_snd_check(sk, tp);
                                return 0;
                        } else { /* Header too small */
@@ -3815,14 +3984,23 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        }
                } else {
                        int eaten = 0;
+                       int copied_early = 0;
 
-                       if (tp->ucopy.task == current &&
-                           tp->copied_seq == tp->rcv_nxt &&
-                           len - tcp_header_len <= tp->ucopy.len &&
-                           sock_owned_by_user(sk)) {
-                               __set_current_state(TASK_RUNNING);
+                       if (tp->copied_seq == tp->rcv_nxt &&
+                           len - tcp_header_len <= tp->ucopy.len) {
+#ifdef CONFIG_NET_DMA
+                               if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+                                       copied_early = 1;
+                                       eaten = 1;
+                               }
+#endif
+                               if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) {
+                                       __set_current_state(TASK_RUNNING);
 
-                               if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
+                                       if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
+                                               eaten = 1;
+                               }
+                               if (eaten) {
                                        /* Predicted packet is in window by definition.
                                         * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                                         * Hence, check seq<=rcv_wup reduces to:
@@ -3838,8 +4016,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                        __skb_pull(skb, tcp_header_len);
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                                        NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
-                                       eaten = 1;
                                }
+                               if (copied_early)
+                                       tcp_cleanup_rbuf(sk, skb->len);
                        }
                        if (!eaten) {
                                if (tcp_checksum_complete_user(sk, skb))
@@ -3880,6 +4059,11 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
                        __tcp_ack_snd_check(sk, 0);
 no_ack:
+#ifdef CONFIG_NET_DMA
+                       if (copied_early)
+                               __skb_queue_tail(&sk->sk_async_wait_queue, skb);
+                       else
+#endif
                        if (eaten)
                                __kfree_skb(skb);
                        else
@@ -3967,12 +4151,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                         struct tcphdr *th, unsigned len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        int saved_clamp = tp->rx_opt.mss_clamp;
 
        tcp_parse_options(skb, &tp->rx_opt, 0);
 
        if (th->ack) {
-               struct inet_connection_sock *icsk;
                /* rfc793:
                 * "If the state is SYN-SENT then
                 *    first check the ACK bit
@@ -4025,8 +4209,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 */
 
                TCP_ECN_rcv_synack(tp, th);
-               if (tp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
 
                tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
                tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -4061,18 +4243,21 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                if (tp->rx_opt.sack_ok && sysctl_tcp_fack)
                        tp->rx_opt.sack_ok |= 2;
 
-               tcp_sync_mss(sk, tp->pmtu_cookie);
+               tcp_mtup_init(sk);
+               tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
 
                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
-               mb();
+               smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
 
+               security_inet_conn_established(sk, skb);
+
                /* Make sure socket is routed, for correct metrics.  */
-               tp->af_specific->rebuild_header(sk);
+               icsk->icsk_af_ops->rebuild_header(sk);
 
                tcp_init_metrics(sk);
 
@@ -4098,8 +4283,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                        sk_wake_async(sk, 0, POLL_OUT);
                }
 
-               icsk = inet_csk(sk);
-
                if (sk->sk_write_pending ||
                    icsk->icsk_accept_queue.rskq_defer_accept ||
                    icsk->icsk_ack.pingpong) {
@@ -4170,10 +4353,9 @@ discard:
                tp->max_window = tp->snd_wnd;
 
                TCP_ECN_rcv_syn(tp, th);
-               if (tp->ecn_flags&TCP_ECN_OK)
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
 
-               tcp_sync_mss(sk, tp->pmtu_cookie);
+               tcp_mtup_init(sk);
+               tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
 
 
@@ -4211,15 +4393,16 @@ reset_and_undo:
 
 /*
  *     This function implements the receiving procedure of RFC 793 for
- *     all states except ESTABLISHED and TIME_WAIT. 
+ *     all states except ESTABLISHED and TIME_WAIT.
  *     It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
  *     address independent.
  */
-       
+
 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                          struct tcphdr *th, unsigned len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_connection_sock *icsk = inet_csk(sk);
        int queued = 0;
 
        tp->rx_opt.saw_tstamp = 0;
@@ -4236,26 +4419,28 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
 
                if(th->syn) {
-                       if(tp->af_specific->conn_request(sk, skb) < 0)
+                       if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
-                       /* Now we have several options: In theory there is 
-                        * nothing else in the frame. KA9Q has an option to 
+                       /* Now we have several options: In theory there is
+                        * nothing else in the frame. KA9Q has an option to
                         * send data with the syn, BSD accepts data with the
-                        * syn up to the [to be] advertised window and 
-                        * Solaris 2.1 gives you a protocol error. For now 
-                        * we just ignore it, that fits the spec precisely 
+                        * syn up to the [to be] advertised window and
+                        * Solaris 2.1 gives you a protocol error. For now
+                        * we just ignore it, that fits the spec precisely
                         * and avoids incompatibilities. It would be nice in
                         * future to drop through and process the data.
                         *
-                        * Now that TTCP is starting to be used we ought to 
+                        * Now that TTCP is starting to be used we ought to
                         * queue this data.
                         * But, this leaves one open to an easy denial of
-                        * service attack, and SYN cookies can't defend
+                        * service attack, and SYN cookies can't defend
                         * against this problem. So, we drop the data
-                        * in the interest of security over speed.
+                        * in the interest of security over speed unless
+                        * it's still in use.
                         */
-                       goto discard;
+                       kfree_skb(skb);
+                       return 0;
                }
                goto discard;
 
@@ -4316,7 +4501,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
-                               mb();
+                               smp_mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->sk_state_change(sk);
 
@@ -4349,7 +4534,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                /* Make sure socket is routed, for
                                 * correct metrics.
                                 */
-                               tp->af_specific->rebuild_header(sk);
+                               icsk->icsk_af_ops->rebuild_header(sk);
 
                                tcp_init_metrics(sk);
 
@@ -4360,6 +4545,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                 */
                                tp->lsndtime = tcp_time_stamp;
 
+                               tcp_mtup_init(sk);
                                tcp_initialize_rcv_mss(sk);
                                tcp_init_buffer_space(sk);
                                tcp_fast_path_on(tp);
@@ -4438,7 +4624,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
                /* RFC 793 says to queue data in these states,
-                * RFC 1122 says we MUST send a reset. 
+                * RFC 1122 says we MUST send a reset.
                 * BSD 4.4 also does reset.
                 */
                if (sk->sk_shutdown & RCV_SHUTDOWN) {
@@ -4450,7 +4636,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        }
                }
                /* Fall through */
-       case TCP_ESTABLISHED: 
+       case TCP_ESTABLISHED:
                tcp_data_queue(sk, skb);
                queued = 1;
                break;
@@ -4462,7 +4648,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                tcp_ack_snd_check(sk);
        }
 
-       if (!queued) { 
+       if (!queued) {
 discard:
                __kfree_skb(skb);
        }
@@ -4471,7 +4657,7 @@ discard:
 
 EXPORT_SYMBOL(sysctl_tcp_ecn);
 EXPORT_SYMBOL(sysctl_tcp_reordering);
-EXPORT_SYMBOL(sysctl_tcp_abc);
 EXPORT_SYMBOL(tcp_parse_options);
 EXPORT_SYMBOL(tcp_rcv_established);
 EXPORT_SYMBOL(tcp_rcv_state_process);
+EXPORT_SYMBOL(tcp_initialize_rcv_mss);