nfsd4.1: common slot allocation size calculation
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 03f5ede..d86784b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -64,6 +64,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sysctl.h>
+#include <linux/kernel.h>
 #include <net/dst.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
@@ -76,7 +77,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
 int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
-int sysctl_tcp_ecn __read_mostly;
+int sysctl_tcp_ecn __read_mostly = 2;
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 2;
@@ -596,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-       struct dst_entry *dst = __sk_dst_get(sk);
-       u32 rto_min = TCP_RTO_MIN;
-
-       if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-               rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
-       return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -694,7 +685,7 @@ static inline void tcp_set_rto(struct sock *sk)
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
-       inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
+       inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 
        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
@@ -705,8 +696,7 @@ static inline void tcp_set_rto(struct sock *sk)
        /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
         * guarantees that rto is higher.
         */
-       if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
-               inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+       tcp_bound_rto(sk);
 }
 
 /* Save metrics learned by this TCP session.
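The RTO helpers removed above are not gone: tcp_rto_min(), the smoothed-RTO formula, and the TCP_RTO_MAX clamp have presumably been moved into inline helpers in net/tcp.h so other call sites can share them (the header location is an assumption). Their bodies can be reconstructed from the code deleted in the preceding hunks, roughly:

/* Sketch, reconstructed from the removed lines; assumed to live in net/tcp.h. */
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	/* Same formula previously open-coded in tcp_set_rto(). */
	return (tp->srtt >> 3) + tp->rttvar;
}

static inline void tcp_bound_rto(const struct sock *sk)
{
	/* Same TCP_RTO_MAX clamp previously open-coded in tcp_set_rto(). */
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}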
@@ -771,7 +761,7 @@ void tcp_update_metrics(struct sock *sk)
                        set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
                }
 
-               if (tp->snd_ssthresh >= 0xFFFF) {
+               if (tcp_in_initial_slowstart(tp)) {
                        /* Slow start still did not finish. */
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
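tcp_in_initial_slowstart() is new in this hunk. A minimal sketch of what it presumably is, assuming an inline predicate in net/tcp.h and that TCP_INFINITE_SSTHRESH names the "ssthresh never yet lowered" sentinel (both the location and the constant name are assumptions):

/* Sketch, not a quote of the real header: a named replacement for the
 * open-coded "tp->snd_ssthresh >= 0xFFFF" test, meaning slow start has
 * never ended on this connection. */
static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;	/* assumed constant */
}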
@@ -927,6 +917,8 @@ static void tcp_init_metrics(struct sock *sk)
        tcp_set_rto(sk);
        if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
                goto reset;
+
+cwnd:
        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        return;
@@ -941,6 +933,7 @@ reset:
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        }
+       goto cwnd;
 }
 
 static void tcp_update_reordering(struct sock *sk, const int metric,
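Taken together, the hunk above and the one before it rework tcp_init_metrics() so that the reset path no longer bypasses congestion-window initialization; the resulting control flow is roughly:

	tcp_set_rto(sk);
	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
		goto reset;

cwnd:
	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	return;

reset:
	/* ... fall back to conservative RTT/RTO defaults ... */
	goto cwnd;	/* snd_cwnd is now initialized on this path as well */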
@@ -1802,11 +1795,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        for (i = used_sacks - 1; i > 0; i--) {
                for (j = 0; j < i; j++) {
                        if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
-                               struct tcp_sack_block tmp;
-
-                               tmp = sp[j];
-                               sp[j] = sp[j + 1];
-                               sp[j + 1] = tmp;
+                               swap(sp[j], sp[j + 1]);
 
                                /* Track where the first SACK block goes to */
                                if (j == first_sack_index)
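The hand-rolled three-assignment exchange is replaced by the generic swap() macro, which is why <linux/kernel.h> is now included at the top of the file. For reference, the macro is roughly:

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)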
@@ -2836,7 +2825,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
        icsk->icsk_mtup.probe_size = 0;
 }
 
-static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
+static void tcp_mtup_probe_success(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2864,7 +2853,7 @@ void tcp_simple_retransmit(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       unsigned int mss = tcp_current_mss(sk, 0);
+       unsigned int mss = tcp_current_mss(sk);
        u32 prior_lost = tp->lost_out;
 
        tcp_for_write_queue(skb, sk) {
@@ -3201,7 +3190,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
        while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-               u32 end_seq;
                u32 acked_pcount;
                u8 sacked = scb->sacked;
 
@@ -3216,16 +3204,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                break;
 
                        fully_acked = 0;
-                       end_seq = tp->snd_una;
                } else {
                        acked_pcount = tcp_skb_pcount(skb);
-                       end_seq = scb->end_seq;
-               }
-
-               /* MTU probing checks */
-               if (fully_acked && icsk->icsk_mtup.probe_size &&
-                   !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) {
-                       tcp_mtup_probe_success(sk, skb);
                }
 
                if (sacked & TCPCB_RETRANS) {
@@ -3290,6 +3270,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                const struct tcp_congestion_ops *ca_ops
                        = inet_csk(sk)->icsk_ca_ops;
 
+               if (unlikely(icsk->icsk_mtup.probe_size &&
+                            !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
+                       tcp_mtup_probe_success(sk);
+               }
+
                tcp_ack_update_rtt(sk, flag, seq_rtt);
                tcp_rearm_rto(sk);
 
@@ -3416,7 +3401,7 @@ static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
                flag |= FLAG_WIN_UPDATE;
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
 
                if (tp->snd_wnd != nwin) {
                        tp->snd_wnd = nwin;
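tcp_update_wl() (and tcp_init_wl() later in this patch) drop their unused ack argument. Assuming the helpers stay in net/tcp.h, they presumably reduce to recording the sequence number used for window-update ordering:

/* Sketch under that assumption. */
static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}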
@@ -3592,15 +3577,18 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
        int prior_packets;
        int frto_cwnd = 0;
 
-       /* If the ack is newer than sent or older than previous acks
+       /* If the ack is older than previous acks
         * then we can probably ignore it.
         */
-       if (after(ack, tp->snd_nxt))
-               goto uninteresting_ack;
-
        if (before(ack, prior_snd_una))
                goto old_ack;
 
+       /* If the ack includes data we haven't sent yet, discard
+        * this segment (RFC793 Section 3.9).
+        */
+       if (after(ack, tp->snd_nxt))
+               goto invalid_ack;
+
        if (after(ack, prior_snd_una))
                flag |= FLAG_SND_UNA_ADVANCED;
 
@@ -3621,7 +3609,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                 * No more checks are required.
                 * Note, we use the fact that SND.UNA>=SND.WL2.
                 */
-               tcp_update_wl(tp, ack, ack_seq);
+               tcp_update_wl(tp, ack_seq);
                tp->snd_una = ack;
                flag |= FLAG_WIN_UPDATE;
 
@@ -3690,6 +3678,10 @@ no_queue:
                tcp_ack_probe(sk);
        return 1;
 
+invalid_ack:
+       SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       return -1;
+
 old_ack:
        if (TCP_SKB_CB(skb)->sacked) {
                tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3697,8 +3689,7 @@ old_ack:
                        tcp_try_keep_open(sk);
        }
 
-uninteresting_ack:
-       SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+       SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
        return 0;
 }
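With the new invalid_ack label, tcp_ack() now has a three-way return convention: negative for an ACK beyond snd_nxt (the caller should discard the segment), 0 for an old or otherwise uninteresting ACK, and positive for an ACK that was processed. The call sites updated further down rely on this, for example:

	/* Slow path of tcp_rcv_established(): discard on a negative return. */
	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
		goto discard;

	/* tcp_rcv_state_process(): only a positive return counts as acceptable. */
	int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;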
 
@@ -3886,8 +3877,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if (tcp_paws_check(&tp->rx_opt, 0))
                        tcp_store_ts_recent(tp);
        }
 }
@@ -3939,9 +3929,9 @@ static inline int tcp_paws_discard(const struct sock *sk,
                                   const struct sk_buff *skb)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
-               !tcp_disordered_ack(sk, skb));
+
+       return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
+              !tcp_disordered_ack(sk, skb);
 }
 
 /* Check segment sequence number for validity.
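Both PAWS sites now go through a consolidated tcp_paws_check() helper. Its body can be reconstructed from the two open-coded tests removed above (the net/tcp.h location is an assumption); roughly:

static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	/* Timestamp went backwards by no more than paws_win ticks ... */
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	/* ... or ts_recent is more than 24 days old and considered stale. */
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;
	return 0;
}

tcp_paws_reject(), used in the SYN-sent path near the end of this patch, is presumably the negated form of this check with extra leniency for RST segments.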
@@ -4099,7 +4089,6 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.dsack = 1;
                tp->duplicate_sack[0].start_seq = seq;
                tp->duplicate_sack[0].end_seq = end_seq;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1;
        }
 }
 
@@ -4154,8 +4143,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         * Decrease num_sacks.
                         */
                        tp->rx_opt.num_sacks--;
-                       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-                                              tp->rx_opt.dsack;
                        for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i + 1];
                        continue;
@@ -4164,20 +4151,6 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
        }
 }
 
-static inline void tcp_sack_swap(struct tcp_sack_block *sack1,
-                                struct tcp_sack_block *sack2)
-{
-       __u32 tmp;
-
-       tmp = sack1->start_seq;
-       sack1->start_seq = sack2->start_seq;
-       sack2->start_seq = tmp;
-
-       tmp = sack1->end_seq;
-       sack1->end_seq = sack2->end_seq;
-       sack2->end_seq = tmp;
-}
-
 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
@@ -4192,7 +4165,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                if (tcp_sack_extend(sp, seq, end_seq)) {
                        /* Rotate this_sack to the first one. */
                        for (; this_sack > 0; this_sack--, sp--)
-                               tcp_sack_swap(sp, sp - 1);
+                               swap(*sp, *(sp - 1));
                        if (cur_sacks > 1)
                                tcp_sack_maybe_coalesce(tp);
                        return;
@@ -4218,7 +4191,6 @@ new_sack:
        sp->start_seq = seq;
        sp->end_seq = end_seq;
        tp->rx_opt.num_sacks++;
-       tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 }
 
 /* RCV.NXT advances, some SACKs should be eaten. */
@@ -4232,7 +4204,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
        if (skb_queue_empty(&tp->out_of_order_queue)) {
                tp->rx_opt.num_sacks = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
                return;
        }
 
@@ -4253,11 +4224,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                this_sack++;
                sp++;
        }
-       if (num_sacks != tp->rx_opt.num_sacks) {
-               tp->rx_opt.num_sacks = num_sacks;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks +
-                                      tp->rx_opt.dsack;
-       }
+       tp->rx_opt.num_sacks = num_sacks;
 }
 
 /* This one checks to see if we can put data from the
@@ -4333,10 +4300,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
        TCP_ECN_accept_cwr(tp, skb);
 
-       if (tp->rx_opt.dsack) {
-               tp->rx_opt.dsack = 0;
-               tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-       }
+       tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
         *  Packets in sequence go to the receive queue.
@@ -4455,15 +4419,13 @@ drop:
                /* Initial out of order segment, build 1 SACK. */
                if (tcp_is_sack(tp)) {
                        tp->rx_opt.num_sacks = 1;
-                       tp->rx_opt.dsack     = 0;
-                       tp->rx_opt.eff_sacks = 1;
                        tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
                        tp->selective_acks[0].end_seq =
                                                TCP_SKB_CB(skb)->end_seq;
                }
                __skb_queue_head(&tp->out_of_order_queue, skb);
        } else {
-               struct sk_buff *skb1 = tp->out_of_order_queue.prev;
+               struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
                u32 seq = TCP_SKB_CB(skb)->seq;
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -4480,15 +4442,18 @@ drop:
                }
 
                /* Find place to insert this segment. */
-               do {
+               while (1) {
                        if (!after(TCP_SKB_CB(skb1)->seq, seq))
                                break;
-               } while ((skb1 = skb1->prev) !=
-                        (struct sk_buff *)&tp->out_of_order_queue);
+                       if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
+                               skb1 = NULL;
+                               break;
+                       }
+                       skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
+               }
 
                /* Do skb overlap to previous one? */
-               if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
-                   before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+               if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
                        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                /* All the bits are present. Drop. */
                                __kfree_skb(skb);
@@ -4500,15 +4465,26 @@ drop:
                                tcp_dsack_set(sk, seq,
                                              TCP_SKB_CB(skb1)->end_seq);
                        } else {
-                               skb1 = skb1->prev;
+                               if (skb_queue_is_first(&tp->out_of_order_queue,
+                                                      skb1))
+                                       skb1 = NULL;
+                               else
+                                       skb1 = skb_queue_prev(
+                                               &tp->out_of_order_queue,
+                                               skb1);
                        }
                }
-               __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+               if (!skb1)
+                       __skb_queue_head(&tp->out_of_order_queue, skb);
+               else
+                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
                /* And clean segments covered by new one as whole. */
-               while ((skb1 = skb->next) !=
-                      (struct sk_buff *)&tp->out_of_order_queue &&
-                      after(end_seq, TCP_SKB_CB(skb1)->seq)) {
+               while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
+                       skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+
+                       if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+                               break;
                        if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                                 end_seq);
@@ -4529,7 +4505,10 @@ add_sack:
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
                                        struct sk_buff_head *list)
 {
-       struct sk_buff *next = skb->next;
+       struct sk_buff *next = NULL;
+
+       if (!skb_queue_is_last(list, skb))
+               next = skb_queue_next(list, skb);
 
        __skb_unlink(skb, list);
        __kfree_skb(skb);
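Throughout the out-of-order-queue and collapse code, hand walking of skb->next/skb->prev and comparisons against the queue head cast to an skb are replaced with the sk_buff_head helpers. Their semantics, roughly as in <linux/skbuff.h>:

/* Sketch of the helpers' behaviour, not a verbatim quote of the header. */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *)list;
}

static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* Callers must already know they are not at the end of the list. */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

skb_queue_is_first() and skb_queue_prev() are the mirror images on skb->prev, and skb_peek()/skb_peek_tail() return NULL on an empty queue instead of the queue head itself.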
@@ -4540,6 +4519,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
+ *
+ * If tail is NULL, this means until the end of the list.
+ *
  * Segments with FIN/SYN are not collapsed (only because this
  * simplifies code)
  */
@@ -4548,15 +4530,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
             struct sk_buff *head, struct sk_buff *tail,
             u32 start, u32 end)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb, *n;
+       bool end_of_skbs;
 
        /* First, check that queue is collapsible and find
         * the point where collapsing can be useful. */
-       for (skb = head; skb != tail;) {
+       skb = head;
+restart:
+       end_of_skbs = true;
+       skb_queue_walk_from_safe(list, skb, n) {
+               if (skb == tail)
+                       break;
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                        skb = tcp_collapse_one(sk, skb, list);
-                       continue;
+                       if (!skb)
+                               break;
+                       goto restart;
                }
 
                /* The first skb to collapse is:
@@ -4566,16 +4556,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 */
                if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
-                    before(TCP_SKB_CB(skb)->seq, start) ||
-                    (skb->next != tail &&
-                     TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
+                    before(TCP_SKB_CB(skb)->seq, start))) {
+                       end_of_skbs = false;
                        break;
+               }
+
+               if (!skb_queue_is_last(list, skb)) {
+                       struct sk_buff *next = skb_queue_next(list, skb);
+                       if (next != tail &&
+                           TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
+                               end_of_skbs = false;
+                               break;
+                       }
+               }
 
                /* Decided to skip this, advance start seq. */
                start = TCP_SKB_CB(skb)->end_seq;
-               skb = skb->next;
        }
-       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
+       if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
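skb_queue_walk_from_safe(), used in the restructured loop above, iterates from the current skb to the end of the queue while keeping a look-ahead pointer, so the current entry may be unlinked (as tcp_collapse_one() does) without breaking the walk. Roughly:

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
	for (tmp = skb->next;						\
	     skb != (struct sk_buff *)(queue);				\
	     skb = tmp, tmp = skb->next)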
@@ -4620,7 +4618,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                                skb = tcp_collapse_one(sk, skb, list);
-                               if (skb == tail ||
+                               if (!skb ||
+                                   skb == tail ||
                                    tcp_hdr(skb)->syn ||
                                    tcp_hdr(skb)->fin)
                                        return;
@@ -4647,17 +4646,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        head = skb;
 
        for (;;) {
-               skb = skb->next;
+               struct sk_buff *next = NULL;
+
+               if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
+                       next = skb_queue_next(&tp->out_of_order_queue, skb);
+               skb = next;
 
                /* Segment is terminated when we see gap or when
                 * we are at the end of all the queue. */
-               if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
+               if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
                        tcp_collapse(sk, &tp->out_of_order_queue,
                                     head, skb, start, end);
                        head = skb;
-                       if (skb == (struct sk_buff *)&tp->out_of_order_queue)
+                       if (!skb)
                                break;
                        /* Start new segment */
                        start = TCP_SKB_CB(skb)->seq;
@@ -4718,10 +4721,11 @@ static int tcp_prune_queue(struct sock *sk)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
-       tcp_collapse(sk, &sk->sk_receive_queue,
-                    sk->sk_receive_queue.next,
-                    (struct sk_buff *)&sk->sk_receive_queue,
-                    tp->copied_seq, tp->rcv_nxt);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               tcp_collapse(sk, &sk->sk_receive_queue,
+                            skb_peek(&sk->sk_receive_queue),
+                            NULL,
+                            tp->copied_seq, tp->rcv_nxt);
        sk_mem_reclaim(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
@@ -5177,7 +5181,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
         */
 
        if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
-           TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+           TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
+           !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
                int tcp_header_len = tp->tcp_header_len;
 
                /* Timestamp header prediction: tcp_header_len
@@ -5330,8 +5335,8 @@ slow_path:
                return -res;
 
 step5:
-       if (th->ack)
-               tcp_ack(sk, skb, FLAG_SLOWPATH);
+       if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+               goto discard;
 
        tcp_rcv_rtt_measure_ts(sk, skb);
 
@@ -5429,7 +5434,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
-               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
+               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                if (!tp->rx_opt.wscale_ok) {
                        tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
@@ -5530,7 +5535,7 @@ discard:
 
        /* PAWS check. */
        if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
-           tcp_paws_check(&tp->rx_opt, 0))
+           tcp_paws_reject(&tp->rx_opt, 0))
                goto discard_and_undo;
 
        if (th->syn) {
@@ -5668,7 +5673,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
        /* step 5: check the ACK field */
        if (th->ack) {
-               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
+               int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
 
                switch (sk->sk_state) {
                case TCP_SYN_RECV:
@@ -5690,8 +5695,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) <<
                                              tp->rx_opt.snd_wscale;
-                               tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq,
-                                           TCP_SKB_CB(skb)->seq);
+                               tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
                                /* tcp_ack considers this ACK as duplicate
                                 * and does not calculate rtt.