Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[safe/jmp/linux-2.6] / net / ipv4 / tcp_input.c
index 56dcf97..3fddc69 100644 (file)
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
                 * "len" is invariant segment length, including TCP header.
                 */
                len += skb->data - skb_transport_header(skb);
-               if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
+               if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
                    /* If PSH is not set, packet should be
                     * full sized, provided peer TCP is not badly broken.
                     * This observation (if it is correct 8)) allows
@@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
        unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
        hint = min(hint, tp->rcv_wnd / 2);
-       hint = min(hint, TCP_MIN_RCVMSS);
+       hint = min(hint, TCP_MSS_DEFAULT);
        hint = max(hint, TCP_MIN_MSS);
 
        inet_csk(sk)->icsk_ack.rcv_mss = hint;
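
The hunk above only renames the clamping constant; TCP_MSS_DEFAULT is assumed
here to carry the same 536-byte RFC 1122 default MSS that TCP_MIN_RCVMSS did.
A minimal userspace sketch of the seeding chain (hypothetical helper name
seed_rcv_mss(); constant values assumed from the kernel headers of this era):

	/* Sketch only: mirrors the min/max chain in tcp_initialize_rcv_mss(). */
	#include <stdio.h>

	#define TCP_MSS_DEFAULT	536	/* assumed: RFC 1122 default MSS */
	#define TCP_MIN_MSS	 88	/* assumed: kernel lower bound */

	static unsigned int seed_rcv_mss(unsigned int advmss,
					 unsigned int mss_cache,
					 unsigned int rcv_wnd)
	{
		unsigned int hint = advmss < mss_cache ? advmss : mss_cache;

		if (hint > rcv_wnd / 2)
			hint = rcv_wnd / 2;
		if (hint > TCP_MSS_DEFAULT)
			hint = TCP_MSS_DEFAULT;
		if (hint < TCP_MIN_MSS)
			hint = TCP_MIN_MSS;
		return hint;
	}

	int main(void)
	{
		/* A 1460-byte MSS with an ample window still clamps to 536. */
		printf("%u\n", seed_rcv_mss(1460, 1460, 65535));
		return 0;
	}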
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
                tcp_grow_window(sk, skb);
 }
 
-static u32 tcp_rto_min(struct sock *sk)
-{
-       struct dst_entry *dst = __sk_dst_get(sk);
-       u32 rto_min = TCP_RTO_MIN;
-
-       if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
-               rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
-       return rto_min;
-}
-
 /* Called to compute a smoothed rtt estimate. The data fed to this
  * routine either comes from timestamps, or from segments that were
  * known _not_ to have been retransmitted [see Karn/Partridge
@@ -695,7 +685,7 @@ static inline void tcp_set_rto(struct sock *sk)
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
-       inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
+       inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 
        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
@@ -706,8 +696,7 @@ static inline void tcp_set_rto(struct sock *sk)
        /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
         * guarantees that rto is higher.
         */
-       if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
-               inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+       tcp_bound_rto(sk);
 }
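
The helpers substituted above are assumed to be thin wrappers in
include/net/tcp.h around exactly the logic deleted here; a sketch
reconstructed from the removed lines (not the authoritative definitions):

	/* RTO = SRTT/8 + RTTVAR; srtt is kept left-shifted by 3 bits. */
	static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
	{
		return (tp->srtt >> 3) + tp->rttvar;
	}

	/* Cap the retransmission timer at TCP_RTO_MAX. */
	static inline void tcp_bound_rto(const struct sock *sk)
	{
		if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
			inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
	}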
 
 /* Save metrics learned by this TCP session.
@@ -772,7 +761,7 @@ void tcp_update_metrics(struct sock *sk)
                        set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
                }
 
-               if (tp->snd_ssthresh >= 0xFFFF) {
+               if (tcp_in_initial_slowstart(tp)) {
                        /* Slow start still did not finish. */
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
@@ -2311,7 +2300,7 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
  * they differ. Since neither occurs due to loss, TCP should really
  * ignore them.
  */
-static inline int tcp_dupack_heurestics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
 {
        return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
@@ -2436,7 +2425,7 @@ static int tcp_time_to_recover(struct sock *sk)
                return 1;
 
        /* Not-A-Trick#2 : Classic rule... */
-       if (tcp_dupack_heurestics(tp) > tp->reordering)
+       if (tcp_dupack_heuristics(tp) > tp->reordering)
                return 1;
 
        /* Trick#3 : when we use RFC2988 timer restart, fast
@@ -2728,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk)
        }
 }
 
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumption doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could be that such a segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * The main reason for all this complexity is that the connection's dying
+ * time now depends on the validity of retrans_stamp; in particular,
+ * successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static int tcp_any_retrans_done(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *skb;
+
+       if (tp->retrans_out)
+               return 1;
+
+       skb = tcp_write_queue_head(sk);
+       if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+               return 1;
+
+       return 0;
+}
+
 /* Undo during fast recovery after partial ACK. */
 
 static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2740,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
                /* Plain luck! Hole is filled with delayed
                 * packet, rather than with a retransmit.
                 */
-               if (tp->retrans_out == 0)
+               if (!tcp_any_retrans_done(sk))
                        tp->retrans_stamp = 0;
 
                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
@@ -2799,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int state = TCP_CA_Open;
 
-       if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+       if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
                state = TCP_CA_Disorder;
 
        if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2814,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
        tcp_verify_left_out(tp);
 
-       if (!tp->frto_counter && tp->retrans_out == 0)
+       if (!tp->frto_counter && !tcp_any_retrans_done(sk))
                tp->retrans_stamp = 0;
 
        if (flag & FLAG_ECE)
@@ -3709,7 +3727,7 @@ old_ack:
  * the fast version below fails.
  */
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-                      int estab)
+                      u8 **hvpp, int estab)
 {
        unsigned char *ptr;
        struct tcphdr *th = tcp_hdr(skb);
@@ -3793,7 +3811,30 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                 */
                                break;
 #endif
-                       }
+                       case TCPOPT_COOKIE:
+                               /* This option is variable length.
+                                */
+                               switch (opsize) {
+                               case TCPOLEN_COOKIE_BASE:
+                                       /* not yet implemented */
+                                       break;
+                               case TCPOLEN_COOKIE_PAIR:
+                                       /* not yet implemented */
+                                       break;
+                               case TCPOLEN_COOKIE_MIN+0:
+                               case TCPOLEN_COOKIE_MIN+2:
+                               case TCPOLEN_COOKIE_MIN+4:
+                               case TCPOLEN_COOKIE_MIN+6:
+                               case TCPOLEN_COOKIE_MAX:
+                                       /* 16-bit multiple */
+                                       opt_rx->cookie_plus = opsize;
+                                       *hvpp = ptr;
+                               default:
+                                       /* ignore option */
+                                       break;
+                               }
+                               break;
+                       }
 
                        ptr += opsize-2;
                        length -= opsize;
@@ -3821,17 +3862,20 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
  * If it is wrong it falls back on tcp_parse_options().
  */
 static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
-                                 struct tcp_sock *tp)
+                                 struct tcp_sock *tp, u8 **hvpp)
 {
-       if (th->doff == sizeof(struct tcphdr) >> 2) {
+       /* In the spirit of fast parsing, compare doff directly to constant
+        * values.  Because equality is used, short doff can be ignored here.
+        */
+       if (th->doff == (sizeof(*th) / 4)) {
                tp->rx_opt.saw_tstamp = 0;
                return 0;
        } else if (tp->rx_opt.tstamp_ok &&
-                  th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
+                  th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
                if (tcp_parse_aligned_timestamp(tp, th))
                        return 1;
        }
-       tcp_parse_options(skb, &tp->rx_opt, 1);
+       tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
        return 1;
 }
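
The two doff comparisons above encode simple arithmetic; a small userspace
check of the constants involved (assumes the usual 20-byte TCP header and the
12-byte NOP/NOP/timestamp layout behind TCPOLEN_TSTAMP_ALIGNED):

	#include <stdio.h>

	#define TCP_HDR_LEN		20	/* sizeof(struct tcphdr) */
	#define TCPOLEN_TSTAMP_ALIGNED	12	/* assumed: 2 NOPs + 10-byte TS option */

	int main(void)
	{
		/* doff counts 32-bit words of TCP header, options included. */
		printf("bare header doff     = %d\n", TCP_HDR_LEN / 4);		/* 5 */
		printf("header + tstamp doff = %d\n",
		       (TCP_HDR_LEN + TCPOLEN_TSTAMP_ALIGNED) / 4);		/* 8 */
		return 0;
	}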
 
@@ -4436,7 +4480,7 @@ drop:
                }
                __skb_queue_head(&tp->out_of_order_queue, skb);
        } else {
-               struct sk_buff *skb1 = tp->out_of_order_queue.prev;
+               struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
                u32 seq = TCP_SKB_CB(skb)->seq;
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -4453,15 +4497,18 @@ drop:
                }
 
                /* Find place to insert this segment. */
-               do {
+               while (1) {
                        if (!after(TCP_SKB_CB(skb1)->seq, seq))
                                break;
-               } while ((skb1 = skb1->prev) !=
-                        (struct sk_buff *)&tp->out_of_order_queue);
+                       if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
+                               skb1 = NULL;
+                               break;
+                       }
+                       skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
+               }
 
                /* Do skb overlap to previous one? */
-               if (skb1 != (struct sk_buff *)&tp->out_of_order_queue &&
-                   before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+               if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
                        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                /* All the bits are present. Drop. */
                                __kfree_skb(skb);
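
The rewritten loop walks the out-of-order queue backwards through the list
helpers instead of raw prev pointers, with NULL standing for "insert at the
queue head". A condensed sketch of that idiom (hypothetical function name,
not kernel code):

	/* Return the skb to insert after, or NULL to insert at the head. */
	static struct sk_buff *ofo_find_insert_point(struct sk_buff_head *list,
						     u32 seq)
	{
		struct sk_buff *skb1 = skb_peek_tail(list);

		while (skb1) {
			if (!after(TCP_SKB_CB(skb1)->seq, seq))
				return skb1;	/* first skb not past seq */
			if (skb_queue_is_first(list, skb1))
				return NULL;	/* fell off the head */
			skb1 = skb_queue_prev(list, skb1);
		}
		return NULL;			/* empty queue */
	}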
@@ -4473,15 +4520,26 @@ drop:
                                tcp_dsack_set(sk, seq,
                                              TCP_SKB_CB(skb1)->end_seq);
                        } else {
-                               skb1 = skb1->prev;
+                               if (skb_queue_is_first(&tp->out_of_order_queue,
+                                                      skb1))
+                                       skb1 = NULL;
+                               else
+                                       skb1 = skb_queue_prev(
+                                               &tp->out_of_order_queue,
+                                               skb1);
                        }
                }
-               __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+               if (!skb1)
+                       __skb_queue_head(&tp->out_of_order_queue, skb);
+               else
+                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
                /* And clean segments covered by new one as whole. */
-               while ((skb1 = skb->next) !=
-                      (struct sk_buff *)&tp->out_of_order_queue &&
-                      after(end_seq, TCP_SKB_CB(skb1)->seq)) {
+               while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
+                       skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+
+                       if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
+                               break;
                        if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                                 end_seq);
@@ -4502,7 +4560,10 @@ add_sack:
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
                                        struct sk_buff_head *list)
 {
-       struct sk_buff *next = skb->next;
+       struct sk_buff *next = NULL;
+
+       if (!skb_queue_is_last(list, skb))
+               next = skb_queue_next(list, skb);
 
        __skb_unlink(skb, list);
        __kfree_skb(skb);
@@ -4513,6 +4574,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 /* Collapse contiguous sequence of skbs head..tail with
  * sequence numbers start..end.
+ *
+ * If tail is NULL, collapsing continues to the end of the list.
+ *
  * Segments with FIN/SYN are not collapsed (only because this
  * simplifies code)
  */
@@ -4521,15 +4585,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
             struct sk_buff *head, struct sk_buff *tail,
             u32 start, u32 end)
 {
-       struct sk_buff *skb;
+       struct sk_buff *skb, *n;
+       bool end_of_skbs;
 
        /* First, check that queue is collapsible and find
         * the point where collapsing can be useful. */
-       for (skb = head; skb != tail;) {
+       skb = head;
+restart:
+       end_of_skbs = true;
+       skb_queue_walk_from_safe(list, skb, n) {
+               if (skb == tail)
+                       break;
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                        skb = tcp_collapse_one(sk, skb, list);
-                       continue;
+                       if (!skb)
+                               break;
+                       goto restart;
                }
 
                /* The first skb to collapse is:
@@ -4539,16 +4611,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 */
                if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
-                    before(TCP_SKB_CB(skb)->seq, start) ||
-                    (skb->next != tail &&
-                     TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
+                    before(TCP_SKB_CB(skb)->seq, start))) {
+                       end_of_skbs = false;
                        break;
+               }
+
+               if (!skb_queue_is_last(list, skb)) {
+                       struct sk_buff *next = skb_queue_next(list, skb);
+                       if (next != tail &&
+                           TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
+                               end_of_skbs = false;
+                               break;
+                       }
+               }
 
                /* Decided to skip this, advance start seq. */
                start = TCP_SKB_CB(skb)->end_seq;
-               skb = skb->next;
        }
-       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
+       if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
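
The loop above uses skb_queue_walk_from_safe(), which caches the next pointer
so the current skb can be unlinked and freed inside the body; the restart
label resumes the walk from whatever tcp_collapse_one() returned. A sketch of
what the macro is believed to expand to (the canonical definition lives in
include/linux/skbuff.h):

	#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)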
@@ -4593,7 +4673,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                                skb = tcp_collapse_one(sk, skb, list);
-                               if (skb == tail ||
+                               if (!skb ||
+                                   skb == tail ||
                                    tcp_hdr(skb)->syn ||
                                    tcp_hdr(skb)->fin)
                                        return;
@@ -4620,17 +4701,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        head = skb;
 
        for (;;) {
-               skb = skb->next;
+               struct sk_buff *next = NULL;
+
+               if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
+                       next = skb_queue_next(&tp->out_of_order_queue, skb);
+               skb = next;
 
                /* Segment is terminated when we see a gap or when
                 * we are at the end of the whole queue. */
-               if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
+               if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
                        tcp_collapse(sk, &tp->out_of_order_queue,
                                     head, skb, start, end);
                        head = skb;
-                       if (skb == (struct sk_buff *)&tp->out_of_order_queue)
+                       if (!skb)
                                break;
                        /* Start new segment */
                        start = TCP_SKB_CB(skb)->seq;
@@ -4691,10 +4776,11 @@ static int tcp_prune_queue(struct sock *sk)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
-       tcp_collapse(sk, &sk->sk_receive_queue,
-                    sk->sk_receive_queue.next,
-                    (struct sk_buff *)&sk->sk_receive_queue,
-                    tp->copied_seq, tp->rcv_nxt);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               tcp_collapse(sk, &sk->sk_receive_queue,
+                            skb_peek(&sk->sk_receive_queue),
+                            NULL,
+                            tp->copied_seq, tp->rcv_nxt);
        sk_mem_reclaim(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
@@ -4814,11 +4900,11 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
        struct tcp_sock *tp = tcp_sk(sk);
 
            /* More than one full frame received... */
-       if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
+       if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
             /* ... and right edge of window advances far enough.
              * (tcp_recvmsg() will send ACK otherwise). Or...
              */
-            && __tcp_select_window(sk) >= tp->rcv_wnd) ||
+            __tcp_select_window(sk) >= tp->rcv_wnd) ||
            /* We ACK each frame or... */
            tcp_in_quickack_mode(sk) ||
            /* We have out of order data. */
@@ -5039,10 +5125,12 @@ out:
 static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                              struct tcphdr *th, int syn_inerr)
 {
+       u8 *hash_location;
        struct tcp_sock *tp = tcp_sk(sk);
 
        /* RFC1323: H1. Apply PAWS check first. */
-       if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+       if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
+           tp->rx_opt.saw_tstamp &&
            tcp_paws_discard(sk, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5330,11 +5418,13 @@ discard:
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                         struct tcphdr *th, unsigned len)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       u8 *hash_location;
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_cookie_values *cvp = tp->cookie_values;
        int saved_clamp = tp->rx_opt.mss_clamp;
 
-       tcp_parse_options(skb, &tp->rx_opt, 0);
+       tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
 
        if (th->ack) {
                /* rfc793:
@@ -5431,6 +5521,31 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
+
+               if (cvp != NULL &&
+                   cvp->cookie_pair_size > 0 &&
+                   tp->rx_opt.cookie_plus > 0) {
+                       int cookie_size = tp->rx_opt.cookie_plus
+                                       - TCPOLEN_COOKIE_BASE;
+                       int cookie_pair_size = cookie_size
+                                            + cvp->cookie_desired;
+
+                       /* A cookie extension option was sent and returned.
+                        * Note that each incoming SYNACK replaces the
+                        * Responder cookie.  The initial exchange is most
+                        * fragile, as protection against spoofing relies
+                        * entirely upon the sequence and timestamp (above).
+                        * This replacement strategy allows the correct pair to
+                        * pass through, while any others will be filtered via
+                        * Responder verification later.
+                        */
+                       if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
+                               memcpy(&cvp->cookie_pair[cvp->cookie_desired],
+                                      hash_location, cookie_size);
+                               cvp->cookie_pair_size = cookie_pair_size;
+                       }
+               }
+
                smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
 
@@ -5668,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
                                /* tcp_ack considers this ACK as duplicate
                                 * and does not calculate rtt.
-                                * Fix it at least with timestamps.
+                                * Force it here.
                                 */
-                               if (tp->rx_opt.saw_tstamp &&
-                                   tp->rx_opt.rcv_tsecr && !tp->srtt)
-                                       tcp_ack_saw_tstamp(sk, 0);
+                               tcp_ack_update_rtt(sk, 0, 0);
 
                                if (tp->rx_opt.tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;