[INET]: Consolidate xxx_frag_create()
[safe/jmp/linux-2.6] net/ipv4/tcp_output.c
index 29c53fb..324b420 100644
@@ -40,7 +40,6 @@
 
 #include <linux/compiler.h>
 #include <linux/module.h>
-#include <linux/smp_lock.h>
 
 /* People can turn this off for buggy TCP's found in printers etc. */
 int sysctl_tcp_retrans_collapse __read_mostly = 1;
@@ -62,12 +61,25 @@ int sysctl_tcp_base_mss __read_mostly = 512;
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-static void update_send_head(struct sock *sk, struct tcp_sock *tp,
-                            struct sk_buff *skb)
+static inline void tcp_packets_out_inc(struct sock *sk,
+                                      const struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+       int orig = tp->packets_out;
+
+       tp->packets_out += tcp_skb_pcount(skb);
+       if (!orig)
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                         inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+}
+
+static void update_send_head(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
-       tcp_packets_out_inc(sk, tp, skb);
+       tcp_packets_out_inc(sk, skb);
 }
 
 /* SND.NXT, if window was not shrunk.
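
The consolidated tcp_packets_out_inc() above derives tp from sk and arms the retransmit timer only when packets_out goes from zero to nonzero, i.e. when the pipe transitions from empty to carrying data; later increments ride on the already-pending timer. A minimal standalone sketch of that pattern (hypothetical struct and names, not the kernel API):

    #include <stdio.h>

    /* Sketch: arm a retransmit timer only on the 0 -> nonzero transition
     * of the in-flight count; later increments leave the timer alone. */
    struct flow {
            unsigned int packets_out;   /* segments currently in flight */
            int rto_armed;              /* stands in for the ICSK xmit timer */
    };

    static void flow_packets_out_inc(struct flow *f, unsigned int pcount)
    {
            unsigned int orig = f->packets_out;

            f->packets_out += pcount;
            if (!orig)                  /* pipe was empty: start the RTO clock */
                    f->rto_armed = 1;
    }

    int main(void)
    {
            struct flow f = { 0, 0 };

            flow_packets_out_inc(&f, 2);   /* arms the timer */
            flow_packets_out_inc(&f, 1);   /* timer already pending */
            printf("in flight: %u, armed: %d\n", f.packets_out, f.rto_armed);
            return 0;
    }
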
@@ -76,8 +88,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp,
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  * invalid. OK, let's make this for now:
  */
-static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
+
        if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
                return tp->snd_nxt;
        else
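
tcp_acceptable_seq() leans on before() for the window check; TCP sequence numbers live in a 2^32 modular space, so a plain '<' misorders values across wraparound. The kernel's before() (include/net/tcp.h) uses the signed-difference trick, reproduced here as a self-contained demo:

    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe comparison, as defined in include/net/tcp.h:
     * before(a, b) is true when a precedes b in modular sequence space. */
    static int before(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
            /* Plain '<' would call 0xfffffff0 "after" 0x10; modular order
             * says it sits just before the wrap, so it precedes 0x10. */
            printf("%d %d\n", before(10, 20), before(0xfffffff0u, 0x10u)); /* 1 1 */
            return 0;
    }
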
@@ -267,6 +281,56 @@ static u16 tcp_select_window(struct sock *sk)
        return new_win;
 }
 
+static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
+                                      struct sk_buff *skb)
+{
+       TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+       if (!(tp->ecn_flags&TCP_ECN_OK))
+               TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+}
+
+static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tp->ecn_flags = 0;
+       if (sysctl_tcp_ecn) {
+               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
+               tp->ecn_flags = TCP_ECN_OK;
+       }
+}
+
+static __inline__ void
+TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
+{
+       if (inet_rsk(req)->ecn_ok)
+               th->ece = 1;
+}
+
+static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
+                               int tcp_header_len)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (tp->ecn_flags & TCP_ECN_OK) {
+               /* Not-retransmitted data segment: set ECT and inject CWR. */
+               if (skb->len != tcp_header_len &&
+                   !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
+                       INET_ECN_xmit(sk);
+                       if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
+                               tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+                               tcp_hdr(skb)->cwr = 1;
+                               skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+                       }
+               } else {
+                       /* ACK or retransmitted segment: clear ECT|CE */
+                       INET_ECN_dontxmit(sk);
+               }
+               if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
+                       tcp_hdr(skb)->ece = 1;
+       }
+}
+
 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
                                         __u32 tstamp, __u8 **md5_hash)
 {
@@ -406,7 +470,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        /* If congestion control is doing timestamping, we must
         * take such a timestamp before we potentially clone/copy.
         */
-       if (icsk->icsk_ca_ops->rtt_sample)
+       if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
                __net_timestamp(skb);
 
        if (likely(clone_it)) {
@@ -516,7 +580,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                             md5 ? &md5_hash_location :
 #endif
                                             NULL);
-               TCP_ECN_send(sk, tp, skb, tcp_header_size);
+               TCP_ECN_send(sk, skb, tcp_header_size);
        }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -582,16 +646,32 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
                skb_shinfo(skb)->gso_size = 0;
                skb_shinfo(skb)->gso_type = 0;
        } else {
-               unsigned int factor;
-
-               factor = skb->len + (mss_now - 1);
-               factor /= mss_now;
-               skb_shinfo(skb)->gso_segs = factor;
+               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
                skb_shinfo(skb)->gso_size = mss_now;
                skb_shinfo(skb)->gso_type = sk->sk_gso_type;
        }
 }
 
+/* When a modification to fackets_out becomes necessary, we need to check
+ * whether skb is counted in fackets_out or not. Another important thing
+ * is to tweak the SACK fastpath hint too, as it would otherwise overwrite
+ * all the changes unless the hint is changed as well.
+ */
+static void tcp_adjust_fackets_out(struct tcp_sock *tp, struct sk_buff *skb,
+                                  int decr)
+{
+       if (!tp->sacked_out || tcp_is_reno(tp))
+               return;
+
+       if (!before(tp->highest_sack, TCP_SKB_CB(skb)->seq))
+               tp->fackets_out -= decr;
+
+       /* cnt_hint is "off-by-one" compared with fackets_out (see sacktag) */
+       if (tp->fastpath_skb_hint != NULL &&
+           after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+               tp->fastpath_cnt_hint -= decr;
+}
+
 /* Function to create two new TCP segments.  Shrinks the given segment
  * to the specified size and appends a new segment with the rest of the
  * packet to the list.  This won't be called frequently, I hope.
@@ -607,7 +687,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 
        BUG_ON(len > skb->len);
 
-       clear_all_retrans_hints(tp);
+       tcp_clear_retrans_hints_partial(tp);
        nsize = skb_headlen(skb) - len;
        if (nsize < 0)
                nsize = 0;
@@ -632,6 +712,10 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
 
+       if (tcp_is_sack(tp) && tp->sacked_out &&
+           (TCP_SKB_CB(skb)->seq == tp->highest_sack))
+               tp->highest_sack = TCP_SKB_CB(buff)->seq;
+
        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
@@ -680,24 +764,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= diff;
 
-               if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        tp->lost_out -= diff;
-                       tp->left_out -= diff;
-               }
-
-               if (diff > 0) {
-                       /* Adjust Reno SACK estimate. */
-                       if (!tp->rx_opt.sack_ok) {
-                               tp->sacked_out -= diff;
-                               if ((int)tp->sacked_out < 0)
-                                       tp->sacked_out = 0;
-                               tcp_sync_left_out(tp);
-                       }
 
-                       tp->fackets_out -= diff;
-                       if ((int)tp->fackets_out < 0)
-                               tp->fackets_out = 0;
+               /* Adjust Reno SACK estimate. */
+               if (tcp_is_reno(tp) && diff > 0) {
+                       tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
+                       tcp_verify_left_out(tp);
                }
+               tcp_adjust_fackets_out(tp, skb, diff);
        }
 
        /* Link BUFF into the send queue. */
@@ -733,7 +808,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
        }
        skb_shinfo(skb)->nr_frags = k;
 
-       skb->tail = skb->data;
+       skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
 }
@@ -927,8 +1002,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
 /* Congestion window validation. (RFC2861) */
 
-static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out = tp->packets_out;
 
        if (packets_out >= tp->snd_cwnd) {
@@ -1031,8 +1107,10 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
        if (nonagle & TCP_NAGLE_PUSH)
                return 1;
 
-       /* Don't use the nagle rule for urgent data (or for the final FIN).  */
-       if (tp->urg_mode ||
+       /* Don't use the nagle rule for urgent data (or for the final FIN).
+        * Nagle can be ignored during F-RTO too (see RFC4138).
+        */
+       if (tp->urg_mode || (tp->frto_counter == 2) ||
            (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
                return 1;
 
@@ -1076,8 +1154,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
        return cwnd_quota;
 }
 
-int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+int tcp_may_send_now(struct sock *sk)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
 
        return (skb &&
@@ -1144,8 +1223,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
 
@@ -1324,7 +1404,7 @@ static int tcp_mtu_probe(struct sock *sk)
                /* Decrement cwnd here because we are sending
                * effectively two packets. */
                tp->snd_cwnd--;
-               update_send_head(sk, tp, nskb);
+               update_send_head(sk, nskb);
 
                icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
                tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
@@ -1387,7 +1467,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                                                      nonagle : TCP_NAGLE_PUSH))))
                                break;
                } else {
-                       if (tcp_tso_should_defer(sk, tp, skb))
+                       if (tcp_tso_should_defer(sk, skb))
                                break;
                }
 
@@ -1416,14 +1496,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
                /* Advance the send_head.  This one is sent out.
                 * This call will increment packets_out.
                 */
-               update_send_head(sk, tp, skb);
+               update_send_head(sk, skb);
 
                tcp_minshall_update(tp, mss_now, skb);
                sent_pkts++;
        }
 
        if (likely(sent_pkts)) {
-               tcp_cwnd_validate(sk, tp);
+               tcp_cwnd_validate(sk);
                return 0;
        }
        return !tp->packets_out && tcp_send_head(sk);
@@ -1433,14 +1513,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
  * TCP_CORK or attempt at coalescing tiny packets.
  * The socket must be locked by the caller.
  */
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
-                              unsigned int cur_mss, int nonagle)
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+                              int nonagle)
 {
        struct sk_buff *skb = tcp_send_head(sk);
 
        if (skb) {
                if (tcp_write_xmit(sk, cur_mss, nonagle))
-                       tcp_check_probe_timer(sk, tp);
+                       tcp_check_probe_timer(sk);
        }
 }
 
@@ -1484,8 +1564,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
                if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
-                       update_send_head(sk, tp, skb);
-                       tcp_cwnd_validate(sk, tp);
+                       update_send_head(sk, skb);
+                       tcp_cwnd_validate(sk);
                        return;
                }
        }
@@ -1600,7 +1680,7 @@ u32 __tcp_select_window(struct sock *sk)
                if (window <= free_space - mss || window > free_space)
                        window = (free_space/mss)*mss;
                else if (mss == full_space &&
-                        free_space > window + full_space/2)
+                        free_space > window + full_space/2)
                        window = free_space;
        }
 
@@ -1639,13 +1719,16 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                BUG_ON(tcp_skb_pcount(skb) != 1 ||
                       tcp_skb_pcount(next_skb) != 1);
 
-               /* changing transmit queue under us so clear hints */
-               clear_all_retrans_hints(tp);
+               if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out &&
+                   (TCP_SKB_CB(next_skb)->seq == tp->highest_sack)))
+                       return;
 
                /* Ok.  We will be able to collapse the packet. */
                tcp_unlink_write_queue(next_skb, sk);
 
-               memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
+               skb_copy_from_linear_data(next_skb,
+                                         skb_put(skb, next_skb_size),
+                                         next_skb_size);
 
                if (next_skb->ip_summed == CHECKSUM_PARTIAL)
                        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1666,21 +1749,23 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
                if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= tcp_skb_pcount(next_skb);
-               if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
+               if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
                        tp->lost_out -= tcp_skb_pcount(next_skb);
-                       tp->left_out -= tcp_skb_pcount(next_skb);
-               }
                /* Reno case is special. Sigh... */
-               if (!tp->rx_opt.sack_ok && tp->sacked_out) {
+               if (tcp_is_reno(tp) && tp->sacked_out)
                        tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
-                       tp->left_out -= tcp_skb_pcount(next_skb);
+
+               tcp_adjust_fackets_out(tp, next_skb, tcp_skb_pcount(next_skb));
+               tp->packets_out -= tcp_skb_pcount(next_skb);
+
+               /* changed transmit queue under us so clear hints */
+               tcp_clear_retrans_hints_partial(tp);
+               /* manually tune sacktag skb hint */
+               if (tp->fastpath_skb_hint == next_skb) {
+                       tp->fastpath_skb_hint = skb;
+                       tp->fastpath_cnt_hint -= tcp_skb_pcount(skb);
                }
 
-               /* Not quite right: it can be > snd.fack, but
-                * it is better to underestimate fackets.
-                */
-               tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
-               tcp_packets_out_dec(tp, next_skb);
                sk_stream_free_skb(sk, next_skb);
        }
 }
@@ -1714,12 +1799,12 @@ void tcp_simple_retransmit(struct sock *sk)
                }
        }
 
-       clear_all_retrans_hints(tp);
+       tcp_clear_all_retrans_hints(tp);
 
        if (!lost)
                return;
 
-       tcp_sync_left_out(tp);
+       tcp_verify_left_out(tp);
 
        /* Don't muck with the congestion window here.
         * Reason is that we do not increase amount of _data_
@@ -1829,6 +1914,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                                printk(KERN_DEBUG "retrans_out leaked.\n");
                }
 #endif
+               if (!tp->retrans_out)
+                       tp->lost_retrans_low = tp->snd_nxt;
                TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
                tp->retrans_out += tcp_skb_pcount(skb);
 
@@ -1921,40 +2008,35 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                return;
 
        /* No forward retransmissions in Reno are possible. */
-       if (!tp->rx_opt.sack_ok)
+       if (tcp_is_reno(tp))
                return;
 
        /* Yeah, we have to make a difficult choice between forward transmission
         * and retransmission... Both ways have their merits...
         *
         * For now we do not retransmit anything, while we have some new
-        * segments to send.
+        * segments to send. In the other cases, follow rule 3 for
+        * NextSeg() specified in RFC3517.
         */
 
-       if (tcp_may_send_now(sk, tp))
+       if (tcp_may_send_now(sk))
+               return;
+
+       /* If nothing is SACKed, highest_sack in the loop won't be valid */
+       if (!tp->sacked_out)
                return;
 
-       if (tp->forward_skb_hint) {
+       if (tp->forward_skb_hint)
                skb = tp->forward_skb_hint;
-               packet_cnt = tp->forward_cnt_hint;
-       } else{
+       else
                skb = tcp_write_queue_head(sk);
-               packet_cnt = 0;
-       }
 
        tcp_for_write_queue_from(skb, sk) {
                if (skb == tcp_send_head(sk))
                        break;
-               tp->forward_cnt_hint = packet_cnt;
                tp->forward_skb_hint = skb;
 
-               /* Similar to the retransmit loop above we
-                * can pretend that the retransmitted SKB
-                * we send out here will be composed of one
-                * real MSS sized packet because tcp_retransmit_skb()
-                * will fragment it if necessary.
-                */
-               if (++packet_cnt > tp->fackets_out)
+               if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack))
                        break;
 
                if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
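
The forward-retransmit walk no longer counts packets against fackets_out; it stops as soon as a segment's seq is after highest_sack, since nothing beyond the highest SACKed block can satisfy rule 3 of RFC 3517's NextSeg(). A compact demo of that stopping rule with the kernel-style after() comparison:

    #include <stdint.h>
    #include <stdio.h>

    /* after(a, b) in kernel terms: true when a comes later than b in
     * modular sequence space (defined via before() in include/net/tcp.h). */
    static int after(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq2 - seq1) < 0;
    }

    int main(void)
    {
            uint32_t seqs[] = { 1000, 2000, 3000, 4000, 5000 };
            uint32_t highest_sack = 3000;   /* start of highest SACKed block */

            /* Walk candidates in order; past highest_sack nothing can
             * satisfy NextSeg() rule 3, so stop, as in the hunk above. */
            for (int i = 0; i < 5; i++) {
                    if (after(seqs[i], highest_sack))
                            break;
                    printf("candidate seq %u\n", seqs[i]);
            }
            return 0;
    }
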
@@ -2021,17 +2103,16 @@ void tcp_send_fin(struct sock *sk)
                TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
                tcp_queue_skb(sk, skb);
        }
-       __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
+       __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
 }
 
 /* We get here when a process closes a file descriptor (either due to
  * an explicit close() or as a byproduct of exit()'ing) and there
  * was unread data in the receive queue.  This behavior is recommended
- * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
+ * by RFC 2525, section 2.17.  -DaveM
  */
 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
        /* NOTE: No TCP options attached and we never retransmit this. */
@@ -2051,7 +2132,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        skb_shinfo(skb)->gso_type = 0;
 
        /* Send it off. */
-       TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
+       TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2269,7 +2350,7 @@ int tcp_connect(struct sock *sk)
        skb_reserve(buff, MAX_TCP_HEADER);
 
        TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
-       TCP_ECN_send_syn(sk, tp, buff);
+       TCP_ECN_send_syn(sk, buff);
        TCP_SKB_CB(buff)->sacked = 0;
        skb_shinfo(buff)->gso_segs = 1;
        skb_shinfo(buff)->gso_size = 0;
@@ -2361,7 +2442,6 @@ void tcp_send_ack(struct sock *sk)
 {
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != TCP_CLOSE) {
-               struct tcp_sock *tp = tcp_sk(sk);
                struct sk_buff *buff;
 
                /* We are not putting this on the write queue, so
@@ -2387,7 +2467,7 @@ void tcp_send_ack(struct sock *sk)
                skb_shinfo(buff)->gso_type = 0;
 
                /* Send it off, this clears delayed acks for us. */
-               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
+               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
                TCP_SKB_CB(buff)->when = tcp_time_stamp;
                tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
        }
@@ -2465,7 +2545,7 @@ int tcp_write_wakeup(struct sock *sk)
                        TCP_SKB_CB(skb)->when = tcp_time_stamp;
                        err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
                        if (!err) {
-                               update_send_head(sk, tp, skb);
+                               update_send_head(sk, skb);
                        }
                        return err;
                } else {