tcp: update minisock/timewait processing for TCP Cookie Transactions (TCPCT): copy cookie_values to the child socket, offset ISN-relative sequence numbers by tcp_s_data_size(), and switch to the tcp_parse_options() signature taking a hash_location pointer
[safe/jmp/linux-2.6] / net / ipv4 / tcp_minisocks.c
index 463d51b..f206ee5 100644 (file)
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
-#ifdef CONFIG_SYSCTL
-#define SYNC_INIT 0 /* let the user enable it */
-#else
-#define SYNC_INIT 1
-#endif
-
-int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
+int sysctl_tcp_syncookies __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_syncookies);
 
 int sysctl_tcp_abort_on_overflow __read_mostly;
@@ -96,13 +90,14 @@ enum tcp_tw_status
 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
 {
-       struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        struct tcp_options_received tmp_opt;
+       u8 *hash_location;
+       struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        int paws_reject = 0;
 
+       tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-               tmp_opt.tstamp_ok = 1;
-               tcp_parse_options(skb, &tmp_opt, 1, NULL);
+               tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
@@ -389,14 +384,43 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
-               struct tcp_sock *newtp;
+               struct tcp_sock *newtp = tcp_sk(newsk);
+               struct tcp_sock *oldtp = tcp_sk(sk);
+               struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+
+               /* TCP Cookie Transactions require space for the cookie pair,
+                * as it differs for each connection.  There is no need to
+                * copy any s_data_payload stored at the original socket.
+                * Failure will prevent resuming the connection.
+                *
+                * Presumed copied, in order of appearance:
+                *      cookie_in_always, cookie_out_never
+                */
+               if (oldcvp != NULL) {
+                       struct tcp_cookie_values *newcvp =
+                               kzalloc(sizeof(*newtp->cookie_values),
+                                       GFP_ATOMIC);
+
+                       if (newcvp != NULL) {
+                               kref_init(&newcvp->kref);
+                               newcvp->cookie_desired =
+                                               oldcvp->cookie_desired;
+                               newtp->cookie_values = newcvp;
+                       } else {
+                               /* Not Yet Implemented */
+                               newtp->cookie_values = NULL;
+                       }
+               }
 
                /* Now setup tcp_sock */
-               newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
-               newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
-               newtp->snd_up = treq->snt_isn + 1;
+
+               newtp->rcv_wup = newtp->copied_seq =
+               newtp->rcv_nxt = treq->rcv_isn + 1;
+
+               newtp->snd_sml = newtp->snd_una =
+               newtp->snd_nxt = newtp->snd_up =
+                       treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
                tcp_prequeue_init(newtp);
 
@@ -429,8 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
-               newtp->write_seq = treq->snt_isn + 1;
-               newtp->pushed_seq = newtp->write_seq;
+               newtp->write_seq = newtp->pushed_seq =
+                       treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
                newtp->rx_opt.saw_tstamp = 0;
 
@@ -476,7 +500,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                if (newtp->af_specific->md5_lookup(sk, newsk))
                        newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-               if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
+               if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
@@ -495,16 +519,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           struct request_sock **prev)
 {
+       struct tcp_options_received tmp_opt;
+       u8 *hash_location;
+       struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
-       struct tcp_options_received tmp_opt;
-       struct sock *child;
-       struct dst_entry *dst = inet_csk_route_req(sk, req);
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
-               tcp_parse_options(skb, &tmp_opt, 0, dst);
+               tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
@@ -517,8 +541,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                }
        }
 
-       dst_release(dst);
-
        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
@@ -540,7 +562,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 */
-               req->rsk_ops->rtx_syn_ack(sk, req);
+               req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
        }
 
@@ -599,7 +621,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         * Invalid ACK: reset will be sent by listening socket
         */
        if ((flg & TCP_FLAG_ACK) &&
-           (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
+           (TCP_SKB_CB(skb)->ack_seq !=
+            tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
                return sk;
 
        /* Also, it would be not so bad idea to check rcv_tsecr, which