Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43bbba7..f206ee5 100644
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
-#ifdef CONFIG_SYSCTL
-#define SYNC_INIT 0 /* let the user enable it */
-#else
-#define SYNC_INIT 1
-#endif
-
-int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
+int sysctl_tcp_syncookies __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_syncookies);
 
 int sysctl_tcp_abort_on_overflow __read_mostly;
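
Note: the default for tcp_syncookies flips from "0 when CONFIG_SYSCTL is set, so the user must enable it" to unconditionally 1; with CONFIG_SYSCTL the admin can still turn it off at runtime. The knob is exposed through the usual ctl_table entry in net/ipv4/sysctl_net_ipv4.c, roughly (the exact field list varies by kernel version):

	{
		.procname	= "tcp_syncookies",
		.data		= &sysctl_tcp_syncookies,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
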
@@ -96,13 +90,14 @@ enum tcp_tw_status
 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
 {
-       struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        struct tcp_options_received tmp_opt;
+       u8 *hash_location;
+       struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        int paws_reject = 0;
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-               tcp_parse_options(skb, &tmp_opt, 0);
+               tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
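
tcp_parse_options() grows an out-parameter for TCP Cookie Transactions (TCPCT): on return, hash_location points into the skb at the cookie value of a TCPOPT_COOKIE option, if one was present. In this function the result is not used further; the call only needs the parsed timestamps. The new prototype, paraphrased from include/net/tcp.h:

	void tcp_parse_options(struct sk_buff *skb,
			       struct tcp_options_received *opt_rx,
			       u8 **hvpp, int estab);

The same signature change appears again in tcp_check_req() below.
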
@@ -128,7 +123,8 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                        goto kill_with_rst;
 
                /* Dup ACK? */
-               if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+               if (!th->ack ||
+                   !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
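
The new !th->ack test makes the drop explicit: a segment with no ACK bit cannot be a duplicate ACK, and in TIME-WAIT it is silently discarded rather than answered. Restated as a hypothetical helper (not part of the patch, shown only to spell out the condition):

	/* true if the segment should be silently dropped in TIME-WAIT:
	 * no ACK bit, nothing beyond rcv_nxt, or a bare (zero-length) ACK */
	static bool tw_drop_silently(const struct tcphdr *th,
				     const struct sk_buff *skb,
				     const struct tcp_timewait_sock *tcptw)
	{
		return !th->ack ||
		       !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		       TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq;
	}
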
@@ -321,7 +317,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        if (key != NULL) {
                                memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
                                tcptw->tw_md5_keylen = key->keylen;
-                               if (tcp_alloc_md5sig_pool() == NULL)
+                               if (tcp_alloc_md5sig_pool(sk) == NULL)
                                        BUG();
                        }
                } while (0);
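
tcp_alloc_md5sig_pool() now takes the socket, presumably so the pool allocation can honour sk->sk_allocation rather than a hard-coded GFP_KERNEL. Its declaration as of this series, roughly:

	extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
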
@@ -362,7 +358,7 @@ void tcp_twsk_destructor(struct sock *sk)
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_timewait_sock *twsk = tcp_twsk(sk);
        if (twsk->tw_md5_keylen)
-               tcp_put_md5sig_pool();
+               tcp_free_md5sig_pool();
 #endif
 }
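
This corrects the pairing of the MD5 pool API: tcp_alloc_md5sig_pool() takes a reference on the global per-CPU pool and tcp_free_md5sig_pool() drops it, whereas tcp_get_md5sig_pool()/tcp_put_md5sig_pool() merely bracket preempt-safe per-CPU access (get_cpu()/put_cpu()) without touching the refcount. Calling tcp_put_md5sig_pool() here therefore leaked the allocation reference taken in tcp_time_wait() above. Paraphrased from include/net/tcp.h:

	extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *); /* takes a ref */
	extern void tcp_free_md5sig_pool(void);                              /* drops the ref */
	/* tcp_get_md5sig_pool()/tcp_put_md5sig_pool(): per-CPU access only */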
 
@@ -388,14 +384,43 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
-               struct tcp_sock *newtp;
+               struct tcp_sock *newtp = tcp_sk(newsk);
+               struct tcp_sock *oldtp = tcp_sk(sk);
+               struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+
+               /* TCP Cookie Transactions require space for the cookie pair,
+                * as it differs for each connection.  There is no need to
+                * copy any s_data_payload stored at the original socket.
+                * Failure will prevent resuming the connection.
+                *
+                * Presumed copied, in order of appearance:
+                *      cookie_in_always, cookie_out_never
+                */
+               if (oldcvp != NULL) {
+                       struct tcp_cookie_values *newcvp =
+                               kzalloc(sizeof(*newtp->cookie_values),
+                                       GFP_ATOMIC);
+
+                       if (newcvp != NULL) {
+                               kref_init(&newcvp->kref);
+                               newcvp->cookie_desired =
+                                               oldcvp->cookie_desired;
+                               newtp->cookie_values = newcvp;
+                       } else {
+                               /* Not Yet Implemented */
+                               newtp->cookie_values = NULL;
+                       }
+               }
 
                /* Now setup tcp_sock */
-               newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
-               newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
-               newtp->snd_up = treq->snt_isn + 1;
+
+               newtp->rcv_wup = newtp->copied_seq =
+               newtp->rcv_nxt = treq->rcv_isn + 1;
+
+               newtp->snd_sml = newtp->snd_una =
+               newtp->snd_nxt = newtp->snd_up =
+                       treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
                tcp_prequeue_init(newtp);
 
@@ -409,7 +434,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->retrans_out = 0;
                newtp->sacked_out = 0;
                newtp->fackets_out = 0;
-               newtp->snd_ssthresh = 0x7fffffff;
+               newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 
                /* So many TCP implementations out there (incorrectly) count the
                 * initial SYN frame in their delayed-ACK and congestion control
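
The magic 0x7fffffff gains a name: include/net/tcp.h defines TCP_INFINITE_SSTHRESH to the same value and pairs it with a helper, so "still in initial slow start" checks no longer compare against a raw constant:

	#define TCP_INFINITE_SSTHRESH	0x7fffffff

	static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
	{
		return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
	}
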
@@ -428,8 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
-               newtp->write_seq = treq->snt_isn + 1;
-               newtp->pushed_seq = newtp->write_seq;
+               newtp->write_seq = newtp->pushed_seq =
+                       treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
                newtp->rx_opt.saw_tstamp = 0;
 
@@ -475,7 +500,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                if (newtp->af_specific->md5_lookup(sk, newsk))
                        newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-               if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
+               if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
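
TCP_MIN_RCVMSS is folded into the consolidated TCP_MSS_DEFAULT; both are 536 bytes (the RFC 1122 default MSS), so behaviour here is unchanged. From include/net/tcp.h, roughly:

	#define TCP_MSS_DEFAULT		536U	/* IPv4 (RFC1122, RFC2581) */
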
@@ -494,15 +519,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           struct request_sock **prev)
 {
+       struct tcp_options_received tmp_opt;
+       u8 *hash_location;
+       struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
-       struct tcp_options_received tmp_opt;
-       struct sock *child;
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
-               tcp_parse_options(skb, &tmp_opt, 0);
+               tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
@@ -536,7 +562,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 */
-               req->rsk_ops->rtx_syn_ack(sk, req);
+               req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
        }
 
@@ -595,7 +621,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         * Invalid ACK: reset will be sent by listening socket
         */
        if ((flg & TCP_FLAG_ACK) &&
-           (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
+           (TCP_SKB_CB(skb)->ack_seq !=
+            tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
                return sk;
 
        /* Also, it would be not so bad idea to check rcv_tsecr, which
@@ -640,8 +667,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        if (!(flg & TCP_FLAG_ACK))
                return NULL;
 
-       /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-       if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+       /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+       if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                return NULL;
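
Semantic fix for TCP_DEFER_ACCEPT: previously a bare ACK was dropped for as long as the option was set, so a client that never sent data could be stranded forever; now it is dropped only while req->retrans is below the rskq_defer_accept threshold derived from the user's timeout, after which the connection completes normally. The userspace knob is unchanged; listen_fd below is a hypothetical listening socket:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	int secs = 5;	/* give the peer ~5 seconds to send data after the handshake */
	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
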
@@ -656,29 +683,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;
-#ifdef CONFIG_TCP_MD5SIG
-       else {
-               /* Copy over the MD5 key from the original socket */
-               struct tcp_md5sig_key *key;
-               struct tcp_sock *tp = tcp_sk(sk);
-               key = tp->af_specific->md5_lookup(sk, child);
-               if (key != NULL) {
-                       /*
-                        * We're using one, so create a matching key on the
-                        * newsk structure. If we fail to get memory then we
-                        * end up not copying the key across. Shucks.
-                        */
-                       char *newkey = kmemdup(key->key, key->keylen,
-                                              GFP_ATOMIC);
-                       if (newkey) {
-                               if (!tcp_alloc_md5sig_pool())
-                                       BUG();
-                               tp->af_specific->md5_add(child, child, newkey,
-                                                        key->keylen);
-                       }
-               }
-       }
-#endif
 
        inet_csk_reqsk_queue_unlink(sk, req, prev);
        inet_csk_reqsk_queue_removed(sk, req);
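
The MD5 key copy is dropped from tcp_check_req() as redundant: the address-family syn_recv_sock implementations (e.g. tcp_v4_syn_recv_sock()) already duplicate the listener's key onto the child, and they have the full request context needed to pick the right peer address.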