/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Changes:
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov :	Transparent proxy revived after year
 *				coma.
 *	Andi Kleen	:	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
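/*
 * A minimal illustrative sketch, not part of the original file: the
 * reuse test above, distilled. A TIME_WAIT socket may be taken over
 * when it has a recent timestamp and either no TIME_WAIT bucket was
 * handed in (twp == NULL) or tcp_tw_reuse is enabled and at least one
 * second has passed since the last timestamp was seen.
 */
static inline int __maybe_unused tw_reuse_allowed_sketch(u32 ts_recent_stamp,
							 int have_twp)
{
	return ts_recent_stamp &&
	       (!have_twp ||
		(sysctl_tcp_tw_reuse &&
		 get_seconds() - ts_recent_stamp > 1));
}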
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
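/*
 * For illustration only (not kernel code): tcp_v4_connect() is what
 * ultimately runs when userspace calls connect(2) on an AF_INET
 * stream socket, e.g.:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * -EAFNOSUPPORT, -EINVAL and the routing errors above propagate
 * straight back to that call.
 */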
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
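/*
 * Illustrative sketch only (not used above): the condition gating the
 * fast-path retransmit is simply that the cached path-MTU cookie still
 * exceeds the MTU reported by the ICMP FRAG_NEEDED message.
 */
static inline int __maybe_unused pmtu_needs_shrink_sketch(u32 pmtu_cookie,
							  u32 reported_mtu)
{
	return pmtu_cookie > reported_mtu;
}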
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now. */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
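/*
 * Illustrative sketch only: how the RTO-revert path above computes the
 * time left on the un-backed-off retransmit timer, measured from the
 * transmit timestamp of the head of the write queue.
 */
static inline u32 __maybe_unused rto_remaining_sketch(u32 rto, u32 now,
						      u32 sent_when)
{
	return rto - min(rto, now - sent_when);
}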
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->inet_saddr,
					  inet->inet_daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->inet_saddr,
					 inet->inet_daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
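/*
 * Illustrative sketch only (assumes csum_tcpudp_magic() from
 * <net/checksum.h>): the pseudo-header seeding done above amounts to
 * folding saddr/daddr/length/IPPROTO_TCP into the checksum that the
 * NIC or __skb_checksum_complete() later finishes over the payload.
 */
static inline __sum16 __maybe_unused
tcp_pseudo_csum_sketch(__be32 saddr, __be32 daddr, unsigned int len,
		       __wsum payload_csum)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP,
				 payload_csum);
}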
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So we build the reply based only on parameters
 *	that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
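/*
 * Illustrative sketch only: the acknowledgment number chosen above for
 * a RST answering a non-ACK segment acknowledges everything the
 * offending segment occupied in sequence space, i.e. its payload plus
 * one for each of SYN and FIN.
 */
static inline u32 __maybe_unused rst_ack_seq_sketch(const struct tcphdr *th,
						    unsigned int skb_len)
{
	return ntohl(th->seq) + th->syn + th->fin +
	       skb_len - (th->doff << 2);
}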
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	return __tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
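/*
 * For illustration only (not kernel code): the option parsed above is
 * installed from userspace roughly like this:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing a zero tcpm_keylen deletes the key for that peer, as the
 * code above shows.
 */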
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	dst = inet_csk_route_req(sk, req);

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0, dst);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, dst, req, NULL) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
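/*
 * Illustrative sketch only: the "last quarter" rule above. Without
 * syncookies, a new open request is refused once fewer than a quarter
 * of the sysctl_max_syn_backlog slots remain, unless the peer is
 * already proven alive (cached timestamp or cached route RTT).
 */
static inline int __maybe_unused syn_backlog_nearly_full_sketch(int backlog_max,
								int queue_len)
{
	return backlog_max - queue_len < (backlog_max >> 2);
}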
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->inet_daddr) {
		peer = inet_getpeer(inet->inet_daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}
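/*
 * Illustrative sketch only: the update rule shared by the two helpers
 * above. The peer's cached timestamp is refreshed when ours is at
 * least as new, or when the cached entry is older than TCP_PAWS_MSL
 * and was recorded no later than our ts_recent_stamp.
 */
static inline int __maybe_unused peer_ts_should_update_sketch(u32 peer_ts,
		u32 peer_ts_stamp, u32 ts_recent, u32 ts_recent_stamp)
{
	return (s32)(peer_ts - ts_recent) <= 0 ||
	       ((u32)get_seconds() - peer_ts_stamp > TCP_PAWS_MSL &&
		peer_ts_stamp <= ts_recent_stamp);
}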
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it really must be empty */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		st->bucket = 0;
		ilb = &tcp_hashinfo.listening_hash[0];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
					     (tp->rcv_nxt - tp->copied_seq),
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
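/*
 * For illustration only (hypothetical values): a line emitted by
 * tcp4_seq_show() above for an established socket follows the header
 * string, roughly:
 *
 *   0: 0100007F:0016 0100007F:8E24 01 00000000:00000000 02:000A7D8C ...
 *
 * i.e. slot, hex local address:port, hex remote address:port, state,
 * tx_queue:rx_queue, timer info and so on; addresses appear in
 * network byte order.
 */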
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init = tcp_sk_init,
	.exit = tcp_sk_exit,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);