/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th, int protocol,
				   unsigned int tcplen);
#endif
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
	.lhash_users = ATOMIC_INIT(0),
	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
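
/*
 * Illustrative sketch (not part of the original file): the reuse rule
 * above, isolated as a pure predicate. A TIME-WAIT socket may be reused
 * for a new connection when it cached a recent timestamp and either the
 * caller is the timewait code itself (twp == NULL) or sysctl_tcp_tw_reuse
 * is set and at least one second has passed, so PAWS can reject stray
 * segments from the old incarnation.
 */
static inline int tw_reuse_allowed_sketch(u32 ts_recent_stamp, u32 now,
					  int have_twp, int tw_reuse)
{
	return ts_recent_stamp &&
	       (!have_twp || (tw_reuse && now - ts_recent_stamp > 1));
}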
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying a new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);
	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
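
/*
 * Usage sketch (userspace, not part of this file): tcp_v4_connect() above
 * is what ultimately runs when an application issues connect(2) on an
 * AF_INET stream socket. A minimal caller looks like this:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int tcp_connect_example(const char *ip, unsigned short port)
 *	{
 *		struct sockaddr_in sin = { .sin_family = AF_INET,
 *					   .sin_port   = htons(port) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0 || inet_pton(AF_INET, ip, &sin.sin_addr) != 1)
 *			return -1;
 *		if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 *			close(fd);  // e.g. ENETUNREACH from the route lookup
 *			return -1;
 *		}
 *		return fd;
 *	}
 */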
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
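
/*
 * Usage sketch (userspace, not part of this file): the RFC 1191 machinery
 * above is driven per-socket by the IP_MTU_DISCOVER option; after an
 * ICMP_FRAG_NEEDED has been processed, the current path MTU estimate of a
 * connected socket can be read back with IP_MTU:
 *
 *	int val = IP_PMTUDISC_DO;	// set DF, let the kernel probe
 *	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
 *	...
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *	getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len);
 */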
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
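
/*
 * Illustrative sketch (not part of the original file): the "err > 0 means
 * icmp type << 8 | icmp code" convention mentioned in the comment above,
 * written out as helpers.
 */
static inline int icmp_errcode_pack_sketch(int type, int code)
{
	return (type << 8) | code;
}

static inline int icmp_errcode_type_sketch(int packed)
{
	return packed >> 8;
}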
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->saddr,
					  inet->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
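
/*
 * Illustrative sketch (not part of the original file): what tcp_v4_check()
 * folds together. The TCP checksum covers a 96-bit pseudo-header (source
 * and destination IPv4 addresses, a zero byte, the protocol number, and
 * the TCP length) followed by the TCP header and payload, using 16-bit
 * one's-complement addition (RFC 1071). Byte order is simplified here:
 * addresses are taken in host order and segment bytes are paired
 * big-endian, purely for clarity.
 */
static u16 tcp_csum_sketch(u32 saddr, u32 daddr, const u8 *seg, int len)
{
	u32 sum = 0;
	int i;

	sum += (saddr >> 16) + (saddr & 0xffff);	/* pseudo-header */
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP + len;			/* zero pad + proto + length */
	for (i = 0; i + 1 < len; i += 2)		/* 16-bit words of the segment */
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)					/* odd trailing byte, zero padded */
		sum += seg[len - 1] << 8;
	while (sum >> 16)				/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;
}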
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for a reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb->rtable->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
					key,
					ip_hdr(skb)->daddr,
					ip_hdr(skb)->saddr,
					&rep.th, IPPROTO_TCP,
					arg.iov[0].iov_len);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}
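
/*
 * Illustrative sketch (not part of the original file): the RFC 793 reset
 * sequencing rule applied above. If the offending segment carried an ACK,
 * the RST is sent with seq = that ack_seq and no ACK bit; otherwise the
 * RST acks everything the segment occupied, where SYN and FIN each count
 * as one sequence number.
 */
static inline u32 rst_ack_seq_sketch(u32 seq, int syn, int fin,
				     unsigned int payload_len)
{
	return seq + syn + fin + payload_len;
}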
/* The code following below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */
static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * The SKB holds an incoming packet, but may not have a valid ->sk
	 * pointer. This is especially the case when we're dealing with a
	 * TIME_WAIT ack, because the sk structure is long gone, and only
	 * the tcp_timewait_sock remains. So the md5 key is stashed in that
	 * structure, and we use it in preference. I believe that (twsk ||
	 * skb->sk) holds true, but we program defensively.
	 */
	if (!twsk && skb->sk) {
		key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
	} else if (twsk && twsk->tw_md5_keylen) {
		tw_key.key = twsk->tw_md5_key;
		tw_key.keylen = twsk->tw_md5_keylen;
		key = &tw_key;
	} else
		key = NULL;

	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
					key,
					ip_hdr(skb)->daddr,
					ip_hdr(skb)->saddr,
					&rep.th, IPPROTO_TCP,
					arg.iov[0].iov_len);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (twsk)
		arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;

	ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
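
/*
 * Usage sketch (userspace, not part of this file): the per-address keys
 * kept in tp->md5sig_info are installed through the TCP_MD5SIG socket
 * option, which tcp_v4_parse_md5_keys() below decodes:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */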
/* Find the Key structure for an address. */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}
/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);
static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th, int protocol,
				   unsigned int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 old_checksum;
	struct tcp_md5sig_pool *hp;
	struct tcp4_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	/*
	 * Okay, so RFC2385 is turned on for this connection,
	 * so we need to generate the MD5 hash for the packet now.
	 */

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;

	bp = &hp->md5_blk.ip4;
	desc = &hp->md5_desc;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = protocol;
	bp->len = htons(tcplen);

	sg_init_table(sg, 4);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. the TCP header, excluding options, and assuming a
	 * checksum of zero
	 */
	old_checksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
	nbytes += sizeof(struct tcphdr);

	/* 3. the TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		unsigned char *data = (unsigned char *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. an independently-specified key or password, known to both
	 * TCPs and presumably connection-specific
	 */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Now store the Hash into the packet */
	err = crypto_hash_init(desc);
	if (err)
		goto clear_hash;
	err = crypto_hash_update(desc, sg, nbytes);
	if (err)
		goto clear_hash;
	err = crypto_hash_final(desc, md5_hash);
	if (err)
		goto clear_hash;

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = old_checksum;

out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}
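
/*
 * Illustrative sketch (not part of the original file): the exact byte
 * sequence that the scatterlist above feeds into MD5, per RFC 2385
 * section 2.0, assembled into one flat buffer. "md5()" is a placeholder
 * for whatever MD5 implementation is at hand.
 *
 *	len = 0;
 *	memcpy(buf + len, &pseudo_hdr, sizeof(pseudo_hdr)); // 1. pseudo-header
 *	len += sizeof(pseudo_hdr);
 *	th_copy = *th;
 *	th_copy.check = 0;                                  // 2. TCP header, csum = 0
 *	memcpy(buf + len, &th_copy, sizeof(th_copy));
 *	len += sizeof(th_copy);
 *	memcpy(buf + len, data, data_len);                  // 3. segment data
 *	len += data_len;
 *	memcpy(buf + len, key, keylen);                     // 4. the shared key
 *	len += keylen;
 *	md5(buf, len, digest);                              // 16-byte option value
 */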
int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
			 struct sock *sk,
			 struct dst_entry *dst,
			 struct request_sock *req,
			 struct tcphdr *th, int protocol,
			 unsigned int tcplen)
{
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		struct rtable *rt = (struct rtable *)dst;
		BUG_ON(!rt);
		saddr = rt->rt_src;
		daddr = rt->rt_dst;
	}
	return tcp_v4_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}

EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	if (!hash_expected && hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_do_calc_md5_hash(newhash,
					  hash_expected,
					  iph->saddr, iph->daddr,
					  th, sk->sk_protocol,
					  skb->len);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
};
#endif
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations: they conserve resources and the peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop the request. It is better than
	 * clogging the syn queue with openreqs with exponentially increasing
	 * timeout. The accept()/backlog interaction is sketched after this
	 * function.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on web servers,
		 * which contain information interesting only for windows'
		 * users) do not send their stamp in SYN. It is an easy case.
		 * We simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies, the last quarter of the
			 * backlog is filled with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from " NIPQUAD_FMT "/%u\n",
				       NIPQUAD(saddr),
				       ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
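
/*
 * Usage sketch (userspace, not part of this file): the two queues tested
 * in tcp_v4_conn_request() map onto listen(2). The SYN (request) queue is
 * bounded by net.ipv4.tcp_max_syn_backlog, while sk_acceptq_is_full()
 * enforces the backlog argument the application passed:
 *
 *	listen(fd, 128);	// accept-queue limit checked by sk_acceptq_is_full()
 *	for (;;) {
 *		int c = accept(fd, NULL, NULL);	// drains completed connections
 *		...
 *	}
 */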
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
/* VJ's idea. Save the last timestamp seen from this destination
 * and hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they enter
 * synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}
	return 0;
}
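
/*
 * Illustrative sketch (not part of the original file): the update rule
 * shared by both remember_stamp variants. The cached per-peer timestamp
 * only moves forward, unless the cached entry is older than TCP_PAWS_MSL
 * and was recorded no later than the new sample anyway.
 */
static inline int peer_ts_update_ok_sketch(u32 cached_ts, u32 cached_stamp,
					   u32 new_ts, u32 new_stamp,
					   u32 now, u32 paws_msl)
{
	return (s32)(cached_ts - new_ts) <= 0 ||
	       (cached_stamp + paws_msl < now && cached_stamp <= new_stamp);
}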
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}
	return 0;
}
struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_calc_md5_hash,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
int tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	if (tp->defer_tcp_accept.request) {
		reqsk_free(tp->defer_tcp_accept.request);
		sock_put(tp->defer_tcp_accept.listen_sk);
		sock_put(sk);
		tp->defer_tcp_accept.listen_sk = NULL;
		tp->defer_tcp_accept.request = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);

	return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
	return hlist_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return tw->tw_node.next ?
		hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_node *node;
	struct sock *sk = cur;
	struct tcp_iter_state* st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		st->bucket = 0;
		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
		goto get_sk;
	}

	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family &&
				    net_eq(sock_net(req->sk), net)) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	if (++st->bucket < INET_LHTABLE_SIZE) {
		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		--*pos;
		rc = listening_get_next(seq, rc);
	}
	return rc;
}
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state* st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
		struct sock *sk;
		struct hlist_node *node;
		struct inet_timewait_sock *tw;
		rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		read_lock_bh(lock);
		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		read_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	struct tcp_iter_state* st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		if (++st->bucket < tcp_hashinfo.ehash_size) {
			read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
		} else {
			cur = NULL;
			goto out;
		}
	} else
		sk = sk_next(sk);

	sk_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state* st = seq->private;

	inet_listen_lock(&tcp_hashinfo);
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		inet_listen_unlock(&tcp_hashinfo);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state* st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state* st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			inet_listen_unlock(&tcp_hashinfo);
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			inet_listen_unlock(&tcp_hashinfo);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	return 0;
}
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->daddr;
	__be32 src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %u %u %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
					     (tp->rcv_nxt - tp->copied_seq),
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		icsk->icsk_rto,
		icsk->icsk_ack.ato,
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
		len);
}
static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state* st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
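
/*
 * Usage sketch (userspace, not part of this file): one way to consume the
 * records emitted by tcp4_seq_show() above. Addresses, ports, and state
 * are printed as hex, per the seq_printf format strings:
 *
 *	FILE *f = fopen("/proc/net/tcp", "r");
 *	char line[256];
 *	unsigned int local, remote, lport, rport, state;
 *
 *	fgets(line, sizeof(line), f);	// skip the header row
 *	while (fgets(line, sizeof(line), f))
 *		if (sscanf(line, "%*d: %x:%x %x:%x %x",
 *			   &local, &lport, &remote, &rport, &state) == 5)
 *			printf("state %02x port %u -> %u\n", state, lport, rport);
 *	fclose(f);
 */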
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init = tcp_sk_init,
	.exit = tcp_sk_exit,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_device(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);