/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U
/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U
/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
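
/* Editorial worked example (not in the original header): the raw timestamp
 * option is 1 (kind) + 1 (length) + 8 (two 4-byte timestamps) = 10 bytes
 * (TCPOLEN_TIMESTAMP); senders prepend two TCPOPT_NOP pads to keep the
 * option area 32-bit aligned, which is where TCPOLEN_TSTAMP_ALIGNED = 12
 * comes from. The other *_ALIGNED values round up the same way.
 */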
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;

extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */
static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
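
/* Editorial example (a minimal sketch, not part of the original header):
 * these comparisons stay correct across sequence-number wraparound because
 * the subtraction is evaluated modulo 2^32 before the signed test.
 *
 *	__u32 a = 0xfffffff0;		// just below the wrap point
 *	__u32 b = 0x00000010;		// just past the wrap point
 *
 *	before(a, b);			// 1: a logically precedes b
 *	after(b, a);			// 1: same test, arguments swapped
 *	between(0x4, a, b);		// 1: 0x4 lies in the wrapped range
 */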
static inline int tcp_too_many_orphans(struct sock *sk, int num)
{
	return (num > sysctl_tcp_max_orphans) ||
		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
}

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline int tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT);
}
extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
extern void tcp_v4_err(struct sk_buff *skb, u32);

extern void tcp_shutdown (struct sock *sk, int how);

extern int tcp_v4_rcv(struct sk_buff *skb);

extern int tcp_v4_remember_stamp(struct sock *sk);

extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);

extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);

extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
				 struct tcphdr *th, unsigned len);

extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			       struct tcphdr *th, unsigned len);

extern void tcp_rcv_space_adjust(struct sock *sk);

extern void tcp_cleanup_rbuf(struct sock *sk, int copied);

extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);

extern void tcp_twsk_destructor(struct sock *sk);

extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags);
static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

extern void tcp_enter_quickack_mode(struct sock *sk);
static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4

static __inline__ void
TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
	if (sysctl_tcp_ecn && th->ece && th->cwr)
		inet_rsk(req)->ecn_ok = 1;
}
enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
						     struct sk_buff *skb,
						     const struct tcphdr *th);

extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
				   struct request_sock *req,
				   struct request_sock **prev);
extern int tcp_child_process(struct sock *parent, struct sock *child,
			     struct sk_buff *skb);
extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
extern void tcp_close(struct sock *sk, long timeout);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
			     struct poll_table_struct *wait);

extern int tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
extern int tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen);
extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int nonblock,
		       int flags, int *addr_len);

extern void tcp_parse_options(struct sk_buff *skb,
			      struct tcp_options_received *opt_rx,
			      struct dst_entry *dst);

extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

extern void tcp_v4_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);

extern struct sock * tcp_create_openreq_child(struct sock *sk,
					      struct request_sock *req,
					      struct sk_buff *skb);

extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst);

extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);

extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len);

extern int tcp_connect(struct sock *sk);

extern struct sk_buff * tcp_make_synack(struct sock *sk,
					struct dst_entry *dst,
					struct request_sock *req);

extern int tcp_disconnect(struct sock *sk, int flags);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);

extern __u32 cookie_init_timestamp(struct request_sock *req);
extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);

/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
/* tcp_output.c */

extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
				      int nonagle);
extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_retransmit_timer(struct sock *sk);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);
extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}
extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	if (tp->max_window && pktsize > (tp->max_window >> 1))
		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
/* tcp.c */
extern void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
			       unsigned int, size_t);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
			 sk_read_actor_t recv_actor);

extern void tcp_initialize_rcv_mss(struct sock *sk);

extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return (tp->srtt >> 3) + tp->rttvar;
}
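
/* Editorial note (not in the original header): srtt is kept left-shifted
 * by 3, i.e. in units of RTT/8, so srtt >> 3 recovers the smoothed RTT,
 * while rttvar already carries the scaled variance term; the sum
 * approximates the classic RTO = SRTT + 4*RTTVAR of RFC 2988. E.g. a
 * smoothed RTT of 200 ticks (srtt == 1600 internally) with rttvar == 50
 * yields a candidate RTO of 250 ticks, later clamped by tcp_bound_rto().
 */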
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}
/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);
/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
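
/* Editorial example (a minimal sketch, not part of the original header):
 * TCP_SKB_CB() overlays struct tcp_skb_cb onto the generic skb->cb[]
 * scratch area, which is how per-segment TCP state rides along with each
 * skb. The helper name below is hypothetical:
 *
 *	static inline u32 example_seg_space(const struct sk_buff *skb)
 *	{
 *		// sequence space covered, including any SYN/FIN flag bytes
 *		return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
 *	}
 */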
/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_FRTO,		/* fast recovery timeout */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_FAST_ACK,	/* in sequence ack */
	CA_EVENT_SLOW_ACK,	/* other ack */
};
/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CONG_NON_RESTRICTED 0x1
#define TCP_CONG_RTT_STAMP	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long		flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* lower bound for congestion window (optional) */
	u32 (*min_cwnd)(const struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};
extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

extern void tcp_init_congestion_control(struct sock *sk);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
extern void tcp_get_available_congestion_control(char *buf, size_t len);
extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
extern void tcp_slow_start(struct tcp_sock *tp);
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
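
/* Editorial example (a minimal sketch, not part of the original header):
 * a congestion control module only has to fill in the two required hooks,
 * ssthresh() and cong_avoid(), and register itself. The module name
 * "example_reno" is hypothetical; the callbacks simply reuse the exported
 * Reno helpers declared above.
 *
 *	static struct tcp_congestion_ops tcp_example = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.min_cwnd	= tcp_reno_min_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example_reno",
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 */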
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}
/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline int tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 2;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 2;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
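
/* Editorial worked example (illustrative numbers, not from the original
 * source): with packets_out = 10, sacked_out = 2, lost_out = 1 and
 * retrans_out = 1, tcp_left_out() = 2 + 1 = 3 and tcp_packets_in_flight()
 * = 10 - 3 + 1 = 8, i.e. eight segments are still presumed to occupy
 * network capacity.
 */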
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}
extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);

static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
				       const struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline int tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}
/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	tp->ucopy.wakeup = 0;
	tp->ucopy.pinned_list = NULL;
	tp->ucopy.dma_cookie = 0;
#endif
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 * NOTE: is this not too big to inline?
 */
static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_poll(sk->sk_sleep,
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return 1;
}
#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif

extern void tcp_set_state(struct sock *sk, int state);

extern void tcp_done(struct sock *sk);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
				      __u32 *rcv_wnd, __u32 *window_clamp,
				      int wscale_ok, __u8 *rcv_wscale);
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}
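
/* Editorial worked example (illustrative numbers, not from the original
 * source): with the usual sysctl_tcp_adv_win_scale of 2 and 65536 bytes of
 * free buffer space, tcp_win_from_space(65536) = 65536 - (65536 >> 2)
 * = 49152, i.e. a quarter of the space is held back as application and
 * metadata overhead. A non-positive scale inverts the split: with -2 the
 * advertised share would be 65536 >> 2 = 16384.
 */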
static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->rmt_port = tcp_hdr(skb)->source;
	ireq->loc_port = tcp_hdr(skb)->dest;
}
extern void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
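
/* Editorial note (not in the original header): (rto << 2) - (rto >> 1)
 * is 4*RTO - RTO/2 = 3.5*RTO, so the FIN_WAIT2 lifetime is floored at
 * three and a half retransmission timeouts. For example, with an RTO of
 * 200 jiffies the minimum fin_timeout is 700 jiffies, even if the
 * configured linger2/sysctl value is smaller.
 */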
static inline int tcp_paws_check(const struct tcp_options_received *rx_opt,
				 int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return 1;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return 1;

	return 0;
}

static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt,
				  int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, this is a mistake. It is necessary to understand the
	   reasons for this constraint before relaxing it: if the peer
	   reboots, its clock may go out-of-sync and half-open connections
	   will not be reset. Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clocks
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}
/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
	tp->scoreboard_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}
/* - key database */
struct tcp_md5sig_key {
	u8			*key;
	u8			keylen;
};

struct tcp4_md5sig_key {
	struct tcp_md5sig_key	base;
	__be32			addr;
};

struct tcp6_md5sig_key {
	struct tcp_md5sig_key	base;
#if 0
	u32			scope_id;	/* XXX */
#endif
	struct in6_addr		addr;
};

/* - sock block */
struct tcp_md5sig_info {
	struct tcp4_md5sig_key	*keys4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_md5sig_key	*keys6;
	u32			entries6;
	u32			alloced6;
#endif
	u32			entries4;
	u32			alloced4;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

#define TCP_MD5SIG_MAXKEYS	(~(u32)0)	/* really?! */
/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash,
			       struct tcp_md5sig_key *key,
			       struct sock *sk,
			       struct request_sock *req,
			       struct sk_buff *skb);

extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
						struct sock *addr_sk);

extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
			     u8 *newkey, u8 newkeylen);

extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);

#ifdef CONFIG_TCP_MD5SIG
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_keylen ?		 \
				 &(struct tcp_md5sig_key) {		 \
					.key = (twsk)->tw_md5_key,	 \
					.keylen = (twsk)->tw_md5_keylen, \
				 } : NULL)
#else
#define tcp_twsk_md5_key(twsk)	NULL
#endif
extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);

extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
				 unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
			    struct tcp_md5sig_key *key);

static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
	int cpu = get_cpu();
	struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
	if (!ret)
		put_cpu();
	return ret;
}

static inline void tcp_put_md5sig_pool(void)
{
	__tcp_put_md5sig_pool();
	put_cpu();
}
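
/* Editorial usage sketch (not part of the original header): the pool
 * accessors pair like get_cpu()/put_cpu(), since tcp_get_md5sig_pool()
 * disables preemption to hand out a per-CPU scratch pool:
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (hp) {
 *		// ... tcp_md5_hash_header()/tcp_md5_hash_key() etc. ...
 *		tcp_put_md5sig_pool();	// re-enables preemption
 *	}
 */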
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
}

static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
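
/* Editorial usage sketch (not part of the original header): the walk
 * macros expand to the generic skb_queue_walk*() helpers, so iterating
 * the retransmit queue looks like:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;	// beyond here lies not-yet-sent data
 *		// ... inspect TCP_SKB_CB(skb)->sacked, seq, end_seq ...
 *	}
 */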
/* This function calculates a "timeout" which is equivalent to the timeout of a
 * TCP connection after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static inline bool retransmits_timed_out(const struct sock *sk,
					 unsigned int boundary)
{
	unsigned int timeout, linear_backoff_thresh;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);

	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;

	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
}
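
/* Editorial worked example (illustrative, not from the original source):
 * with HZ = 1000, TCP_RTO_MIN = 200ms and TCP_RTO_MAX = 120s, so
 * linear_backoff_thresh = ilog2(600) = 9. For boundary = 3 the doubling
 * branch applies: timeout = ((2 << 3) - 1) * 200ms = 3s. For boundary = 15
 * the backoff has long been capped: timeout = ((2 << 9) - 1) * 200ms +
 * (15 - 9) * 120s = 204.6s + 720s = 924.6s.
 */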
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk.  */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline int tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}
/* Start sequence of the highest skb with SACKed bit, valid only if
 * sacked > 0 or when the caller has ensured validity by itself.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}
/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
	TCP_SEQ_STATE_TIME_WAIT,
};

struct tcp_seq_afinfo {
	char			*name;
	sa_family_t		family;
	struct file_operations	seq_fops;
	struct seq_operations	seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, sbucket, num, uid;
};
extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

extern void tcp_v4_destroy_sock(struct sock *sk);

extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
					struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb);
extern int tcp_gro_complete(struct sk_buff *skb);
extern int tcp4_gro_complete(struct sk_buff *skb);

#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif
/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
	int			(*md5_add) (struct sock *sk,
					    struct sock *addr_sk,
					    u8 *newkey,
					    u8 len);
	int			(*md5_parse) (struct sock *sk,
					      char __user *optval,
					      int optlen);
#endif
};

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct request_sock *req);
	int			(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
						  struct sock *sk,
						  struct request_sock *req,
						  struct sk_buff *skb);
#endif
};

extern void tcp_v4_init(void);
extern void tcp_init(void);

#endif	/* _TCP_H */