#define TCP_NAGLE_CORK 2 /* Socket is corked */
#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
+/* TCP thin-stream limits */
+#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+
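For orientation: with thin-stream linear timeouts enabled, the first TCP_THIN_LINEAR_RETRIES retransmissions reuse the base RTO unchanged and only later ones back off exponentially, exactly as the comment above says. A standalone sketch of that schedule (next_rto_ms and its parameters are illustrative names, not kernel symbols):

	#include <stdio.h>

	#define THIN_LINEAR_RETRIES 6	/* mirrors TCP_THIN_LINEAR_RETRIES */

	/* Retransmissions 1..THIN_LINEAR_RETRIES keep the base RTO;
	 * each one after that doubles it, capped at max_rto_ms.
	 */
	static unsigned int next_rto_ms(unsigned int retransmits,
					unsigned int base_rto_ms,
					unsigned int max_rto_ms)
	{
		unsigned int rto = base_rto_ms;
		unsigned int i;

		for (i = THIN_LINEAR_RETRIES; i < retransmits; i++) {
			rto <<= 1;
			if (rto >= max_rto_ms)
				return max_rto_ms;
		}
		return rto;
	}

	int main(void)
	{
		unsigned int n;

		for (n = 1; n <= 10; n++)	/* 200 ms base, 120 s cap */
			printf("retransmit %2u: rto = %u ms\n",
			       n, next_rto_ms(n, 200, 120000));
		return 0;
	}

With a 200 ms base this prints 200 ms for retransmissions 1 through 6, then 400, 800 and 1600 ms, instead of the 12.8 s a plain exponential backoff would have reached by the seventh retransmission.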
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
+extern int sysctl_tcp_thin_linear_timeouts;
+extern int sysctl_tcp_thin_dupack;
extern atomic_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
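The two new sysctls (net.ipv4.tcp_thin_linear_timeouts and net.ipv4.tcp_thin_dupack) switch the thin-stream mechanisms on system-wide; the same series adds matching per-socket options. Assuming the TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK constants from it are present in the installed headers, a userspace caller could opt in a single socket like this:

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	/* Enable both thin-stream mechanisms on one socket; returns -1
	 * and sets errno if either option is unsupported.
	 */
	static int enable_thin_stream(int fd)
	{
		int on = 1;

		if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
			       &on, sizeof(on)) < 0)
			return -1;
		return setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
				  &on, sizeof(on));
	}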
int level, int optname,
char __user *optval, unsigned int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
+extern void tcp_syn_ack_timeout(struct sock *sk,
+ struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg,
size_t len, int nonblock,
extern void tcp_parse_options(struct sk_buff *skb,
struct tcp_options_received *opt_rx,
u8 **hvpp,
- int estab,
- struct dst_entry *dst);
+ int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
* TCP v4 functions exported for the inet6 API
*/
-extern void tcp_v4_send_check(struct sock *sk, int len,
+extern void tcp_v4_send_check(struct sock *sk,
struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk,
icsk->icsk_rto, TCP_RTO_MAX);
}
-static inline void tcp_push_pending_frames(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
-}
-
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
tp->snd_wl1 = seq;
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
- wake_up_interruptible_poll(sk->sk_sleep,
+ wake_up_interruptible_sync_poll(sk_sleep(sk),
POLLIN | POLLRDNORM | POLLRDBAND);
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale);
+ int wscale_ok, __u8 *rcv_wscale,
+ __u32 init_rcv_wnd);
static inline int tcp_win_from_space(int space)
{
#define tcp_twsk_md5_key(twsk) NULL
#endif
-extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *);
+extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);
extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
-/* This function calculates a "timeout" which is equivalent to the timeout of a
- * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
- */
-static inline bool retransmits_timed_out(struct sock *sk,
- unsigned int boundary)
-{
- unsigned int timeout, linear_backoff_thresh;
- unsigned int start_ts;
-
- if (!inet_csk(sk)->icsk_retransmits)
- return false;
-
- if (unlikely(!tcp_sk(sk)->retrans_stamp))
- start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
- else
- start_ts = tcp_sk(sk)->retrans_stamp;
-
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
-
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
-
- return (tcp_time_stamp - start_ts) >= timeout;
-}
-
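The deleted helper's arithmetic deserves a worked example. With TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s, linear_backoff_thresh = ilog2(120000/200) = ilog2(600) = 9; up to that threshold the timeout is the sum of boundary+1 doubling RTOs, ((2 << boundary) - 1) * TCP_RTO_MIN. A standalone sketch of the same computation in milliseconds (assuming HZ-independent values; retrans_timeout_ms is an illustrative name):

	#include <stdio.h>

	#define RTO_MIN_MS	   200		/* TCP_RTO_MIN at HZ = 1000 */
	#define RTO_MAX_MS	120000		/* TCP_RTO_MAX */

	/* Total time consumed by "boundary" exponentially backed-off
	 * retransmissions, with the doubling capped at RTO_MAX_MS after
	 * the first nine steps (ilog2(600) = 9).
	 */
	static unsigned int retrans_timeout_ms(unsigned int boundary)
	{
		const unsigned int linear_backoff_thresh = 9;

		if (boundary <= linear_backoff_thresh)
			return ((2U << boundary) - 1) * RTO_MIN_MS;
		return ((2U << linear_backoff_thresh) - 1) * RTO_MIN_MS +
		       (boundary - linear_backoff_thresh) * RTO_MAX_MS;
	}

	int main(void)
	{
		printf("boundary=3  -> %u ms\n", retrans_timeout_ms(3));
		printf("boundary=15 -> %u ms\n", retrans_timeout_ms(15));
		return 0;
	}

boundary = 3 gives 3 s (200 + 400 + 800 + 1600 ms), and boundary = 15, the default tcp_retries2, gives about 924.6 s.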
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
return sk->sk_send_head;
return skb_queue_empty(&sk->sk_write_queue);
}
+static inline void tcp_push_pending_frames(struct sock *sk)
+{
+	/* Unlike the variant removed above, do nothing when the write
+	 * queue is empty; needing tcp_send_head() is also why the
+	 * helper now lives below its definition.
+	 */
+	if (tcp_send_head(sk)) {
+		struct tcp_sock *tp = tcp_sk(sk);
+
+		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
+	}
+}
+
/* Start sequence of the highest skb with SACKed bit, valid only if
* sacked > 0 or when the caller has ensured validity by itself.
*/
tcp_sk(sk)->highest_sack = new;
}
+/* Determines whether this is a thin stream (which may suffer from
+ * increased latency). Used to trigger latency-reducing mechanisms.
+ */
+static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp)
+{
+ return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
+}
+
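The "< 4" threshold follows from fast retransmit: losing one of fewer than four segments in flight can produce at most two duplicate ACKs, so the standard three-dupACK trigger can never fire and recovery is left to the (much slower) retransmission timeout; the !tcp_in_initial_slowstart() test merely keeps streams that are still ramping up from being misclassified. A standalone demonstration of the threshold:

	#include <stdio.h>

	int main(void)
	{
		unsigned int packets_out;

		for (packets_out = 1; packets_out <= 5; packets_out++) {
			/* Losing the first segment yields one dupACK for
			 * each later segment still in flight.
			 */
			unsigned int dupacks = packets_out - 1;

			printf("packets_out=%u: %u dupACKs -> %s\n",
			       packets_out, dupacks,
			       dupacks >= 3 ? "fast retransmit possible"
					    : "thin: RTO-bound recovery");
		}
		return 0;
	}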
/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,