#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
-#include <linux/config.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
+#include <linux/dmaengine.h>
+#include <linux/crypto.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS 536U
+/* The least MTU to use for probing */
+#define TCP_BASE_MSS 512
+
/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3
#define MAX_TCP_SYNCNT 127
#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
-#define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */
#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated after this time */
#define TCPOPT_SACK_PERM 4 /* SACK Permitted */
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
/*
 * TCP option lengths
 */
#define TCPOLEN_WINDOW 3
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
+#define TCPOLEN_MD5SIG 18
/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
#define TCPOLEN_SACK_BASE 2
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
+#define TCPOLEN_MD5SIG_ALIGNED 20
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
+extern int sysctl_tcp_frto_response;
extern int sysctl_tcp_low_latency;
+extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_abc;
+extern int sysctl_tcp_mtu_probing;
+extern int sysctl_tcp_base_mss;
+extern int sysctl_tcp_workaround_signed_windows;
+extern int sysctl_tcp_slow_start_after_idle;
+extern int sysctl_tcp_max_ssthresh;
extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
static inline int before(__u32 seq1, __u32 seq2)
{
return (__s32)(seq1-seq2) < 0;
}
-
-static inline int after(__u32 seq1, __u32 seq2)
-{
- return (__s32)(seq2-seq1) < 0;
-}
-
+#define after(seq2, seq1) before(seq1, seq2)
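+/* Illustrative example (not from the original header): both helpers
+ * compare in 32-bit sequence space, so they remain correct across
+ * wraparound of the sequence counter:
+ *
+ *	before(0xfffffff0, 0x00000010) is true, since
+ *	(__s32)(0xfffffff0 - 0x00000010) == (__s32)0xffffffe0 < 0;
+ *	after(0x00000010, 0xfffffff0) is the same test with swapped
+ *	arguments, and is therefore true as well.
+ */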
/* is s2<=s1<=s3 ? */
static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
return seq3 - seq2 >= seq1 - seq2;
}
extern void tcp_rcv_space_adjust(struct sock *sk);
+extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
+extern int tcp_twsk_unique(struct sock *sk,
+ struct sock *sktw, void *twp);
+
+extern void tcp_twsk_destructor(struct sock *sk);
+
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
extern int tcp_child_process(struct sock *parent,
struct sock *child,
struct sk_buff *skb);
+extern int tcp_use_frto(struct sock *sk);
extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern int tcp_setsockopt(struct sock *sk, int level,
int optname, char __user *optval,
int optlen);
+extern int compat_tcp_getsockopt(struct sock *sk,
+ int level, int optname,
+ char __user *optval, int __user *optlen);
+extern int compat_tcp_setsockopt(struct sock *sk,
+ int level, int optname,
+ char __user *optval, int optlen);
extern void tcp_set_keepalive(struct sock *sk, int val);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg,
extern void tcp_unhash(struct sock *sk);
-extern int tcp_v4_hash_connecting(struct sock *sk);
-
-
/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
-/* Initialize RCV_MSS value.
- * RCV_MSS is an our guess about MSS used by the peer.
- * We haven't any direct information about the MSS.
- * It's better to underestimate the RCV_MSS rather than overestimate.
- * Overestimations make us ACKing less frequently than needed.
- * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss().
- */
+extern void tcp_initialize_rcv_mss(struct sock *sk);
-static inline void tcp_initialize_rcv_mss(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
+extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+extern int tcp_mss_to_mtu(struct sock *sk, int mss);
+extern void tcp_mtup_init(struct sock *sk);
- hint = min(hint, tp->rcv_wnd/2);
- hint = min(hint, TCP_MIN_RCVMSS);
- hint = max(hint, TCP_MIN_MSS);
-
- inet_csk(sk)->icsk_ack.rcv_mss = hint;
-}
-
-static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
ntohl(TCP_FLAG_ACK) |
snd_wnd);
}
-static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}
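+/* Worked example (illustrative): pred_flags caches the expected 4th
+ * 32-bit word of the TCP header for header prediction. With
+ * tcp_header_len == 32 (timestamps enabled) and a scaled send window
+ * of 0x1234:
+ *
+ *	32 << 26 == 0x80000000 (i.e. doff == 32/4 == 8 in the top nibble)
+ *	ntohl(TCP_FLAG_ACK) == 0x00100000
+ *
+ * so pred_flags == htonl(0x80101234); an incoming segment whose 4th
+ * header word matches can take the input fast path.
+ */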
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
-static __inline__ u32 tcp_receive_window(const struct tcp_sock *tp)
+static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->tso_segs;
+ return skb_shinfo(skb)->gso_segs;
}
/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->tso_size;
+ return skb_shinfo(skb)->gso_size;
}
static inline void tcp_dec_pcount_approx(__u32 *count,
* Interface for adding new TCP congestion control handlers
*/
#define TCP_CA_NAME_MAX 16
+#define TCP_CA_MAX 128
+#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
+
struct tcp_congestion_ops {
struct list_head list;
+ int non_restricted;
/* initialize private data (optional) */
void (*init)(struct sock *sk);
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
/* lower bound for congestion window (optional) */
- u32 (*min_cwnd)(struct sock *sk);
+ u32 (*min_cwnd)(const struct sock *sk);
/* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack,
u32 rtt, u32 in_flight, int good_ack);
extern void tcp_cleanup_congestion_control(struct sock *sk);
extern int tcp_set_default_congestion_control(const char *name);
extern void tcp_get_default_congestion_control(char *name);
+extern void tcp_get_available_congestion_control(char *buf, size_t len);
+extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
+extern int tcp_set_allowed_congestion_control(char *allowed);
extern int tcp_set_congestion_control(struct sock *sk, const char *name);
+extern void tcp_slow_start(struct tcp_sock *tp);
extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
u32 rtt, u32 in_flight, int flag);
-extern u32 tcp_reno_min_cwnd(struct sock *sk);
+extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
* "Packets left network, but not honestly ACKed yet" PLUS
* "Packets fast retransmitted"
*/
-static __inline__ unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
+static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
return (tp->packets_out - tp->left_out + tp->retrans_out);
}
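+/* Example (illustrative): with packets_out == 10, sacked_out == 2 and
+ * lost_out == 1 (so left_out == 3 after tcp_sync_left_out()), plus
+ * retrans_out == 1, in_flight == 10 - 3 + 1 == 8 segments are still
+ * presumed to be in the network.
+ */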
(tp->snd_cwnd >> 2)));
}
-/*
- * Linear increase during slow start
- */
-static inline void tcp_slow_start(struct tcp_sock *tp)
-{
- if (sysctl_tcp_abc) {
- /* RFC3465: Slow Start
- * TCP sender SHOULD increase cwnd by the number of
- * previously unacknowledged bytes ACKed by each incoming
- * acknowledgment, provided the increase is not more than L
- */
- if (tp->bytes_acked < tp->mss_cache)
- return;
-
- /* We MAY increase by 2 if discovered delayed ack */
- if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- }
- }
- tp->bytes_acked = 0;
-
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
-}
-
-
static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
if (tp->rx_opt.sack_ok &&
    (tp->sacked_out >= tp->packets_out - tp->lost_out))
	tp->sacked_out = tp->packets_out - tp->lost_out;
tp->left_out = tp->sacked_out + tp->lost_out;
}
-/* Set slow start threshold and cwnd not falling to slow start */
-static inline void __tcp_enter_cwr(struct sock *sk)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
-
- tp->undo_marker = 0;
- tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
- tp->snd_cwnd = min(tp->snd_cwnd,
- tcp_packets_in_flight(tp) + 1U);
- tp->snd_cwnd_cnt = 0;
- tp->high_seq = tp->snd_nxt;
- tp->snd_cwnd_stamp = tcp_time_stamp;
- TCP_ECN_queue_cwr(tp);
-}
-
-static inline void tcp_enter_cwr(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- tp->prior_ssthresh = 0;
- tp->bytes_acked = 0;
- if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
- __tcp_enter_cwr(sk);
- tcp_set_ca_state(sk, TCP_CA_CWR);
- }
-}
-
+extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
if (in_flight >= tp->snd_cwnd)
return 1;
- if (!(sk->sk_route_caps & NETIF_F_TSO))
+ if (!sk_can_gso(sk))
return 0;
left = tp->snd_cwnd - in_flight;
return left <= tcp_max_burst(tp);
}
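+/* Example (illustrative): with snd_cwnd == 10 and in_flight == 8 on a
+ * GSO-capable socket, left == 2 <= tcp_max_burst() == 3, so the sender
+ * is still treated as cwnd-limited even though the window is not yet
+ * full; this keeps TSO deferral from creating oversized bursts.
+ */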
-static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
- const struct sk_buff *skb)
+static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
+ const struct sk_buff *skb)
{
if (skb->len < mss)
tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}
-static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out && !icsk->icsk_pending)
icsk->icsk_rto, TCP_RTO_MAX);
}
-static __inline__ void tcp_push_pending_frames(struct sock *sk,
- struct tcp_sock *tp)
+static inline void tcp_push_pending_frames(struct sock *sk,
+ struct tcp_sock *tp)
{
__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle);
}
-static __inline__ void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
-static __inline__ void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
+static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
{
tp->snd_wl1 = seq;
}
/*
* Calculate(/check) TCP checksum
*/
-static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
- unsigned long saddr, unsigned long daddr,
- unsigned long base)
+static inline __sum16 tcp_v4_check(int len, __be32 saddr,
+ __be32 daddr, __wsum base)
{
return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}
-static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
+static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
return __skb_checksum_complete(skb);
}
-static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
+static inline int tcp_checksum_complete(struct sk_buff *skb)
{
return skb->ip_summed != CHECKSUM_UNNECESSARY &&
__tcp_checksum_complete(skb);
/* Prequeue for VJ style copy to user, combined with checksumming. */
-static __inline__ void tcp_prequeue_init(struct tcp_sock *tp)
+static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
tp->ucopy.task = NULL;
tp->ucopy.len = 0;
tp->ucopy.memory = 0;
skb_queue_head_init(&tp->ucopy.prequeue);
+#ifdef CONFIG_NET_DMA
+ tp->ucopy.dma_chan = NULL;
+ tp->ucopy.wakeup = 0;
+ tp->ucopy.pinned_list = NULL;
+ tp->ucopy.dma_cookie = 0;
+#endif
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting.
 *
* NOTE: is this not too big to inline?
*/
-static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
};
#endif
-static __inline__ void tcp_set_state(struct sock *sk, int state)
+static inline void tcp_set_state(struct sock *sk, int state)
{
int oldstate = sk->sk_state;
#endif
}
-static __inline__ void tcp_done(struct sock *sk)
+static inline void tcp_done(struct sock *sk)
{
+ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+
tcp_set_state(sk, TCP_CLOSE);
tcp_clear_xmit_timers(sk);
inet_csk_destroy_sock(sk);
}
-static __inline__ void tcp_sack_reset(struct tcp_options_received *rx_opt)
+static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
rx_opt->dsack = 0;
rx_opt->eff_sacks = 0;
rx_opt->num_sacks = 0;
}
-static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp, __u32 tstamp)
-{
- if (tp->rx_opt.tstamp_ok) {
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) |
- TCPOLEN_TIMESTAMP);
- *ptr++ = htonl(tstamp);
- *ptr++ = htonl(tp->rx_opt.ts_recent);
- }
- if (tp->rx_opt.eff_sacks) {
- struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
- int this_sack;
-
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_SACK << 8) |
- (TCPOLEN_SACK_BASE +
- (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)));
- for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
- *ptr++ = htonl(sp[this_sack].start_seq);
- *ptr++ = htonl(sp[this_sack].end_seq);
- }
- if (tp->rx_opt.dsack) {
- tp->rx_opt.dsack = 0;
- tp->rx_opt.eff_sacks--;
- }
- }
-}
-
-/* Construct a tcp options header for a SYN or SYN_ACK packet.
- * If this is every changed make sure to change the definition of
- * MAX_SYN_SIZE to match the new maximum number of options that you
- * can generate.
- */
-static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
- int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
-{
- /* We always get an MSS option.
- * The option bytes which will be seen in normal data
- * packets should timestamps be used, must be in the MSS
- * advertised. But we subtract them from tp->mss_cache so
- * that calculations in tcp_sendmsg are simpler etc.
- * So account for this fact here if necessary. If we
- * don't do this correctly, as a receiver we won't
- * recognize data packets as being full sized when we
- * should, and thus we won't abide by the delayed ACK
- * rules correctly.
- * SACKs don't matter, we never delay an ACK when we
- * have any of those going out.
- */
- *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
- if (ts) {
- if(sack)
- *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
- else
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
- *ptr++ = htonl(tstamp); /* TSVAL */
- *ptr++ = htonl(ts_recent); /* TSECR */
- } else if(sack)
- *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
- if (offer_wscale)
- *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
-}
-
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
return tcp_win_from_space(sk->sk_rcvbuf);
}
-static __inline__ void tcp_openreq_init(struct request_sock *req,
- struct tcp_options_received *rx_opt,
- struct sk_buff *skb)
+static inline void tcp_openreq_init(struct request_sock *req,
+ struct tcp_options_received *rx_opt,
+ struct sk_buff *skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
ireq->wscale_ok = rx_opt->wscale_ok;
ireq->acked = 0;
ireq->ecn_ok = 0;
- ireq->rmt_port = skb->h.th->source;
+ ireq->rmt_port = tcp_hdr(skb)->source;
}
extern void tcp_enter_memory_pressure(void);
{
if ((s32)(rx_opt->rcv_tsval - rx_opt->ts_recent) >= 0)
return 0;
- if (xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
+ if (get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)
return 0;
/* RST segments are not recommended to carry timestamp,
However, we can relax time bounds for RST segments to MSL.
*/
- if (rst && xtime.tv_sec >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
return 0;
return 1;
}
#define TCP_CHECK_TIMER(sk) do { } while (0)
-static inline int tcp_use_frto(const struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
-
- /* F-RTO must be activated in sysctl and there must be some
- * unsent new data, and the advertised window should allow
- * sending it.
- */
- return (sysctl_tcp_frto && sk->sk_send_head &&
- !after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
- tp->snd_una + tp->snd_wnd));
-}
-
static inline void tcp_mib_init(void)
{
/* See RFC 2012 */
tp->fastpath_skb_hint = NULL;
}
+/* MD5 Signature */
+struct crypto_hash;
+
+/* - key database */
+struct tcp_md5sig_key {
+ u8 *key;
+ u8 keylen;
+};
+
+struct tcp4_md5sig_key {
+ u8 *key;
+ u16 keylen;
+ __be32 addr;
+};
+
+struct tcp6_md5sig_key {
+ u8 *key;
+ u16 keylen;
+#if 0
+ u32 scope_id; /* XXX */
+#endif
+ struct in6_addr addr;
+};
+
+/* - sock block */
+struct tcp_md5sig_info {
+ struct tcp4_md5sig_key *keys4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct tcp6_md5sig_key *keys6;
+ u32 entries6;
+ u32 alloced6;
+#endif
+ u32 entries4;
+ u32 alloced4;
+};
+
+/* - pseudo header */
+struct tcp4_pseudohdr {
+ __be32 saddr;
+ __be32 daddr;
+ __u8 pad;
+ __u8 protocol;
+ __be16 len;
+};
+
+struct tcp6_pseudohdr {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ __be32 len;
+ __be32 protocol; /* including padding */
+};
+
+union tcp_md5sum_block {
+ struct tcp4_pseudohdr ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct tcp6_pseudohdr ip6;
+#endif
+};
+
+/* - pool: digest algorithm, hash description and scratch buffer */
+struct tcp_md5sig_pool {
+ struct hash_desc md5_desc;
+ union tcp_md5sum_block md5_blk;
+};
+
+#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */
+
+/* - functions */
+extern int tcp_v4_calc_md5_hash(char *md5_hash,
+ struct tcp_md5sig_key *key,
+ struct sock *sk,
+ struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcphdr *th,
+ int protocol, int tcplen);
+extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+ struct sock *addr_sk);
+
+extern int tcp_v4_md5_do_add(struct sock *sk,
+ __be32 addr,
+ u8 *newkey,
+ u8 newkeylen);
+
+extern int tcp_v4_md5_do_del(struct sock *sk,
+ __be32 addr);
+
+extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void);
+extern void tcp_free_md5sig_pool(void);
+
+extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
+extern void __tcp_put_md5sig_pool(void);
+
+static inline
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
+{
+ int cpu = get_cpu();
+ struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
+ if (!ret)
+ put_cpu();
+ return ret;
+}
+
+static inline void tcp_put_md5sig_pool(void)
+{
+ __tcp_put_md5sig_pool();
+ put_cpu();
+}
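+/* Sketch of the intended calling pattern (an assumption drawn from the
+ * get_cpu()/put_cpu() pairing above, not a definitive example): the
+ * pool is per-CPU and preemption stays disabled while it is held, so
+ * the caller must not sleep and should release it promptly:
+ *
+ *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
+ *	if (!hp)
+ *		return -EINVAL;
+ *	... hash the pseudo header, TCP header and payload
+ *	    via hp->md5_desc ...
+ *	tcp_put_md5sig_pool();
+ */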
+
+/* write queue abstraction */
+static inline void tcp_write_queue_purge(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
+ sk_stream_free_skb(sk, skb);
+ sk_stream_mem_reclaim(sk);
+}
+
+static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+{
+ struct sk_buff *skb = sk->sk_write_queue.next;
+ if (skb == (struct sk_buff *) &sk->sk_write_queue)
+ return NULL;
+ return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+{
+ struct sk_buff *skb = sk->sk_write_queue.prev;
+ if (skb == (struct sk_buff *) &sk->sk_write_queue)
+ return NULL;
+ return skb;
+}
+
+static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+{
+ return skb->next;
+}
+
+#define tcp_for_write_queue(skb, sk) \
+ for (skb = (sk)->sk_write_queue.next; \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
+ skb = skb->next)
+
+#define tcp_for_write_queue_from(skb, sk) \
+ for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
+ skb = skb->next)
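+
+/* Illustrative use of the iterators above (a sketch, not taken from
+ * the original file): walk the already-sent part of the write queue,
+ * stopping at the first unsent skb:
+ *
+ *	struct sk_buff *skb;
+ *
+ *	tcp_for_write_queue(skb, sk) {
+ *		if (skb == tcp_send_head(sk))
+ *			break;
+ *		... examine TCP_SKB_CB(skb) ...
+ *	}
+ */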
+
+static inline struct sk_buff *tcp_send_head(struct sock *sk)
+{
+ return sk->sk_send_head;
+}
+
+static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+{
+ sk->sk_send_head = skb->next;
+ if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+ sk->sk_send_head = NULL;
+}
+
+static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
+{
+ if (sk->sk_send_head == skb_unlinked)
+ sk->sk_send_head = NULL;
+}
+
+static inline void tcp_init_send_head(struct sock *sk)
+{
+ sk->sk_send_head = NULL;
+}
+
+static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+}
+
+static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+{
+ __tcp_add_write_queue_tail(sk, skb);
+
+ /* Queue it, remembering where we must start sending. */
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb;
+}
+
+static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_queue_head(&sk->sk_write_queue, skb);
+}
+
+/* Insert buff after skb on the write queue of sk. */
+static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
+ struct sk_buff *buff,
+ struct sock *sk)
+{
+ __skb_append(skb, buff, &sk->sk_write_queue);
+}
+
+/* Insert skb between prev and next on the write queue of sk. */
+static inline void tcp_insert_write_queue_before(struct sk_buff *new,
+ struct sk_buff *skb,
+ struct sock *sk)
+{
+ __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+}
+
+static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
+{
+ __skb_unlink(skb, &sk->sk_write_queue);
+}
+
+static inline int tcp_skb_is_last(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ return skb->next == (struct sk_buff *)&sk->sk_write_queue;
+}
+
+static inline int tcp_write_queue_empty(struct sock *sk)
+{
+ return skb_queue_empty(&sk->sk_write_queue);
+}
+
/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
extern int tcp_v4_destroy_sock(struct sock *sk);
+extern int tcp_v4_gso_send_check(struct sk_buff *skb);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
#endif
+/* TCP af-specific functions */
+struct tcp_sock_af_ops {
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
+ struct sock *addr_sk);
+ int (*calc_md5_hash) (char *location,
+ struct tcp_md5sig_key *md5,
+ struct sock *sk,
+ struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcphdr *th,
+ int protocol, int len);
+ int (*md5_add) (struct sock *sk,
+ struct sock *addr_sk,
+ u8 *newkey,
+ u8 len);
+ int (*md5_parse) (struct sock *sk,
+ char __user *optval,
+ int optlen);
+#endif
+};
+
+struct tcp_request_sock_ops {
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
+ struct request_sock *req);
+#endif
+};
+
extern void tcp_v4_init(struct net_proto_family *ops);
extern void tcp_init(void);