Bluetooth: Add CID field to L2CAP socket address structure
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 60e5be8..218235d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
 
 extern struct inet_hashinfo tcp_hashinfo;
 
-extern atomic_t tcp_orphan_count;
+extern struct percpu_counter tcp_orphan_count;
 extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER (128 + MAX_HEADER)
+#define MAX_TCP_OPTION_SPACE 40
 
 /* 
  * Never offer a window over 32767 without using window scaling. Some
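With tcp_orphan_count converted from atomic_t to a struct percpu_counter, every call site that bumped it with atomic_inc()/atomic_dec() has to move to the percpu_counter helpers. The call sites themselves live in net/ipv4/tcp.c and are not part of this header diff; the following is only a minimal sketch of the API change (function names are illustrative):

#include <linux/percpu_counter.h>
#include <net/tcp.h>

/* Sketch only: shows the counter API change, not the real call sites. */
static void example_orphan_inc(void)
{
        percpu_counter_inc(&tcp_orphan_count);  /* was atomic_inc(&tcp_orphan_count) */
}

static void example_orphan_dec(void)
{
        percpu_counter_dec(&tcp_orphan_count);  /* was atomic_dec(&tcp_orphan_count) */
}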
@@ -184,6 +185,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCPOLEN_SACK_BASE_ALIGNED      4
 #define TCPOLEN_SACK_PERBLOCK          8
 #define TCPOLEN_MD5SIG_ALIGNED         20
+#define TCPOLEN_MSS_ALIGNED            4
 
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF          1       /* Nagle's algo is disabled */
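MAX_TCP_OPTION_SPACE captures the architectural 40-byte limit on TCP options (60-byte maximum header minus the 20-byte fixed part), and TCPOLEN_MSS_ALIGNED gives the padded size of the MSS option, matching the other *_ALIGNED constants above. A hedged sketch of the kind of budget arithmetic these enable when building a SYN; the function is hypothetical, but the TCPOLEN_*_ALIGNED constants it uses already exist in this header:

#include <net/tcp.h>

/* Hypothetical helper: how much option space is left on a SYN once the
 * commonly negotiated options are accounted for.
 */
static unsigned int example_syn_option_space_left(int tstamp_ok, int wscale_ok,
                                                  int sack_ok)
{
        unsigned int used = TCPOLEN_MSS_ALIGNED;

        if (tstamp_ok)
                used += TCPOLEN_TSTAMP_ALIGNED;
        if (wscale_ok)
                used += TCPOLEN_WSCALE_ALIGNED;
        if (sack_ok)
                used += TCPOLEN_SACKPERM_ALIGNED;

        return MAX_TCP_OPTION_SPACE - used;
}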
@@ -236,7 +238,7 @@ extern int sysctl_tcp_slow_start_after_idle;
 extern int sysctl_tcp_max_ssthresh;
 
 extern atomic_t tcp_memory_allocated;
-extern atomic_t tcp_sockets_allocated;
+extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
 /*
@@ -265,11 +267,10 @@ static inline int tcp_too_many_orphans(struct sock *sk, int num)
 
 extern struct proto tcp_prot;
 
-DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
-#define TCP_INC_STATS(net, field)      do { (void)net; SNMP_INC_STATS(tcp_statistics, field); } while (0)
-#define TCP_INC_STATS_BH(net, field)   do { (void)net; SNMP_INC_STATS_BH(tcp_statistics, field); } while (0)
-#define TCP_DEC_STATS(net, field)      do { (void)net; SNMP_DEC_STATS(tcp_statistics, field); } while (0)
-#define TCP_ADD_STATS_USER(net, field, val) do { (void)net; SNMP_ADD_STATS_USER(tcp_statistics, field, val); } while (0)
+#define TCP_INC_STATS(net, field)      SNMP_INC_STATS((net)->mib.tcp_statistics, field)
+#define TCP_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define TCP_DEC_STATS(net, field)      SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
+#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 
 extern void                    tcp_v4_err(struct sk_buff *skb, u32);
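The TCP_*_STATS macros above now account to per-namespace storage at net->mib.tcp_statistics instead of a single global tcp_statistics array, so they genuinely use the net argument they used to cast to void. Call sites keep exactly the same shape; a minimal sketch with a hypothetical function name:

#include <net/tcp.h>

/* Illustrative call site: the event is now charged to the socket's own
 * network namespace instead of a global MIB.
 */
static void example_count_out_rst(struct sock *sk)
{
        TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
}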
 
@@ -587,7 +588,6 @@ struct tcp_skb_cb {
 #define TCPCB_EVER_RETRANS     0x80    /* Ever retransmitted frame     */
 #define TCPCB_RETRANS          (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
-       __u16           urg_ptr;        /* Valid w/URG flags is set.    */
        __u32           ack_seq;        /* Sequence number ACK'd        */
 };
 
@@ -761,8 +761,6 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 }
 
-extern int tcp_limit_reno_sacked(struct tcp_sock *tp);
-
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
@@ -893,7 +891,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
                        BUG_ON(sock_owned_by_user(sk));
 
                        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-                               sk->sk_backlog_rcv(sk, skb1);
+                               sk_backlog_rcv(sk, skb1);
                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
                        }
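Draining the prequeue now goes through the sk_backlog_rcv() helper rather than calling the sk->sk_backlog_rcv function pointer directly. That helper is defined in include/net/sock.h, not here; to a first approximation it is a thin forwarder of the shape sketched below (an assumption about that header, not copied from it), which gives the core a single place to wrap extra processing around the protocol's backlog handler:

/* Approximate shape of the wrapper in include/net/sock.h (assumption). */
static inline int example_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        return sk->sk_backlog_rcv(sk, skb);
}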
 
@@ -973,6 +971,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->acked = 0;
        ireq->ecn_ok = 0;
        ireq->rmt_port = tcp_hdr(skb)->source;
+       ireq->loc_port = tcp_hdr(skb)->dest;
 }
 
 extern void tcp_enter_memory_pressure(struct sock *sk);
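Besides the peer's source port, the request sock now also records the local port the SYN arrived on. This header only captures the value; a hedged sketch of the kind of consumer it enables, namely filling the SYN-ACK ports from the request rather than from the listening socket (the real code for this lives in net/ipv4/tcp_output.c and is not shown here):

#include <linux/tcp.h>
#include <net/inet_sock.h>

/* Illustrative only: why loc_port is worth capturing at SYN time. */
static void example_fill_synack_ports(struct tcphdr *th,
                                      const struct inet_request_sock *ireq)
{
        th->source = ireq->loc_port;    /* local port the SYN was sent to */
        th->dest   = ireq->rmt_port;    /* peer's source port */
}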
@@ -1038,13 +1037,12 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
 {
        tp->lost_skb_hint = NULL;
        tp->scoreboard_skb_hint = NULL;
-       tp->retransmit_skb_hint = NULL;
-       tp->forward_skb_hint = NULL;
 }
 
 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
 {
        tcp_clear_retrans_hints_partial(tp);
+       tp->retransmit_skb_hint = NULL;
 }
 
 /* MD5 Signature */
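The hint bookkeeping above is split differently: the partial clear no longer forgets retransmit_skb_hint (and forward_skb_hint is gone altogether), so only a change that invalidates the whole write queue drops every hint. A small sketch of which helper a caller would pick; the function names are hypothetical:

#include <net/tcp.h>

/* Only the scoreboard changed: keep tp->retransmit_skb_hint. */
static void example_on_scoreboard_change(struct tcp_sock *tp)
{
        tcp_clear_retrans_hints_partial(tp);
}

/* The write queue itself changed: forget every cached hint. */
static void example_on_write_queue_change(struct tcp_sock *tp)
{
        tcp_clear_all_retrans_hints(tp);
}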
@@ -1113,20 +1111,12 @@ struct tcp_md5sig_pool {
 #define TCP_MD5SIG_MAXKEYS     (~(u32)0)       /* really?! */
 
 /* - functions */
-extern int                     tcp_calc_md5_hash(char *md5_hash,
-                                                 struct tcp_md5sig_key *key,
-                                                 int bplen,
-                                                 struct tcphdr *th,
-                                                 unsigned int tcplen,
-                                                 struct tcp_md5sig_pool *hp);
-
-extern int                     tcp_v4_calc_md5_hash(char *md5_hash,
-                                                    struct tcp_md5sig_key *key,
-                                                    struct sock *sk,
-                                                    struct dst_entry *dst,
-                                                    struct request_sock *req,
-                                                    struct tcphdr *th,
-                                                    unsigned int tcplen);
+extern int                     tcp_v4_md5_hash_skb(char *md5_hash,
+                                                   struct tcp_md5sig_key *key,
+                                                   struct sock *sk,
+                                                   struct request_sock *req,
+                                                   struct sk_buff *skb);
+
 extern struct tcp_md5sig_key   *tcp_v4_md5_lookup(struct sock *sk,
                                                   struct sock *addr_sk);
 
@@ -1153,6 +1143,11 @@ extern void                      tcp_free_md5sig_pool(void);
 
 extern struct tcp_md5sig_pool  *__tcp_get_md5sig_pool(int cpu);
 extern void                    __tcp_put_md5sig_pool(void);
+extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
+extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
+                                unsigned header_len);
+extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+                           struct tcp_md5sig_key *key);
 
 static inline
 struct tcp_md5sig_pool         *tcp_get_md5sig_pool(void)
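The monolithic tcp_calc_md5_hash()/tcp_v4_calc_md5_hash() pair is replaced by tcp_v4_md5_hash_skb() plus three reusable building blocks that feed the TCP header, the skb payload and the key into an md5sig pool; the per-family calc_md5_hash operation further down in this diff adopts the same skb-based signature so IPv4 and IPv6 can share them. A hedged sketch of how a caller composes the new helpers; the address-family pseudo-header step and the final digest extraction done by the real tcp_v4_md5_hash_skb() in net/ipv4/tcp_ipv4.c are omitted:

#include <net/tcp.h>

/* Sketch: hash one segment with the new building blocks.  The real code
 * also mixes in an IPv4/IPv6 pseudo-header and writes the finished digest
 * out of the pool's crypto hash; both steps are elided here.
 */
static int example_md5_hash_segment(struct tcp_md5sig_key *key,
                                    struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
        struct tcphdr *th = tcp_hdr(skb);
        int err = -1;

        if (!hp)
                return -1;

        if (tcp_md5_hash_header(hp, th))
                goto out;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto out;
        if (tcp_md5_hash_key(hp, key))
                goto out;
        err = 0;
out:
        tcp_put_md5sig_pool();
        return err;
}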
@@ -1182,49 +1177,50 @@ static inline void tcp_write_queue_purge(struct sock *sk)
 
 static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
 {
-       struct sk_buff *skb = sk->sk_write_queue.next;
-       if (skb == (struct sk_buff *) &sk->sk_write_queue)
-               return NULL;
-       return skb;
+       return skb_peek(&sk->sk_write_queue);
 }
 
 static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
 {
-       struct sk_buff *skb = sk->sk_write_queue.prev;
-       if (skb == (struct sk_buff *) &sk->sk_write_queue)
-               return NULL;
-       return skb;
+       return skb_peek_tail(&sk->sk_write_queue);
 }
 
 static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
 {
-       return skb->next;
+       return skb_queue_next(&sk->sk_write_queue, skb);
+}
+
+static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+{
+       return skb_queue_prev(&sk->sk_write_queue, skb);
 }
 
 #define tcp_for_write_queue(skb, sk)                                   \
-               for (skb = (sk)->sk_write_queue.next;                   \
-                    (skb != (struct sk_buff *)&(sk)->sk_write_queue);  \
-                    skb = skb->next)
+       skb_queue_walk(&(sk)->sk_write_queue, skb)
 
 #define tcp_for_write_queue_from(skb, sk)                              \
-               for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
-                    skb = skb->next)
+       skb_queue_walk_from(&(sk)->sk_write_queue, skb)
 
 #define tcp_for_write_queue_from_safe(skb, tmp, sk)                    \
-               for (tmp = skb->next;                                   \
-                    (skb != (struct sk_buff *)&(sk)->sk_write_queue);  \
-                    skb = tmp, tmp = skb->next)
+       skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
 
 static inline struct sk_buff *tcp_send_head(struct sock *sk)
 {
        return sk->sk_send_head;
 }
 
+static inline bool tcp_skb_is_last(const struct sock *sk,
+                                  const struct sk_buff *skb)
+{
+       return skb_queue_is_last(&sk->sk_write_queue, skb);
+}
+
 static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
 {
-       sk->sk_send_head = skb->next;
-       if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+       if (tcp_skb_is_last(sk, skb))
                sk->sk_send_head = NULL;
+       else
+               sk->sk_send_head = tcp_write_queue_next(sk, skb);
 }
 
 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
@@ -1269,12 +1265,12 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
        __skb_queue_after(&sk->sk_write_queue, skb, buff);
 }
 
-/* Insert skb between prev and next on the write queue of sk.  */
+/* Insert new before skb on the write queue of sk.  */
 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
                                                  struct sk_buff *skb,
                                                  struct sock *sk)
 {
-       __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+       __skb_queue_before(&sk->sk_write_queue, skb, new);
 
        if (sk->sk_send_head == skb)
                sk->sk_send_head = new;
@@ -1285,12 +1281,6 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
        __skb_unlink(skb, &sk->sk_write_queue);
 }
 
-static inline int tcp_skb_is_last(const struct sock *sk,
-                                 const struct sk_buff *skb)
-{
-       return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
 static inline int tcp_write_queue_empty(struct sock *sk)
 {
        return skb_queue_empty(&sk->sk_write_queue);
@@ -1368,6 +1358,12 @@ extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
 extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
+                                       struct sk_buff *skb);
+extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb);
+extern int tcp_gro_complete(struct sk_buff *skb);
+extern int tcp4_gro_complete(struct sk_buff *skb);
 
 #ifdef CONFIG_PROC_FS
 extern int  tcp4_proc_init(void);
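The new GRO entry points mirror the existing GSO ones: tcp_gro_receive()/tcp_gro_complete() carry the protocol-independent merging logic, while the tcp4_* wrappers add the IPv4 checksum handling. They get wired up through the inet protocol table; a hedged sketch of that registration, modelled on net/ipv4/af_inet.c of this kernel series (exact member set may differ):

#include <net/protocol.h>
#include <net/tcp.h>

/* Sketch of the IPv4 TCP protocol entry with the GRO hooks added next to
 * the existing GSO callbacks (assumed layout, see net/ipv4/af_inet.c).
 */
static const struct net_protocol example_tcp_protocol = {
        .handler        = tcp_v4_rcv,
        .err_handler    = tcp_v4_err,
        .gso_send_check = tcp_v4_gso_send_check,
        .gso_segment    = tcp_tso_segment,
        .gro_receive    = tcp4_gro_receive,
        .gro_complete   = tcp4_gro_complete,
        .no_policy      = 1,
        .netns_ok       = 1,
};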
@@ -1382,10 +1378,8 @@ struct tcp_sock_af_ops {
        int                     (*calc_md5_hash) (char *location,
                                                  struct tcp_md5sig_key *md5,
                                                  struct sock *sk,
-                                                 struct dst_entry *dst,
                                                  struct request_sock *req,
-                                                 struct tcphdr *th,
-                                                 unsigned int len);
+                                                 struct sk_buff *skb);
        int                     (*md5_add) (struct sock *sk,
                                            struct sock *addr_sk,
                                            u8 *newkey,