[PATCH] slab: remove kmem_cache_t
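
Replace the deprecated kmem_cache_t typedef with struct kmem_cache in
struct request_sock_ops, and have reqsk_alloc() pass GFP_ATOMIC instead
of the old SLAB_ATOMIC alias.
---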
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 08a8fd1..7aed02c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -16,7 +16,9 @@
 #define _REQUEST_SOCK_H
 
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
+
 #include <net/sock.h>
 
 struct request_sock;
@@ -26,14 +28,15 @@ struct proto;
 
 struct request_sock_ops {
        int             family;
-       kmem_cache_t    *slab;
        int             obj_size;
+       struct kmem_cache       *slab;
        int             (*rtx_syn_ack)(struct sock *sk,
                                       struct request_sock *req,
                                       struct dst_entry *dst);
        void            (*send_ack)(struct sk_buff *skb,
                                    struct request_sock *req);
-       void            (*send_reset)(struct sk_buff *skb);
+       void            (*send_reset)(struct sock *sk,
+                                     struct sk_buff *skb);
        void            (*destructor)(struct request_sock *req);
 };
 
@@ -49,13 +52,15 @@ struct request_sock {
        u32                             rcv_wnd;          /* rcv_wnd offered first time */
        u32                             ts_recent;
        unsigned long                   expires;
-       struct request_sock_ops         *rsk_ops;
+       const struct request_sock_ops   *rsk_ops;
        struct sock                     *sk;
+       u32                             secid;
+       u32                             peer_secid;
 };
 
-static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
-       struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+       struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
        if (req != NULL)
                req->rsk_ops = ops;
@@ -74,4 +79,186 @@ static inline void reqsk_free(struct request_sock *req)
        __reqsk_free(req);
 }
 
+extern int sysctl_max_syn_backlog;
+
+/** struct listen_sock - listen state
+ *
+ * @max_qlen_log - log_2 of the maximal number of queued SYNs/REQUESTs
+ */
+struct listen_sock {
+       u8                      max_qlen_log;
+       /* 3-byte hole, try to use */
+       int                     qlen;
+       int                     qlen_young;
+       int                     clock_hand;
+       u32                     hash_rnd;
+       u32                     nr_table_entries;
+       struct request_sock     *syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @rskq_defer_accept - User waits for some data after accept()
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is needed only so the proc interface can browse the listening
+ * hash without grabbing the main sock lock (otherwise it is deadlock prone).
+ *
+ * This lock is acquired in read mode only from the listening_get_next()
+ * seq_file op, and in write mode _only_ from code that is actively changing
+ * rskq_accept_head. Readers that hold the master sock lock need not take it
+ * in read mode as well, since rskq_accept_head writes are always protected
+ * by the main sock lock.
+ */
+struct request_sock_queue {
+       struct request_sock     *rskq_accept_head;
+       struct request_sock     *rskq_accept_tail;
+       rwlock_t                syn_wait_lock;
+       u8                      rskq_defer_accept;
+       /* 3-byte hole, try to pack */
+       struct listen_sock      *listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+                            unsigned int nr_table_entries);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+       struct listen_sock *lopt;
+
+       write_lock_bh(&queue->syn_wait_lock);
+       lopt = queue->listen_opt;
+       queue->listen_opt = NULL;
+       write_unlock_bh(&queue->syn_wait_lock);
+
+       return lopt;
+}
+
+static inline void __reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+       kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+
+static inline struct request_sock *
+       reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+       struct request_sock *req = queue->rskq_accept_head;
+
+       queue->rskq_accept_head = NULL;
+       return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+       return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+                                     struct request_sock *req,
+                                     struct request_sock **prev_req)
+{
+       write_lock(&queue->syn_wait_lock);
+       *prev_req = req->dl_next;
+       write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+                                  struct request_sock *req,
+                                  struct sock *parent,
+                                  struct sock *child)
+{
+       req->sk = child;
+       sk_acceptq_added(parent);
+
+       if (queue->rskq_accept_head == NULL)
+               queue->rskq_accept_head = req;
+       else
+               queue->rskq_accept_tail->dl_next = req;
+
+       queue->rskq_accept_tail = req;
+       req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+       struct request_sock *req = queue->rskq_accept_head;
+
+       BUG_TRAP(req != NULL);
+
+       queue->rskq_accept_head = req->dl_next;
+       if (queue->rskq_accept_head == NULL)
+               queue->rskq_accept_tail = NULL;
+
+       return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+                                                struct sock *parent)
+{
+       struct request_sock *req = reqsk_queue_remove(queue);
+       struct sock *child = req->sk;
+
+       BUG_TRAP(child != NULL);
+
+       sk_acceptq_removed(parent);
+       __reqsk_free(req);
+       return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+                                     struct request_sock *req)
+{
+       struct listen_sock *lopt = queue->listen_opt;
+
+       if (req->retrans == 0)
+               --lopt->qlen_young;
+
+       return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+       struct listen_sock *lopt = queue->listen_opt;
+       const int prev_qlen = lopt->qlen;
+
+       lopt->qlen_young++;
+       lopt->qlen++;
+       return prev_qlen;
+}
+
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+{
+       return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+{
+       return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+       return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+                                       u32 hash, struct request_sock *req,
+                                       unsigned long timeout)
+{
+       struct listen_sock *lopt = queue->listen_opt;
+
+       req->expires = jiffies + timeout;
+       req->retrans = 0;
+       req->sk = NULL;
+       req->dl_next = lopt->syn_table[hash];
+
+       write_lock(&queue->syn_wait_lock);
+       lopt->syn_table[hash] = req;
+       write_unlock(&queue->syn_wait_lock);
+}
+
 #endif /* _REQUEST_SOCK_H */
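
The accept queue added above is a plain singly-linked FIFO threaded through
dl_next: rskq_accept_head/rskq_accept_tail give O(1) enqueue of newly
established children and O(1) dequeue from accept(). Below is a minimal
userspace sketch of that discipline; it only mirrors the helpers' names and
logic and is an illustration, not kernel code.

/* Userspace model of the rskq_accept_head/rskq_accept_tail FIFO.
 * Illustration only: struct layout and helper names are borrowed from
 * the header above, everything else is hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct request_sock {
	struct request_sock *dl_next;	/* next established child in the FIFO */
	int id;				/* stand-in for the child socket */
};

struct request_sock_queue {
	struct request_sock *rskq_accept_head;
	struct request_sock *rskq_accept_tail;
};

/* Mirrors reqsk_queue_add(): append at the tail in O(1). */
static void queue_add(struct request_sock_queue *q, struct request_sock *req)
{
	if (q->rskq_accept_head == NULL)
		q->rskq_accept_head = req;
	else
		q->rskq_accept_tail->dl_next = req;
	q->rskq_accept_tail = req;
	req->dl_next = NULL;
}

/* Mirrors reqsk_queue_remove(): pop the head in O(1). */
static struct request_sock *queue_remove(struct request_sock_queue *q)
{
	struct request_sock *req = q->rskq_accept_head;

	assert(req != NULL);		/* the kernel uses BUG_TRAP() here */
	q->rskq_accept_head = req->dl_next;
	if (q->rskq_accept_head == NULL)
		q->rskq_accept_tail = NULL;
	return req;
}

int main(void)
{
	struct request_sock_queue q = { NULL, NULL };
	struct request_sock a = { .id = 1 }, b = { .id = 2 };

	queue_add(&q, &a);
	queue_add(&q, &b);
	printf("%d\n", queue_remove(&q)->id);	/* 1: FIFO order */
	printf("%d\n", queue_remove(&q)->id);	/* 2 */
	return 0;
}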
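
reqsk_queue_is_full() relies on the SYN backlog limit being a power of two:
qlen >> max_qlen_log becomes nonzero exactly when qlen reaches
2^max_qlen_log, so fullness is tested with a single shift and no comparison.
A standalone check of that equivalence (again an illustration, not kernel
code; max_qlen_log = 8 is an arbitrary example value):

/* (qlen >> max_qlen_log) != 0  <=>  qlen >= (1 << max_qlen_log) */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int max_qlen_log = 8;	/* queue limit 2^8 = 256 */
	unsigned int qlen;

	for (qlen = 0; qlen < 1024; qlen++)
		assert((qlen >> max_qlen_log != 0) ==
		       (qlen >= (1u << max_qlen_log)));
	printf("full from qlen=%u\n", 1u << max_qlen_log);	/* 256 */
	return 0;
}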