tcp: accept socket after TCP_DEFER_ACCEPT period
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 3c513ce..b1fbe18 100644
@@ -3,8 +3,6 @@
  *
  *  This source is covered by the GNU GPL, the same as all kernel sources.
  *
- *  Version:   $Id: inetpeer.c,v 1.7 2001/09/20 21:22:50 davem Exp $
- *
  *  Authors:   Andrey V. Savochkin <saw@msu.ru>
  */
 
@@ -14,12 +12,12 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/random.h>
-#include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/time.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/net.h>
+#include <net/ip.h>
 #include <net/inetpeer.h>
 
 /*
@@ -61,7 +59,7 @@
  *  4.  Global variable peer_total is modified under the pool lock.
  *  5.  struct inet_peer fields modification:
  *             avl_left, avl_right, avl_parent, avl_height: pool lock
- *             unused_next, unused_prevp: unused node list lock
+ *             unused: unused node list lock
  *             refcnt: atomically against modifications on other CPU;
  *                usually under some other lock to prevent node disappearing
  *             dtime: unused node list lock
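This hunk documents a conversion carried through the rest of the patch: the hand-rolled doubly linked list (unused_next plus an unused_prevp back-pointer) becomes a single embedded struct list_head, so unlink_from_unused() below collapses to one list_del_init() call and the NULL-unused_prevp "not on the list" sentinel disappears. A compilable userspace sketch of the embedding idiom, with mini versions of the <linux/list.h> primitives the patch relies on (the names mirror the kernel's, the bodies are illustrative only):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* self-linked again: "not on any list" */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

struct peer {
	int addr;
	struct list_head unused;	/* embedded node, as in struct inet_peer */
};

int main(void)
{
	struct list_head unused_peers = LIST_HEAD_INIT(unused_peers);
	struct peer p = { .addr = 1 };

	INIT_LIST_HEAD(&p.unused);
	list_add_tail(&p.unused, &unused_peers);
	printf("queued: %d\n", !list_empty(&p.unused));	/* 1 */
	list_del_init(&p.unused);	/* safe to call again later */
	printf("queued: %d\n", !list_empty(&p.unused));	/* 0 */
	return 0;
}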
@@ -72,7 +70,7 @@
 /* Exported for inet_getid inline function.  */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
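Two mechanical modernizations here: the deprecated kmem_cache_t typedef becomes struct kmem_cache, and the cache pointer gains __read_mostly, which places this written-once variable in a separate section so it does not share cache lines with hot, frequently written data.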
@@ -85,26 +83,21 @@ static struct inet_peer *peer_root = peer_avl_empty;
 static DEFINE_RWLOCK(peer_pool_lock);
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
-static volatile int peer_total;
+static int peer_total;
 /* Exported for sysctl_net_ipv4.  */
-int inet_peer_threshold = 65536 + 128; /* start to throw entries more
+int inet_peer_threshold __read_mostly = 65536 + 128;   /* start to throw entries more
                                         * aggressively at this stage */
-int inet_peer_minttl = 120 * HZ;       /* TTL under high load: 120 sec */
-int inet_peer_maxttl = 10 * 60 * HZ;   /* usual time to live: 10 min */
+int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
+int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;     /* usual time to live: 10 min */
+int inet_peer_gc_mintime __read_mostly = 10 * HZ;
+int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function.  */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+static LIST_HEAD(unused_peers);
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
-static struct timer_list peer_periodic_timer =
-       TIMER_INITIALIZER(peer_check_expire, 0, 0);
+static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
 
-/* Exported for sysctl_net_ipv4.  */
-int inet_peer_gc_mintime = 10 * HZ,
-    inet_peer_gc_maxtime = 120 * HZ;
 
 /* Called from ip_output.c:ip_init  */
 void __init inet_initpeers(void)
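Several things happen in this hunk. peer_total loses its volatile qualifier, which was never a substitute for locking and is redundant now that every modification happens under the pool lock. The sysctl tunables gain __read_mostly, and the two GC-interval knobs move up next to the TTL ones. The open-coded head/tail pair for the unused list collapses into a LIST_HEAD, whose lock can become static because inet_putpeer() moves into this file (see the bottom of the patch). DEFINE_TIMER() replaces the older TIMER_INITIALIZER() spelling; the two are equivalent, the former is the current idiom. Finally, PEER_MAX_CLEANUP_WORK goes away because the cleanup loop is re-bounded by time rather than by entry count, as shown near the end of the patch.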
@@ -126,11 +119,8 @@ void __init inet_initpeers(void)
 
        peer_cachep = kmem_cache_create("inet_peer_cache",
                        sizeof(struct inet_peer),
-                       0, SLAB_HWCACHE_ALIGN,
-                       NULL, NULL);
-
-       if (!peer_cachep)
-               panic("cannot create inet_peer_cache");
+                       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+                       NULL);
 
        /* All the timers, started at system startup tend
           to synchronize. Perturb it a bit.
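Passing SLAB_PANIC makes the slab allocator itself panic if cache creation fails at boot, so the explicit NULL check and panic() call become dead weight; the argument list also shrinks by one NULL, reflecting the removal of the destructor parameter from kmem_cache_create(). A userspace analogy of folding a must-not-fail check into the allocator (xmalloc is a made-up helper, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Central "cannot fail" allocator: callers skip the NULL check,
 * mirroring what SLAB_PANIC does for kmem_cache_create(). */
static void *xmalloc(size_t n)
{
	void *p = malloc(n);

	if (!p) {
		fprintf(stderr, "out of memory allocating %zu bytes\n", n);
		abort();	/* the panic() analogue */
	}
	return p;
}

int main(void)
{
	char *buf = xmalloc(64);	/* no error handling needed here */

	free(buf);
	return 0;
}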
@@ -145,32 +135,31 @@ void __init inet_initpeers(void)
 static void unlink_from_unused(struct inet_peer *p)
 {
        spin_lock_bh(&inet_peer_unused_lock);
-       if (p->unused_prevp != NULL) {
-               /* On unused list. */
-               *p->unused_prevp = p->unused_next;
-               if (p->unused_next != NULL)
-                       p->unused_next->unused_prevp = p->unused_prevp;
-               else
-                       inet_peer_unused_tailp = p->unused_prevp;
-               p->unused_prevp = NULL; /* mark it as removed */
-       }
+       list_del_init(&p->unused);
        spin_unlock_bh(&inet_peer_unused_lock);
 }
 
-/* Called with local BH disabled and the pool lock held. */
-#define lookup(daddr)                                          \
+/*
+ * Called with local BH disabled and the pool lock held.
+ * _stack is known to be NULL or not at compile time,
+ * so compiler will optimize the if (_stack) tests.
+ */
+#define lookup(_daddr, _stack)                                         \
 ({                                                             \
        struct inet_peer *u, **v;                               \
-       stackptr = stack;                                       \
-       *stackptr++ = &peer_root;                               \
+       if (_stack != NULL) {                                   \
+               stackptr = _stack;                              \
+               *stackptr++ = &peer_root;                       \
+       }                                                       \
        for (u = peer_root; u != peer_avl_empty; ) {            \
-               if (daddr == u->v4daddr)                        \
+               if (_daddr == u->v4daddr)                       \
                        break;                                  \
-               if (daddr < u->v4daddr)                         \
+               if ((__force __u32)_daddr < (__force __u32)u->v4daddr)  \
                        v = &u->avl_left;                       \
                else                                            \
                        v = &u->avl_right;                      \
-               *stackptr++ = v;                                \
+               if (_stack != NULL)                             \
+                       *stackptr++ = v;                        \
                u = *v;                                         \
        }                                                       \
        u;                                                      \
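The lookup() macro now takes the AVL ancestor stack as a parameter. Every call site passes either a real array or a literal NULL, so after constant folding the read path (lookup(daddr, NULL) in inet_getpeer() below) carries none of the stack bookkeeping that the insert and delete paths need for rebalancing. The (__force __u32) casts merely tell sparse that ordering raw big-endian addresses numerically is intentional. A compilable userspace sketch of the optional-stack trick, using GCC statement expressions as the kernel does (plain unbalanced BST, illustrative names):

#include <stddef.h>
#include <stdio.h>

struct node { int key; struct node *left, *right; };

#define MAXDEPTH 40

/* _stack is a literal array or NULL at each call site, so the
 * compiler folds the if (_stack != NULL) tests away entirely. */
#define bst_lookup(_key, _stack, _sp)				\
({								\
	struct node *u, **v;					\
	if (_stack != NULL) {					\
		_sp = _stack;					\
		*_sp++ = &root;					\
	}							\
	for (u = root; u != NULL; ) {				\
		if ((_key) == u->key)				\
			break;					\
		v = (_key) < u->key ? &u->left : &u->right;	\
		if (_stack != NULL)				\
			*_sp++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

static struct node b = { 7, NULL, NULL };
static struct node a = { 5, NULL, &b };
static struct node *root = &a;

int main(void)
{
	struct node **stack[MAXDEPTH], ***sp = stack;
	struct node *hit;

	hit = bst_lookup(7, (struct node ***)NULL, sp);	/* read path */
	printf("found: %d\n", hit ? hit->key : -1);

	hit = bst_lookup(7, stack, sp);			/* write path */
	printf("ancestor slots recorded: %td\n", sp - stack);
	return 0;
}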
@@ -294,7 +283,7 @@ static void unlink_from_pool(struct inet_peer *p)
        if (atomic_read(&p->refcnt) == 1) {
                struct inet_peer **stack[PEER_MAXDEPTH];
                struct inet_peer ***stackptr, ***delp;
-               if (lookup(p->v4daddr) != p)
+               if (lookup(p->v4daddr, stack) != p)
                        BUG();
                delp = stackptr - 1; /* *delp[0] == p */
                if (p->avl_left == peer_avl_empty) {
@@ -304,8 +293,7 @@ static void unlink_from_pool(struct inet_peer *p)
                        /* look for a node to insert instead of p */
                        struct inet_peer *t;
                        t = lookup_rightempty(p);
-                       if (*stackptr[-1] != t)
-                               BUG();
+                       BUG_ON(*stackptr[-1] != t);
                        **--stackptr = t->avl_left;
                        /* t is removed, t->v4daddr > x->v4daddr for any
                         * x in p->avl_left subtree.
@@ -314,8 +302,7 @@ static void unlink_from_pool(struct inet_peer *p)
                        t->avl_left = p->avl_left;
                        t->avl_right = p->avl_right;
                        t->avl_height = p->avl_height;
-                       if (delp[1] != &p->avl_left)
-                               BUG();
+                       BUG_ON(delp[1] != &p->avl_left);
                        delp[1] = &t->avl_left; /* was &p->avl_left */
                }
                peer_avl_rebalance(stack, stackptr);
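The three hunks above are mechanical: the deletion path now passes its stack to the two-argument lookup(), since unlike the read path it genuinely needs the ancestor chain for rebalancing, and the open-coded "if (cond) BUG();" pairs become the idiomatic BUG_ON(cond).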
@@ -339,23 +326,24 @@ static void unlink_from_pool(struct inet_peer *p)
 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
-       struct inet_peer *p;
+       struct inet_peer *p = NULL;
 
        /* Remove the first entry from the list of unused nodes. */
        spin_lock_bh(&inet_peer_unused_lock);
-       p = inet_peer_unused_head;
-       if (p != NULL) {
-               if (time_after(p->dtime + ttl, jiffies)) {
+       if (!list_empty(&unused_peers)) {
+               __u32 delta;
+
+               p = list_first_entry(&unused_peers, struct inet_peer, unused);
+               delta = (__u32)jiffies - p->dtime;
+
+               if (delta < ttl) {
                        /* Do not prune fresh entries. */
                        spin_unlock_bh(&inet_peer_unused_lock);
                        return -1;
                }
-               inet_peer_unused_head = p->unused_next;
-               if (p->unused_next != NULL)
-                       p->unused_next->unused_prevp = p->unused_prevp;
-               else
-                       inet_peer_unused_tailp = p->unused_prevp;
-               p->unused_prevp = NULL; /* mark as not on the list */
+
+               list_del_init(&p->unused);
+
                /* Grab an extra reference to prevent node disappearing
                 * before unlink_from_pool() call. */
                atomic_inc(&p->refcnt);
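The freshness test changes shape: instead of time_after(p->dtime + ttl, jiffies), the code stores dtime as a __u32 snapshot of jiffies and computes the unsigned difference, which yields the elapsed ticks modulo 2^32 and therefore stays correct across a counter wrap. A small self-checking sketch of that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Unsigned subtraction wraps modulo 2^32, so the age is right even
 * when the counter overflowed between the two samples. */
static uint32_t age(uint32_t now, uint32_t then)
{
	return now - then;
}

int main(void)
{
	uint32_t then = 0xfffffff0u;	/* stamped just before the wrap */
	uint32_t now  = 0x00000010u;	/* sampled just after the wrap */

	assert(age(now, then) == 0x20);	/* 32 ticks, not ~4 billion */
	printf("age: %u ticks\n", age(now, then));
	return 0;
}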
@@ -373,14 +361,14 @@ static int cleanup_once(unsigned long ttl)
 }
 
 /* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__u32 daddr, int create)
+struct inet_peer *inet_getpeer(__be32 daddr, int create)
 {
        struct inet_peer *p, *n;
        struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
 
        /* Look up for the address quickly. */
        read_lock_bh(&peer_pool_lock);
-       p = lookup(daddr);
+       p = lookup(daddr, NULL);
        if (p != peer_avl_empty)
                atomic_inc(&p->refcnt);
        read_unlock_bh(&peer_pool_lock);
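On the fast path the stack argument is a literal NULL: under the shared read lock no insertion or rebalancing can follow, so the ancestor bookkeeping would be pure overhead, and the macro change above makes it vanish at compile time. The __u32 to __be32 prototype change is an annotation for sparse; the stored address is still compared as a raw 32-bit value inside lookup().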
@@ -401,18 +389,19 @@ struct inet_peer *inet_getpeer(__u32 daddr, int create)
                return NULL;
        n->v4daddr = daddr;
        atomic_set(&n->refcnt, 1);
+       atomic_set(&n->rid, 0);
        n->ip_id_count = secure_ip_id(daddr);
        n->tcp_ts_stamp = 0;
 
        write_lock_bh(&peer_pool_lock);
        /* Check if an entry has suddenly appeared. */
-       p = lookup(daddr);
+       p = lookup(daddr, stack);
        if (p != peer_avl_empty)
                goto out_free;
 
        /* Link the node. */
        link_to_pool(n);
-       n->unused_prevp = NULL; /* not on the list */
+       INIT_LIST_HEAD(&n->unused);
        peer_total++;
        write_unlock_bh(&peer_pool_lock);
 
@@ -436,7 +425,7 @@ out_free:
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-       int i;
+       unsigned long now = jiffies;
        int ttl;
 
        if (peer_total >= inet_peer_threshold)
@@ -445,7 +434,10 @@ static void peer_check_expire(unsigned long dummy)
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        peer_total / inet_peer_threshold * HZ;
-       for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+       while (!cleanup_once(ttl)) {
+               if (jiffies != now)
+                       break;
+       }
 
        /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
         * interval depending on the total number of entries (more entries,
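The cleanup loop is re-bounded: rather than a fixed PEER_MAX_CLEANUP_WORK budget of 30 entries per timer run, it keeps calling cleanup_once() only while jiffies still equals the snapshot taken in now, that is, for at most one tick. The ttl interpolation above it is unchanged: with the default minttl of 120 s and maxttl of 600 s, a pool at half the threshold gets 600 - 480 * 0.5 = 360 seconds (times HZ), and dividing by HZ before multiplying by peer_total keeps the intermediate product from overflowing.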
@@ -459,3 +451,13 @@ static void peer_check_expire(unsigned long dummy)
                                peer_total / inet_peer_threshold * HZ;
        add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+       spin_lock_bh(&inet_peer_unused_lock);
+       if (atomic_dec_and_test(&p->refcnt)) {
+               list_add_tail(&p->unused, &unused_peers);
+               p->dtime = (__u32)jiffies;
+       }
+       spin_unlock_bh(&inet_peer_unused_lock);
+}
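inet_putpeer() moves here from net/inetpeer.h, where it was an inline built around the now-gone inet_peer_unused_tailp (hence the stale "Exported for inet_putpeer inline function" comment trimmed above). Taking the unused-list lock around the refcount drop makes hitting zero and re-queueing onto unused_peers one atomic step with respect to cleanup_once(), and dtime records, in 32-bit jiffies, when the node last became unreferenced, which is exactly what the age test in cleanup_once() measures against.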