smp_call_function: get rid of the unused nonatomic/retry argument
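
The third argument of smp_call_function() (called "retry" or "nonatomic"
depending on the architecture) was accepted but never acted upon, so it is
dropped and every caller loses one argument. A before/after sketch of the
prototype, illustrative rather than a verbatim copy of <linux/smp.h>:

        /* before: the unused nonatomic/retry flag sat between info and wait */
        int smp_call_function(void (*func)(void *info), void *info,
                              int nonatomic, int wait);

        /* after: only the wait flag remains */
        int smp_call_function(void (*func)(void *info), void *info, int wait);

flow_cache_flush() below passes wait = 0 and synchronizes through a completion
instead, so the per-CPU flush can run from a tasklet rather than in IPI
context.
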
diff --git a/net/core/flow.c b/net/core/flow.c
index 885a2f6..5cf8105 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
 #include <linux/mutex.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
 #include <linux/security.h>
 
 struct flow_cache_entry {
        struct flow_cache_entry *next;
        u16                     family;
        u8                      dir;
-       struct flowi            key;
        u32                     genid;
-       u32                     sk_sid;
+       struct flowi            key;
        void                    *object;
        atomic_t                *object_ref;
 };
@@ -45,7 +43,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -53,7 +51,7 @@ struct flow_percpu_info {
        int hash_rnd_recalc;
        u32 hash_rnd;
        int count;
-} ____cacheline_aligned;
+};
 static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
 
 #define flow_hash_rnd_recalc(cpu) \
@@ -79,13 +77,21 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 {
        int i;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                flow_hash_rnd_recalc(i) = 1;
 
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
 }
 
+static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+{
+       if (fle->object)
+               atomic_dec(fle->object_ref);
+       kmem_cache_free(flow_cachep, fle);
+       flow_count(cpu)--;
+}
+
 static void __flow_cache_shrink(int cpu, int shrink_to)
 {
        struct flow_cache_entry *fle, **flp;
@@ -101,10 +107,7 @@ static void __flow_cache_shrink(int cpu, int shrink_to)
                }
                while ((fle = *flp) != NULL) {
                        *flp = fle->next;
-                       if (fle->object)
-                               atomic_dec(fle->object_ref);
-                       kmem_cache_free(flow_cachep, fle);
-                       flow_count(cpu)--;
+                       flow_entry_kill(cpu, fle);
                }
        }
 }
@@ -138,8 +141,6 @@ typedef u64 flow_compare_t;
 typedef u32 flow_compare_t;
 #endif
 
-extern void flowi_is_missized(void);
-
 /* I hear what you're saying, use memcmp.  But memcmp cannot make
  * important assumptions that we can here, such as alignment and
  * constant size.
@@ -149,8 +150,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
        flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);
 
-       if (sizeof(struct flowi) % sizeof(flow_compare_t))
-               flowi_is_missized();
+       BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));
 
        k1 = (flow_compare_t *) key1;
        k1_lim = k1 + n_elem;
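
The size check above used to fail at link time: flowi_is_missized() was
declared but defined nowhere, so calling it left an unresolved symbol whenever
sizeof(struct flowi) was not a whole number of flow_compare_t words.
BUILD_BUG_ON() moves the failure to compile time. A sketch of the mechanism,
assuming the classic 2.6-era definition from <linux/kernel.h>:

        /* If cond is nonzero the array size is negative and compilation
         * stops; if cond is zero the whole expression compiles away. */
        #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

Since sizeof(struct flowi) is a compile-time constant the check costs nothing
at run time, preserving the point of the comment above: flow_key_compare() may
assume a constant, word-aligned size that memcmp() cannot.
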
@@ -165,7 +165,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
        return 0;
 }
 
-void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
+void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                        flow_resolve_t resolver)
 {
        struct flow_cache_entry *fle, **head;
@@ -189,7 +189,6 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
        for (fle = *head; fle; fle = fle->next) {
                if (fle->family == family &&
                    fle->dir == dir &&
-                   fle->sk_sid == sk_sid &&
                    flow_key_compare(key, &fle->key) == 0) {
                        if (fle->genid == atomic_read(&flow_cache_genid)) {
                                void *ret = fle->object;
@@ -208,13 +207,12 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
                if (flow_count(cpu) > flow_hwm)
                        flow_cache_shrink(cpu);
 
-               fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+               fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->next = *head;
                        *head = fle;
                        fle->family = family;
                        fle->dir = dir;
-                       fle->sk_sid = sk_sid;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        flow_count(cpu)++;
@@ -223,12 +221,13 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
 
 nocache:
        {
+               int err;
                void *obj;
                atomic_t *obj_ref;
 
-               resolver(key, sk_sid, family, dir, &obj, &obj_ref);
+               err = resolver(key, family, dir, &obj, &obj_ref);
 
-               if (fle) {
+               if (fle && !err) {
                        fle->genid = atomic_read(&flow_cache_genid);
 
                        if (fle->object)
@@ -241,6 +240,8 @@ nocache:
                }
                local_bh_enable();
 
+               if (err)
+                       obj = ERR_PTR(err);
                return obj;
        }
 }
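
flow_cache_lookup() previously had no way to report a resolver failure; the
resolver now returns an int, and a failure is folded into the return value
with ERR_PTR() while the cached entry is left unpopulated (note the
"fle && !err" test). Callers therefore must check with IS_ERR() rather than
only for NULL. A hypothetical caller sketch (my_resolver is a placeholder,
not a function in this file):

        #include <linux/err.h>

        void *obj = flow_cache_lookup(&fl, family, dir, my_resolver);
        if (IS_ERR(obj))
                return PTR_ERR(obj);    /* e.g. an error from a security hook */
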
@@ -291,19 +292,19 @@ void flow_cache_flush(void)
        static DEFINE_MUTEX(flow_flush_sem);
 
        /* Don't want cpus going down or up during this. */
-       lock_cpu_hotplug();
+       get_online_cpus();
        mutex_lock(&flow_flush_sem);
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);
 
        local_bh_disable();
-       smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+       smp_call_function(flow_cache_flush_per_cpu, &info, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();
 
        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
-       unlock_cpu_hotplug();
+       put_online_cpus();
 }
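
lock_cpu_hotplug()/unlock_cpu_hotplug() became get_online_cpus()/
put_online_cpus(); the new names make explicit that this is a read-side hold
on the set of online CPUs, not a global hotplug mutex. The usual pattern,
sketched with a placeholder body:

        get_online_cpus();              /* CPUs can neither come nor go... */
        for_each_online_cpu(cpu)
                do_per_cpu_work(cpu);   /* placeholder per-CPU step */
        put_online_cpus();              /* ...until here */

Here it keeps num_online_cpus() consistent with the set of CPUs the flush is
actually sent to for the duration of the wait.
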
 
 static void __devinit flow_cache_cpu_prepare(int cpu)
@@ -329,16 +330,14 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
 {
-       if (action == CPU_DEAD)
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                __flow_cache_shrink((unsigned long)hcpu, 0);
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
@@ -346,22 +345,17 @@ static int __init flow_cache_init(void)
 
        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
-                                       0, SLAB_HWCACHE_ALIGN,
-                                       NULL, NULL);
-
-       if (!flow_cachep)
-               panic("NET: failed to allocate flow cache slab\n");
-
+                                       0, SLAB_PANIC,
+                                       NULL);
        flow_hash_shift = 10;
        flow_lwm = 2 * flow_hash_size;
        flow_hwm = 4 * flow_hash_size;
 
-       init_timer(&flow_hash_rnd_timer);
-       flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
+       setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                flow_cache_cpu_prepare(i);
 
        hotcpu_notifier(flow_cache_cpu, 0);
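
Two smaller cleanups close out initialization. SLAB_PANIC makes
kmem_cache_create() panic internally on failure, replacing the removed
open-coded check, and setup_timer() collapses the three-line timer setup.
For the 2.6-era timer API, setup_timer(&t, fn, data) is roughly shorthand
for:

        init_timer(&t);
        t.function = fn;
        t.data = data;

so the only remaining manual steps are choosing an expiry and calling
add_timer(), as the code above still does.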