netfilter: iptables: lock free counters
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index d64594b..34af7bb 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -382,10 +382,12 @@ ip6t_do_table(struct sk_buff *skb,
        mtpar.family  = tgpar.family = NFPROTO_IPV6;
        tgpar.hooknum = hook;
 
-       read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-       private = table->private;
-       table_base = (void *)private->entries[smp_processor_id()];
+
+       rcu_read_lock();
+       private = rcu_dereference(table->private);
+       table_base = rcu_dereference(private->entries[smp_processor_id()]);
+
        e = get_entry(table_base, private->hook_entry[hook]);
 
        /* For return from builtin chain */
@@ -483,7 +485,7 @@ ip6t_do_table(struct sk_buff *skb,
 #ifdef CONFIG_NETFILTER_DEBUG
        ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
 #endif
-       read_unlock_bh(&table->lock);
+       rcu_read_unlock();
 
 #ifdef DEBUG_ALLOW_ALL
        return NF_ACCEPT;
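
The two hunks above are the point of the patch: the per-table reader-writer
lock disappears from the packet-processing fast path. Readers now run under
rcu_read_lock() and fetch the ruleset with rcu_dereference(), so rule
traversal no longer bounces a lock cacheline between CPUs; each CPU walks its
own copy of the entries. A minimal sketch of the read-side pattern, using
illustrative stand-in types (demo_info, demo_table, demo_do_table are not the
real xtables structures):

#include <linux/rcupdate.h>
#include <linux/smp.h>

/* Stand-ins for xt_table / xt_table_info, for illustration only. */
struct demo_info {
	void *entries[NR_CPUS];		/* one copy of the ruleset per CPU */
};

struct demo_table {
	struct demo_info *private;
};

/* Packet path: lock-free read side.  No sleeping is allowed between
 * rcu_read_lock() and rcu_read_unlock(). */
static unsigned int demo_do_table(struct demo_table *t)
{
	const struct demo_info *info;
	const void *table_base;

	rcu_read_lock();
	info = rcu_dereference(t->private);
	table_base = info->entries[smp_processor_id()];
	/* ... walk the rules at table_base, bumping this CPU's counters ... */
	rcu_read_unlock();

	return 0;	/* verdict: NF_ACCEPT, NF_DROP, ... */
}
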
@@ -964,11 +966,64 @@ get_counters(const struct xt_table_info *t,
        }
 }
 
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct ip6t_entry *e,
+                    const struct xt_counters addme[],
+                    unsigned int *i)
+{
+       ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+       (*i)++;
+       return 0;
+}
+
+/* Take values from counters and add them back onto the current cpu */
+static void put_counters(struct xt_table_info *t,
+                        const struct xt_counters counters[])
+{
+       unsigned int i, cpu;
+
+       local_bh_disable();
+       cpu = smp_processor_id();
+       i = 0;
+       IP6T_ENTRY_ITERATE(t->entries[cpu],
+                          t->size,
+                          add_counter_to_entry,
+                          counters,
+                          &i);
+       local_bh_enable();
+}
+
+static inline int
+zero_entry_counter(struct ip6t_entry *e, void *arg)
+{
+       e->counters.bcnt = 0;
+       e->counters.pcnt = 0;
+       return 0;
+}
+
+static void
+clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
+{
+       unsigned int cpu;
+       const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
+
+       memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+       for_each_possible_cpu(cpu) {
+               memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
+               IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
+                                  zero_entry_counter, NULL);
+       }
+}
+
 static struct xt_counters *alloc_counters(struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = table->private;
+       struct xt_table_info *private = table->private;
+       struct xt_table_info *info;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
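
A note on the "overflow works its fey magic" comment above the helpers
add_counter_to_entry() and put_counters(): the counters are fixed-width
integers and every read sums the per-CPU copies, so it does not matter which
CPU's copy absorbs a restored total; wraparound in any single copy cancels
out in the modular sum. A userspace illustration (not kernel code) with
deliberately tiny 8-bit counters, where the kernel's are 64-bit:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two per-CPU packet counters, 8-bit so wraparound is visible. */
	uint8_t percpu[2] = { 250, 20 };

	/* Summing wraps: 250 + 20 = 270 = 14 (mod 256). */
	uint8_t total = (uint8_t)(percpu[0] + percpu[1]);

	/* Restore the whole total onto CPU 0 of a zeroed table, as
	 * put_counters()/add_counter_to_entry() do above. */
	uint8_t fresh[2] = { 0, 0 };
	fresh[0] = (uint8_t)(fresh[0] + total);

	/* The modular sum is preserved exactly: both print 14. */
	printf("old sum %u, new sum %u\n",
	       (unsigned)total, (unsigned)(uint8_t)(fresh[0] + fresh[1]));
	return 0;
}
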
@@ -977,14 +1032,28 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
        counters = vmalloc_node(countersize, numa_node_id());
 
        if (counters == NULL)
-               return ERR_PTR(-ENOMEM);
+               goto nomem;
+
+       info = xt_alloc_table_info(private->size);
+       if (!info)
+               goto free_counters;
+
+       clone_counters(info, private);
+
+       mutex_lock(&table->lock);
+       xt_table_entry_swap_rcu(private, info);
+       synchronize_net();      /* Wait until smoke has cleared */
+
+       get_counters(info, counters);
+       put_counters(private, counters);
+       mutex_unlock(&table->lock);
 
-       /* First, sum counters... */
-       write_lock_bh(&table->lock);
-       get_counters(private, counters);
-       write_unlock_bh(&table->lock);
+       xt_free_table_info(info);
 
        return counters;
+
+ free_counters:
+       vfree(counters);
+ nomem:
+       return ERR_PTR(-ENOMEM);
 }
 
 static int
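
The rewritten alloc_counters() above is where writers pay for the lock-free
read side. Taking a stable snapshot is now a five-step dance: clone the
ruleset with zeroed counters, swap the clone in under the table mutex, let
synchronize_net() wait out any readers still traversing the old entries, sum
the now-quiescent old copy, and fold the totals back into the live table so
counting resumes where it left off. Condensed from the hunk above (same
names, error handling elided):

	mutex_lock(&table->lock);
	/* Swap the per-CPU entry blobs: the zeroed clone goes live and
	 * the old, counted entries end up in 'info'. */
	xt_table_entry_swap_rcu(private, info);
	/* Wait for readers that may still be walking the old entries. */
	synchronize_net();
	/* The old entries are now quiescent, so the sums are exact. */
	get_counters(info, counters);
	/* Add the totals back onto the live table's current-CPU copy. */
	put_counters(private, counters);
	mutex_unlock(&table->lock);
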
@@ -1351,28 +1420,6 @@ do_replace(struct net *net, void __user *user, unsigned int len)
        return ret;
 }
 
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static inline int
-add_counter_to_entry(struct ip6t_entry *e,
-                    const struct xt_counters addme[],
-                    unsigned int *i)
-{
-#if 0
-       duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
-                *i,
-                (long unsigned int)e->counters.pcnt,
-                (long unsigned int)e->counters.bcnt,
-                (long unsigned int)addme[*i].pcnt,
-                (long unsigned int)addme[*i].bcnt);
-#endif
-
-       ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-       (*i)++;
-       return 0;
-}
-
 static int
 do_add_counters(struct net *net, void __user *user, unsigned int len,
                int compat)
@@ -1433,13 +1480,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
                goto free;
        }
 
-       write_lock_bh(&t->lock);
+       mutex_lock(&t->lock);
        private = t->private;
        if (private->number != num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
 
+       preempt_disable();
        i = 0;
        /* Choose the copy that is on our node */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
@@ -1448,8 +1496,9 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
                          add_counter_to_entry,
                          paddc,
                          &i);
+       preempt_enable();
  unlock_up_free:
-       write_unlock_bh(&t->lock);
+       mutex_unlock(&t->lock);
        xt_table_unlock(t);
        module_put(t->me);
  free:
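
Finally, do_add_counters() shows the writer-side discipline that replaces
write_lock_bh(): the table mutex serializes control-path writers against each
other and against the snapshot in alloc_counters(), while preempt_disable()
pins the task to one CPU so that raw_smp_processor_id() stays valid for the
whole walk over that CPU's copy of the entries. Condensed from the hunk
above:

	mutex_lock(&t->lock);		/* serialize control-path writers */
	preempt_disable();		/* pin to this CPU for the walk below */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size,
			   add_counter_to_entry, paddc, &i);
	preempt_enable();
	mutex_unlock(&t->lock);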