perf events: Don't generate events for the idle task when exclude_idle is set
diff --git a/kernel/smp.c b/kernel/smp.c
index 94188b8..c9d1c78 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,8 +29,7 @@ enum {
 
 struct call_function_data {
        struct call_single_data csd;
-       spinlock_t              lock;
-       unsigned int            refs;
+       atomic_t                refs;
        cpumask_var_t           cpumask;
 };
 
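This struct change is the core of the conversion: the per-entry lock existed
only to serialize updates to refs and the cpumask, and both can be driven by
atomic operations instead. A minimal userspace sketch of the resulting
last-reference pattern, using C11 atomics rather than the kernel's atomic_t
API (all names here are illustrative, not from the patch):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-in for call_function_data->refs. */
    static atomic_int refs;

    /* Each finishing CPU drops one reference; exactly one caller sees
     * the count reach zero and may unlink the entry, with no lock held. */
    static int put_ref(void)
    {
            return atomic_fetch_sub(&refs, 1) - 1 == 0;
    }

    int main(void)
    {
            atomic_store(&refs, 3);                 /* three pending CPUs */
            for (int i = 0; i < 3; i++)
                    if (put_ref())
                            printf("call %d dropped the last reference\n", i);
            return 0;
    }
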
@@ -39,9 +38,7 @@ struct call_single_queue {
        spinlock_t              lock;
 };
 
-static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
-       .lock                   = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
-};
+static DEFINE_PER_CPU(struct call_function_data, cfd_data);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -177,6 +174,11 @@ void generic_smp_call_function_interrupt(void)
        int cpu = get_cpu();
 
        /*
+        * Shouldn't receive this interrupt on a CPU that is not yet online.
+        */
+       WARN_ON_ONCE(!cpu_online(cpu));
+
+       /*
         * Ensure entry is visible on call_function_queue after we have
         * entered the IPI. See comment in smp_call_function_many.
         * If we don't have this, then we may miss an entry on the list
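The comment above describes a pairing contract: the sender must make its queue
entry visible before raising the IPI, and the handler must order its entry
into the interrupt before walking the queue, or an entry can be missed. A
hedged two-thread sketch of that release/acquire pairing in C11 (the kernel
uses smp_mb(); the flag and names below are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;                 /* stands in for the queued entry */
    static atomic_bool ipi_pending;     /* stands in for the IPI itself   */

    static void *handler(void *arg)
    {
            (void)arg;
            /* Acquire pairs with the sender's release: once the flag is
             * seen, the data written before it is guaranteed visible. */
            while (!atomic_load_explicit(&ipi_pending, memory_order_acquire))
                    ;
            printf("handler saw payload %d\n", payload);    /* always 42 */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;
            pthread_create(&t, NULL, handler, NULL);
            payload = 42;   /* publish the entry first ...                */
            atomic_store_explicit(&ipi_pending, true,
                                  memory_order_release);
                            /* ... then raise the "interrupt"             */
            pthread_join(t, NULL);
            return 0;
    }
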
@@ -191,25 +193,18 @@ void generic_smp_call_function_interrupt(void)
        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
                int refs;
 
-               spin_lock(&data->lock);
-               if (!cpumask_test_cpu(cpu, data->cpumask)) {
-                       spin_unlock(&data->lock);
+               if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
                        continue;
-               }
-               cpumask_clear_cpu(cpu, data->cpumask);
-               spin_unlock(&data->lock);
 
                data->csd.func(data->csd.info);
 
-               spin_lock(&data->lock);
-               WARN_ON(data->refs == 0);
-               refs = --data->refs;
+               refs = atomic_dec_return(&data->refs);
+               WARN_ON(refs < 0);
                if (!refs) {
                        spin_lock(&call_function.lock);
                        list_del_rcu(&data->csd.list);
                        spin_unlock(&call_function.lock);
                }
-               spin_unlock(&data->lock);
 
                if (refs)
                        continue;
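The rewritten loop folds the old lock/test/clear/unlock sequence into a single
atomic test-and-clear of the handler's own bit, and last-reference detection
moves to atomic_dec_return(). A userspace sketch of the test-and-clear idiom
on a word-sized mask (C11; a real cpumask can span multiple words, so this is
only the single-word analogue):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong pending_mask;   /* stand-in for data->cpumask */

    /* Atomically clear this cpu's bit and report whether we owned it --
     * the single-word analogue of cpumask_test_and_clear_cpu(). */
    static int test_and_clear(int cpu)
    {
            unsigned long bit = 1UL << cpu;
            return (atomic_fetch_and(&pending_mask, ~bit) & bit) != 0;
    }

    int main(void)
    {
            atomic_store(&pending_mask, 0x5UL);       /* cpus 0, 2 pending */
            printf("cpu0: %d\n", test_and_clear(0));  /* 1: bit was ours   */
            printf("cpu0: %d\n", test_and_clear(0));  /* 0: already gone   */
            printf("cpu1: %d\n", test_and_clear(1));  /* 0: never pending  */
            return 0;
    }
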
@@ -230,6 +225,11 @@ void generic_smp_call_function_single_interrupt(void)
        unsigned int data_flags;
        LIST_HEAD(list);
 
+       /*
+        * Shouldn't receive this interrupt on a CPU that is not yet online.
+        */
+       WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
        spin_lock(&q->lock);
        list_replace_init(&q->list, &list);
        spin_unlock(&q->lock);
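The context lines above show the classic splice pattern this handler relies
on: hold the queue lock just long enough to move the whole pending list onto
a private head, then run the callbacks with the lock dropped so new work can
be queued concurrently. A hedged pthread sketch of the same idea (the list
layout and names are illustrative, not the kernel's list_head API):

    #include <pthread.h>
    #include <stdio.h>

    struct work { struct work *next; void (*func)(void *); void *info; };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct work *q_head;             /* stand-in for q->list */

    static void drain_queue(void)
    {
            /* Splice everything out under the lock ... */
            pthread_mutex_lock(&q_lock);
            struct work *list = q_head;
            q_head = NULL;
            pthread_mutex_unlock(&q_lock);

            /* ... then process it with the lock dropped. */
            while (list) {
                    struct work *w = list;
                    list = w->next;
                    w->func(w->info);
            }
    }

    static void hello(void *info) { printf("%s\n", (const char *)info); }

    int main(void)
    {
            struct work w = { .next = NULL, .func = hello, .info = "queued" };
            pthread_mutex_lock(&q_lock);
            w.next = q_head;
            q_head = &w;
            pthread_mutex_unlock(&q_lock);
            drain_queue();
            return 0;
    }
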
@@ -285,8 +285,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
         */
        this_cpu = get_cpu();
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow CPUs that are not yet online, though, as no one else can
+        * send an smp call function interrupt to such a CPU, so deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+                    && !oops_in_progress);
 
        if (cpu == this_cpu) {
                local_irq_save(flags);
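The relaxed assertion encodes the reasoning in the new comment: a CPU that is
not yet in cpu_online_mask cannot be the target of anyone else's
call-function IPIs, so even a synchronous (wait == 1) call with interrupts
disabled cannot deadlock there. The intended beneficiary is early CPU
bringup; the kernel-style sketch below is a paraphrase under that assumption,
not code from this patch (sync_with_boot_cpu() is hypothetical):

    /* Hypothetical secondary-CPU bringup step, before set_cpu_online(). */
    static void sync_with_boot_cpu(void *info)
    {
            /* ... handshake with the boot CPU ... */
    }

    static void secondary_early_sync(int boot_cpu)
    {
            /*
             * IRQs are still off and we wait for completion.  The old
             * check would warn here; the new one stays quiet because
             * cpu_online(smp_processor_id()) is still false -- nobody
             * can IPI this CPU yet, so waiting cannot deadlock.
             */
            smp_call_function_single(boot_cpu, sync_with_boot_cpu, NULL, 1);
    }
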
@@ -329,19 +335,18 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 {
        csd_lock(data);
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow CPUs that are not yet online, though, as no one else can
+        * send an smp call function interrupt to such a CPU, so deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+                    && !oops_in_progress);
 
        generic_exec_single(cpu, data, wait);
 }
 
-/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
-
-#ifndef arch_send_call_function_ipi_mask
-# define arch_send_call_function_ipi_mask(maskp) \
-        arch_send_call_function_ipi(*(maskp))
-#endif
-
 /**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
@@ -365,8 +370,14 @@ void smp_call_function_many(const struct cpumask *mask,
        unsigned long flags;
        int cpu, next_cpu, this_cpu = smp_processor_id();
 
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+       /*
+        * Can deadlock when called with interrupts disabled.
+        * We allow CPUs that are not yet online, though, as no one else can
+        * send an smp call function interrupt to such a CPU, so deadlocks
+        * can't happen.
+        */
+       WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+                    && !oops_in_progress);
 
        /* So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -391,23 +402,20 @@ void smp_call_function_many(const struct cpumask *mask,
        data = &__get_cpu_var(cfd_data);
        csd_lock(&data->csd);
 
-       spin_lock_irqsave(&data->lock, flags);
        data->csd.func = func;
        data->csd.info = info;
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, data->cpumask);
-       data->refs = cpumask_weight(data->cpumask);
+       atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-       spin_lock(&call_function.lock);
+       spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
         * observing the entry in generic_smp_call_function_interrupt()
         * will not miss any other list entries:
         */
        list_add_rcu(&data->csd.list, &call_function.queue);
-       spin_unlock(&call_function.lock);
-
-       spin_unlock_irqrestore(&data->lock, flags);
+       spin_unlock_irqrestore(&call_function.lock, flags);
 
        /*
         * Make the list addition visible before sending the ipi.