diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1867dac..fd69086 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -452,6 +452,7 @@ static ssize_t show_##file_name                             \
 
 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
@@ -659,6 +660,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_ro0400(cpuinfo_cur_freq);
 define_one_ro(cpuinfo_min_freq);
 define_one_ro(cpuinfo_max_freq);
+define_one_ro(cpuinfo_transition_latency);
 define_one_ro(scaling_available_governors);
 define_one_ro(scaling_driver);
 define_one_ro(scaling_cur_freq);
@@ -672,6 +674,7 @@ define_one_rw(scaling_setspeed);
 static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
+       &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
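
These three hunks export the driver-reported transition latency as a new read-only sysfs file; userspace reads it (in nanoseconds) from /sys/devices/system/cpu/cpuN/cpufreq/cpuinfo_transition_latency. For reference, roughly what the show_one()/define_one_ro() helpers generate for the new attribute (the real macro bodies live earlier in cpufreq.c and may differ slightly):

/* Approximate expansion of show_one(cpuinfo_transition_latency,
 * cpuinfo.transition_latency) and define_one_ro(cpuinfo_transition_latency). */
static ssize_t show_cpuinfo_transition_latency(struct cpufreq_policy *policy,
                                               char *buf)
{
        return sprintf(buf, "%u\n", policy->cpuinfo.transition_latency);
}

static struct freq_attr cpuinfo_transition_latency =
        __ATTR(cpuinfo_transition_latency, 0444,
               show_cpuinfo_transition_latency, NULL);
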
@@ -753,16 +756,15 @@ static struct kobj_type ktype_cpufreq = {
        .release        = cpufreq_sysfs_release,
 };
 
-static struct kobj_type ktype_empty_cpufreq = {
-       .sysfs_ops      = &sysfs_ops,
-       .release        = cpufreq_sysfs_release,
-};
-
 
 /**
  * cpufreq_add_dev - add a CPU device
  *
  * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
  */
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
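
The ktype_empty_cpufreq variant can go because the hide_interface special case is removed further down; every policy kobject now uses ktype_cpufreq. Its release callback is also what the error paths below depend on: after kobject_put(), the add path blocks on policy->kobj_unregister until the last reference is gone. A rough sketch of that callback, based on the helpers earlier in this file (the exact body may differ):

/* Sketch: runs once the last reference to policy->kobj is dropped, which
 * lets wait_for_completion(&policy->kobj_unregister) return. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        complete(&policy->kobj_unregister);
}
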
@@ -774,9 +776,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        struct sys_device *cpu_sys_dev;
        unsigned long flags;
        unsigned int j;
-#ifdef CONFIG_SMP
-       struct cpufreq_policy *managed_policy;
-#endif
 
        if (cpu_is_offline(cpu))
                return 0;
@@ -806,15 +805,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                goto nomem_out;
        }
        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-               kfree(policy);
                ret = -ENOMEM;
-               goto nomem_out;
+               goto err_free_policy;
        }
-       if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-               free_cpumask_var(policy->cpus);
-               kfree(policy);
+       if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
-               goto nomem_out;
+               goto err_free_cpumask;
        }
 
        policy->cpu = cpu;
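
The allocation failures now unwind through dedicated labels instead of freeing inline before each return, and related_cpus is allocated pre-zeroed with zalloc_cpumask_var() (plain alloc_cpumask_var() does not clear the mask). A minimal, self-contained sketch of the pattern, using a hypothetical helper name:

/* Hypothetical example of the goto-unwinding shape used above: each label
 * releases exactly the resources acquired before the failure, in reverse
 * order of acquisition. */
static int example_policy_alloc(struct cpufreq_policy **out)
{
        struct cpufreq_policy *policy;
        int ret;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return -ENOMEM;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_free_policy;
        }
        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_free_cpumask;
        }

        *out = policy;
        return 0;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);
        return ret;
}
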
@@ -822,7 +818,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
        /* Initially set CPU itself as the policy_cpu */
        per_cpu(policy_cpu, cpu) = cpu;
-       lock_policy_rwsem_write(cpu);
+       ret = (lock_policy_rwsem_write(cpu) < 0);
+       WARN_ON(ret);
 
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);
@@ -835,7 +832,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        ret = cpufreq_driver->init(policy);
        if (ret) {
                dprintk("initialization failed\n");
-               goto err_out;
+               goto err_unlock_policy;
        }
        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;
@@ -854,21 +851,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 #endif
 
        for_each_cpu(j, policy->cpus) {
+               struct cpufreq_policy *managed_policy;
+
                if (cpu == j)
                        continue;
 
                /* Check for existing affected CPUs.
                 * They may not be aware of it due to CPU Hotplug.
+                * cpufreq_cpu_put is called when the device is removed
+                * in __cpufreq_remove_dev()
                 */
-               managed_policy = cpufreq_cpu_get(j);            /* FIXME: Where is this released?  What about error paths? */
+               managed_policy = cpufreq_cpu_get(j);
                if (unlikely(managed_policy)) {
 
                        /* Set proper policy_cpu */
                        unlock_policy_rwsem_write(cpu);
                        per_cpu(policy_cpu, cpu) = managed_policy->cpu;
 
-                       if (lock_policy_rwsem_write(cpu) < 0)
-                               goto err_out_driver_exit;
+                       if (lock_policy_rwsem_write(cpu) < 0) {
+                               /* Should not go through policy unlock path */
+                               if (cpufreq_driver->exit)
+                                       cpufreq_driver->exit(policy);
+                               ret = -EBUSY;
+                               cpufreq_cpu_put(managed_policy);
+                               goto err_free_cpumask;
+                       }
 
                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
                        cpumask_copy(managed_policy->cpus, policy->cpus);
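
The old FIXME is resolved by pairing every successful cpufreq_cpu_get() in this function with a cpufreq_cpu_put(): on the relock failure above the reference is dropped on the spot (together with a driver ->exit() call), otherwise it is held until the device goes away in __cpufreq_remove_dev(). For reference, a sketch of what the put side undoes, based on the helpers earlier in this file (exact bodies may differ):

/* Sketch: cpufreq_cpu_get() takes a reference on the policy kobject plus
 * a module reference on the cpufreq driver; cpufreq_cpu_put() drops both. */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}
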
@@ -880,53 +887,47 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                                                &managed_policy->kobj,
                                                "cpufreq");
                        if (ret)
-                               goto err_out_driver_exit;
-
-                       cpufreq_debug_enable_ratelimit();
-                       ret = 0;
-                       goto err_out_driver_exit; /* call driver->exit() */
+                               cpufreq_cpu_put(managed_policy);
+                       /*
+                        * Success. We only needed to be added to the mask.
+                        * Call driver->exit() because only the cpu parent of
+                        * the kobj needed to call init().
+                        */
+                       goto out_driver_exit; /* call driver->exit() */
                }
        }
 #endif
        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 
        /* prepare interface data */
-       if (!cpufreq_driver->hide_interface) {
-               ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
-                                          &sys_dev->kobj, "cpufreq");
-               if (ret)
-                       goto err_out_driver_exit;
+       ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
+                                  "cpufreq");
+       if (ret)
+               goto out_driver_exit;
 
-               /* set up files for this cpu device */
-               drv_attr = cpufreq_driver->attr;
-               while ((drv_attr) && (*drv_attr)) {
-                       ret = sysfs_create_file(&policy->kobj,
-                                               &((*drv_attr)->attr));
-                       if (ret)
-                               goto err_out_driver_exit;
-                       drv_attr++;
-               }
-               if (cpufreq_driver->get) {
-                       ret = sysfs_create_file(&policy->kobj,
-                                               &cpuinfo_cur_freq.attr);
-                       if (ret)
-                               goto err_out_driver_exit;
-               }
-               if (cpufreq_driver->target) {
-                       ret = sysfs_create_file(&policy->kobj,
-                                               &scaling_cur_freq.attr);
-                       if (ret)
-                               goto err_out_driver_exit;
-               }
-       } else {
-               ret = kobject_init_and_add(&policy->kobj, &ktype_empty_cpufreq,
-                                          &sys_dev->kobj, "cpufreq");
+       /* set up files for this cpu device */
+       drv_attr = cpufreq_driver->attr;
+       while ((drv_attr) && (*drv_attr)) {
+               ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+               if (ret)
+                       goto err_out_kobj_put;
+               drv_attr++;
+       }
+       if (cpufreq_driver->get) {
+               ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+               if (ret)
+                       goto err_out_kobj_put;
+       }
+       if (cpufreq_driver->target) {
+               ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
-                       goto err_out_driver_exit;
+                       goto err_out_kobj_put;
        }
 
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
+               if (!cpu_online(j))
+                       continue;
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(policy_cpu, j) = policy->cpu;
        }
@@ -934,18 +935,22 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
        /* symlink affected CPUs */
        for_each_cpu(j, policy->cpus) {
+               struct cpufreq_policy *managed_policy;
+
                if (j == cpu)
                        continue;
                if (!cpu_online(j))
                        continue;
 
                dprintk("CPU %u already managed, adding link\n", j);
-               cpufreq_cpu_get(cpu);
+               managed_policy = cpufreq_cpu_get(cpu);
                cpu_sys_dev = get_cpu_sysdev(j);
                ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
                                        "cpufreq");
-               if (ret)
+               if (ret) {
+                       cpufreq_cpu_put(managed_policy);
                        goto err_out_unregister;
+               }
        }
 
        policy->governor = NULL; /* to assure that the starting sequence is
@@ -977,17 +982,20 @@ err_out_unregister:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
 
-err_out_driver_exit:
+out_driver_exit:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 
-err_out:
+err_unlock_policy:
        unlock_policy_rwsem_write(cpu);
+err_free_cpumask:
+       free_cpumask_var(policy->cpus);
+err_free_policy:
        kfree(policy);
-
 nomem_out:
        module_put(cpufreq_driver->owner);
 module_out:
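
Read together, the renamed exits now form one strict reverse-order unwind. Condensed into one place (statements at module_out elided, otherwise just restating the labels as they read after this patch):

/* Unwind order at the bottom of cpufreq_add_dev() after this patch; each
 * label is reachable only once its resource has actually been acquired. */
err_out_unregister:
        /* clear the per-cpu cpufreq_cpu_data pointers under cpufreq_driver_lock */
err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
out_driver_exit:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
err_unlock_policy:
        unlock_policy_rwsem_write(cpu);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);
nomem_out:
        module_put(cpufreq_driver->owner);
module_out:
        /* ... */
        return ret;
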
@@ -1085,8 +1093,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
-       unlock_policy_rwsem_write(cpu);
-
        kobject_put(&data->kobj);
 
        /* we need to make sure that the underlying kobj is actually
@@ -1100,6 +1106,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(data);
 
+       unlock_policy_rwsem_write(cpu);
+
        free_cpumask_var(data->related_cpus);
        free_cpumask_var(data->cpus);
        kfree(data);
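
On the remove side, the policy rwsem (taken earlier in the remove path) is now released only after the driver's ->exit() callback has run, rather than before kobject_put(); the final frees still happen outside the lock. A simplified sketch of the teardown order after these two hunks (governor stop and sysfs details omitted):

/* Simplified teardown order in __cpufreq_remove_dev() after this patch. */
kobject_put(&data->kobj);
wait_for_completion(&data->kobj_unregister);    /* wait out the last kobj ref */
if (cpufreq_driver->exit)
        cpufreq_driver->exit(data);
unlock_policy_rwsem_write(cpu);                 /* was dropped before kobject_put() */
free_cpumask_var(data->related_cpus);
free_cpumask_var(data->cpus);
kfree(data);
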
@@ -1240,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);
 
 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
-       int cpu = sysdev->id;
        int ret = 0;
+
+#ifdef __powerpc__
+       int cpu = sysdev->id;
        unsigned int cur_freq = 0;
        struct cpufreq_policy *cpu_policy;
 
        dprintk("suspending cpu %u\n", cpu);
 
+       /*
+        * This whole bogosity is here because Powerbooks are made of fail.
+        * No sane platform should need any of the code below to be run.
+        * (it's entirely the wrong thing to do, as driver->get may
+        *  reenable interrupts on some architectures).
+        */
+
        if (!cpu_online(cpu))
                return 0;
 
@@ -1305,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
 out:
        cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
        return ret;
 }
 
@@ -1318,12 +1336,18 @@ out:
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
-       int cpu = sysdev->id;
        int ret = 0;
+
+#ifdef __powerpc__
+       int cpu = sysdev->id;
        struct cpufreq_policy *cpu_policy;
 
        dprintk("resuming cpu %u\n", cpu);
 
+       /* As with the ->suspend method, all the code below is
+        * only necessary because Powerbooks suck.
+        * See commit 42d4dc3f4e1e for jokes. */
+
        if (!cpu_online(cpu))
                return 0;
 
@@ -1387,6 +1411,7 @@ out:
        schedule_work(&cpu_policy->update);
 fail:
        cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
        return ret;
 }
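
With the new #ifdef __powerpc__ guards, both callbacks compile down to trivial stubs on every other architecture, while the PowerBook-specific re-read/re-set of the frequency is only built where it is needed. In sketch form, what non-powerpc builds end up with:

/* What cpufreq_suspend()/cpufreq_resume() reduce to on !__powerpc__. */
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
        return 0;
}

static int cpufreq_resume(struct sys_device *sysdev)
{
        return 0;
}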