cpumask: alloc zeroed cpumask for static cpumask_var_ts
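
The conversion below replaces on-stack and bundled cpumask_t temporaries
with dynamically allocated cpumask_var_t, and allocates the one mask that
is used as an accumulator pre-zeroed. For orientation, a minimal sketch of
the allocator pair involved, using the stock <linux/cpumask.h> API
(scratch is a hypothetical name, not from the patch):

	cpumask_var_t scratch;

	/*
	 * alloc_cpumask_var() leaves the mask contents undefined, so a
	 * mask that only ever accumulates bits must either be cleared
	 * explicitly or come from the zeroing variant:
	 */
	if (!zalloc_cpumask_var(&scratch, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, scratch);	/* safe: mask started out empty */
	free_cpumask_var(scratch);
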
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index ca2ac13..55c831e 100644
@@ -26,7 +26,7 @@
 #include <asm/cpufeature.h>
 
 #define PFX            "speedstep-centrino: "
-#define MAINTAINER     "cpufreq@lists.linux.org.uk"
+#define MAINTAINER     "cpufreq@vger.kernel.org"
 
 #define dprintk(msg...) \
        cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
        unsigned l, h;
        unsigned clock_freq;
        cpumask_t saved_mask;
-       cpumask_of_cpu_ptr(new_mask, cpu);
 
        saved_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, new_mask);
+       set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (smp_processor_id() != cpu)
                return 0;
 
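For context: rdmsr() reads the MSR of whichever CPU it executes on, so
get_cur_freq() pins the current task to the target CPU first and bails out
if the migration did not take effect. The whole pattern in isolation (a
sketch; MSR_IA32_PERF_STATUS is, to the best of my reading, the register
this driver samples at this point):

	cpumask_t saved_mask = current->cpus_allowed;
	unsigned l, h;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (smp_processor_id() != cpu)
		return 0;		/* could not migrate; give up */

	rdmsr(MSR_IA32_PERF_STATUS, l, h);	/* now runs on cpu */
	set_cpus_allowed_ptr(current, &saved_mask);
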
@@ -391,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
           enable it if not. */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 
-       if (!(l & (1<<16))) {
-               l |= (1<<16);
+       if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+               l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
                dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
                wrmsr(MSR_IA32_MISC_ENABLE, l, h);
 
                /* check to see if it stuck */
                rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-               if (!(l & (1<<16))) {
+               if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
                        printk(KERN_INFO PFX
                                "couldn't enable Enhanced SpeedStep\n");
                        return -ENODEV;
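
The magic number being replaced is bit 16 of IA32_MISC_ENABLE, the
Enhanced Intel SpeedStep enable bit. The symbolic name comes from
<asm/msr-index.h>; schematically (the exact spelling of the definition
varies by kernel version):

	#define MSR_IA32_MISC_ENABLE				0x000001a0
	#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1 << 16)
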
@@ -459,13 +458,6 @@ static int centrino_verify (struct cpufreq_policy *policy)
  *
  * Sets a new CPUFreq policy.
  */
-struct allmasks {
-       cpumask_t               online_policy_cpus;
-       cpumask_t               saved_mask;
-       cpumask_t               set_mask;
-       cpumask_t               covered_cpus;
-};
-
 static int centrino_target (struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
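
The deleted struct allmasks was itself a workaround: on large-NR_CPUS
configurations, four cpumask_t locals would blow the stack frame, so
CPUMASK_ALLOC() bundled them into a single kmalloc'd block. cpumask_var_t
makes the same trade-off per mask, at the typedef level (paraphrased from
<linux/cpumask.h>):

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;		/* heap-allocated */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* plain stack array */
	#endif
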
@@ -475,14 +467,15 @@ static int centrino_target (struct cpufreq_policy *policy,
        struct cpufreq_freqs    freqs;
        int                     retval = 0;
        unsigned int            j, k, first_cpu, tmp;
-       CPUMASK_ALLOC(allmasks);
-       CPUMASK_PTR(online_policy_cpus, allmasks);
-       CPUMASK_PTR(saved_mask, allmasks);
-       CPUMASK_PTR(set_mask, allmasks);
-       CPUMASK_PTR(covered_cpus, allmasks);
+       cpumask_var_t saved_mask, covered_cpus;
 
-       if (unlikely(allmasks == NULL))
+       if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
+               return -ENOMEM;
+       if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+               free_cpumask_var(saved_mask);
                return -ENOMEM;
+       }
+       cpumask_copy(saved_mask, &current->cpus_allowed);
 
        if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
                retval = -ENODEV;
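
Note the allocator split above: saved_mask is overwritten wholesale by
cpumask_copy(), so plain alloc_cpumask_var() is enough, while covered_cpus
accumulates bits one CPU at a time and must start empty. zalloc_cpumask_var()
provides that, replacing the explicit cpus_clear(*covered_cpus) removed in
the next hunk. The non-zeroing equivalent would be (sketch):

	if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
		free_cpumask_var(saved_mask);	/* unwind the first alloc */
		return -ENOMEM;
	}
	cpumask_clear(covered_cpus);		/* what zalloc does for us */
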
@@ -498,30 +491,26 @@ static int centrino_target (struct cpufreq_policy *policy,
                goto out;
        }
 
-#ifdef CONFIG_HOTPLUG_CPU
-       /* cpufreq holds the hotplug lock, so we are safe from here on */
-       cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus);
-#else
-       *online_policy_cpus = policy->cpus;
-#endif
-
-       *saved_mask = current->cpus_allowed;
        first_cpu = 1;
-       cpus_clear(*covered_cpus);
-       for_each_cpu_mask_nr(j, *online_policy_cpus) {
+       for_each_cpu(j, policy->cpus) {
+               const struct cpumask *mask;
+
+               /* cpufreq holds the hotplug lock, so we are safe here */
+               if (!cpu_online(j))
+                       continue;
+
                /*
                 * Support for SMP systems.
                 * Make sure we are running on CPU that wants to change freq
                 */
-               cpus_clear(*set_mask);
                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                       cpus_or(*set_mask, *set_mask, *online_policy_cpus);
+                       mask = policy->cpus;
                else
-                       cpu_set(j, *set_mask);
+                       mask = cpumask_of(j);
 
-               set_cpus_allowed_ptr(current, set_mask);
+               set_cpus_allowed_ptr(current, mask);
                preempt_disable();
-               if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) {
+               if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
                        dprintk("couldn't limit to CPUs in this domain\n");
                        retval = -EAGAIN;
                        if (first_cpu) {
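
The preempt_disable()/cpu_isset() pair guards against the migration
silently failing, e.g. when the target CPU goes offline underneath us:
with preemption off, smp_processor_id() is stable for the duration of the
check. The same test in the pointer-based cpumask API reads (a sketch, not
the patch's exact error handling):

	set_cpus_allowed_ptr(current, mask);
	preempt_disable();
	if (!cpumask_test_cpu(smp_processor_id(), mask)) {
		preempt_enable();
		return -EAGAIN;		/* not running where we must */
	}
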
@@ -549,7 +538,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                        dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
                                target_freq, freqs.old, freqs.new, msr);
 
-                       for_each_cpu_mask_nr(k, *online_policy_cpus) {
+                       for_each_cpu(k, policy->cpus) {
+                               if (!cpu_online(k))
+                                       continue;
                                freqs.cpu = k;
                                cpufreq_notify_transition(&freqs,
                                        CPUFREQ_PRECHANGE);
@@ -572,7 +563,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                preempt_enable();
        }
 
-       for_each_cpu_mask_nr(k, *online_policy_cpus) {
+       for_each_cpu(k, policy->cpus) {
+               if (!cpu_online(k))
+                       continue;
                freqs.cpu = k;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
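
Both notification loops fan the transition out to every online CPU in the
policy: PRECHANGE before the MSR write, POSTCHANGE once it has completed.
Reduced to a single CPU, the bracket looks like this (sketch; old_khz and
target_khz are stand-ins for values the driver computes):

	struct cpufreq_freqs freqs;
	unsigned int k = 0;		/* CPU being notified, stand-in */

	freqs.old = old_khz;
	freqs.new = target_khz;
	freqs.cpu = k;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	/* ... wrmsr(MSR_IA32_PERF_CTL, ...) actually switches the clock ... */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
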
@@ -585,21 +578,17 @@ static int centrino_target (struct cpufreq_policy *policy,
                 * Best effort undo..
                 */
 
-               if (!cpus_empty(*covered_cpus)) {
-                       cpumask_of_cpu_ptr_declare(new_mask);
-
-                       for_each_cpu_mask_nr(j, *covered_cpus) {
-                               cpumask_of_cpu_ptr_next(new_mask, j);
-                               set_cpus_allowed_ptr(current, new_mask);
-                               wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                       }
+               for_each_cpu_mask_nr(j, *covered_cpus) {
+                       set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
+                       wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                }
 
                tmp = freqs.new;
                freqs.new = freqs.old;
                freqs.old = tmp;
-               for_each_cpu_mask_nr(j, *online_policy_cpus) {
-                       freqs.cpu = j;
+               for_each_cpu(j, policy->cpus) {
+                       if (!cpu_online(j))
+                               continue;
                        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
                        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                }
@@ -612,7 +601,8 @@ migrate_end:
        preempt_enable();
        set_cpus_allowed_ptr(current, saved_mask);
 out:
-       CPUMASK_FREE(allmasks);
+       free_cpumask_var(saved_mask);
+       free_cpumask_var(covered_cpus);
        return retval;
 }
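
The error path still performs its undo by migrating the current task to
each covered CPU in turn and rewriting the saved PERF_CTL value. An
alternative that avoids touching cpus_allowed entirely is the cross-CPU
MSR helper from <asm/msr.h> (a sketch of what that would look like, not
what this patch does):

	for_each_cpu_mask_nr(j, *covered_cpus)
		wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);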