[CPUFREQ] Fix coding style issues in cpufreq.
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
						unsigned int event);
static void handle_update(void *data);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each list is protected by its own notifier head.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	return 0;
}
core_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= NR_CPUS)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = cpufreq_cpu_data[cpu];

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
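
/*
 * Example (illustrative sketch, not part of this file): every successful
 * cpufreq_cpu_get() pins both the policy kobject and the driver module,
 * so it must be balanced by cpufreq_cpu_put(); use_policy() below is a
 * hypothetical helper:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		use_policy(policy);
 *		cpufreq_cpu_put(policy);
 *	}
 */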


/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix,
							const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		/* use "%s" so a '%' in the formatted message cannot be
		 * misparsed as a printk format directive */
		printk("%s", s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");
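
/*
 * Usage note (illustrative, an assumption about a sysfs-enabled build):
 * with permission 0644 the parameters above should also be reachable at
 * runtime through sysfs, e.g.
 *
 *	echo 7 > /sys/module/cpufreq/parameters/debug
 *	echo 0 > /sys/module/cpufreq/parameters/debug_ratelimit
 *
 * which enables core, driver and governor debugging without ratelimiting.
 */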

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */


/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
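
/*
 * Worked example (illustrative): cpufreq_scale(old, div, mult) computes
 * old * mult / div with care for overflow. If l_p_j_ref = 4000000 was
 * saved at l_p_j_ref_freq = 1000000 kHz and the CPU now runs at
 * 500000 kHz, loops_per_jiffy becomes
 *
 *	cpufreq_scale(4000000, 1000000, 500000) == 2000000
 *
 * so udelay() stays calibrated across the frequency change.
 */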


/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = cpufreq_cpu_data[freqs->cpu];
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
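
/*
 * Example (sketch of the expected driver calling convention): a scaling
 * driver brackets the actual hardware reprogramming with the two
 * notifications; set_hw_frequency() is a hypothetical helper:
 *
 *	struct cpufreq_freqs freqs = {
 *		.cpu = policy->cpu,
 *		.old = policy->cur,
 *		.new = target,
 *	};
 *
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	set_hw_frequency(target);
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */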


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module(name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
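
/*
 * For reference, show_one(scaling_cur_freq, cur) above expands to:
 *
 *	static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */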

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	lock_cpu_hotplug();						\
	mutex_lock(&policy->lock);					\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
	mutex_unlock(&policy->lock);					\
	unlock_cpu_hotplug();						\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
							char *buf)
{
	unsigned int cur_freq = cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy,
							char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
						policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	lock_cpu_hotplug();

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	mutex_lock(&policy->lock);
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;
	mutex_unlock(&policy->lock);

	unlock_cpu_hotplug();

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
				char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
					- (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu_mask(cpu, policy->cpus) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}


#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	cpufreq_cpu_put(policy);
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;
	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	cpufreq_cpu_put(policy);
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
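
/*
 * Example (hypothetical driver attribute): scaling drivers can extend
 * this sysfs directory by listing extra struct freq_attr entries in
 * cpufreq_driver->attr; they are wired up in cpufreq_add_dev() below.
 * board_limit and my_attrs are illustrative names:
 *
 *	static ssize_t show_board_limit(struct cpufreq_policy *policy,
 *					char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 *	}
 *	static struct freq_attr board_limit =
 *	__ATTR(board_limit, 0444, show_board_limit, NULL);
 *
 *	static struct freq_attr *my_attrs[] = { &board_limit, NULL };
 *
 * and then .attr = my_attrs in the driver's struct cpufreq_driver.
 */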


/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}

	policy->cpu = cpu;
	policy->cpus = cpumask_of_cpu(cpu);

	mutex_init(&policy->lock);
	mutex_lock(&policy->lock);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		mutex_unlock(&policy->lock);
		goto err_out;
	}

#ifdef CONFIG_SMP
	for_each_cpu_mask(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs.  They may not be aware
		 * of it due to CPU Hotplug.
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {
			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			managed_policy->cpus = policy->cpus;
			cpufreq_cpu_data[cpu] = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			sysfs_create_link(&sys_dev->kobj,
					  &managed_policy->kobj, "cpufreq");

			cpufreq_debug_enable_ratelimit();
			mutex_unlock(&policy->lock);
			ret = 0;
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	policy->kobj.parent = &sys_dev->kobj;
	policy->kobj.ktype = &ktype_cpufreq;
	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);

	ret = kobject_register(&policy->kobj);
	if (ret) {
		mutex_unlock(&policy->lock);
		goto err_out_driver_exit;
	}
	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		drv_attr++;
	}
	if (cpufreq_driver->get)
		sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
	if (cpufreq_driver->target)
		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu_mask(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
				  "cpufreq");
	}

	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */
	mutex_unlock(&policy->lock);

	/* set default policy */
	ret = cpufreq_set_policy(&new_policy);
	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu_mask(j, policy->cpus)
		cpufreq_cpu_data[j] = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_unregister(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}


/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = cpufreq_cpu_data[cpu];

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}
	cpufreq_cpu_data[cpu] = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpu_clear(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif


	if (!kobject_get(&data->kobj)) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		return -EFAULT;
	}

#ifdef CONFIG_SMP
	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
	 * links afterwards.
	 */
	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			cpufreq_cpu_data[j] = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	mutex_lock(&data->lock);
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
	mutex_unlock(&data->lock);

	kobject_unregister(&data->kobj);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	kfree(data);

	cpufreq_debug_enable_ratelimit();
	return 0;
}


static void handle_update(void *data)
{
	unsigned int cpu = (unsigned int)(long)data;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 *	cpufreq_out_of_sync - if actual and saved CPU frequency differ,
 *	we're in deep trouble
 *	@cpu: cpu number
 *	@old_freq: CPU frequency the kernel thinks the CPU runs at
 *	@new_freq: CPU frequency the CPU actually runs at
 *
 *	We adjust to the current frequency first, and need to clean up
 *	later. So either call cpufreq_update_policy() or schedule
 *	handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		mutex_lock(&policy->lock);
		ret_freq = policy->cur;
		mutex_unlock(&policy->lock);
		cpufreq_cpu_put(policy);
	}

	return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_quick_get);


/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU's current (static) frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (!policy)
		return 0;

	if (!cpufreq_driver->get)
		goto out;

	mutex_lock(&policy->lock);

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	mutex_unlock(&policy->lock);

out:
	cpufreq_cpu_put(policy);

	return (ret_freq);
}
EXPORT_SYMBOL(cpufreq_get);
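
/*
 * Usage note (illustrative): cpufreq_quick_get(cpu) returns the cached
 * policy->cur without touching the hardware, while cpufreq_get(cpu) asks
 * the driver and resynchronizes if the two values disagree:
 *
 *	unsigned int cached = cpufreq_quick_get(0);
 *	unsigned int actual = cpufreq_get(0);
 */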


/**
 *	cpufreq_suspend - let the low level driver prepare for suspend
 */
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int cpu = sysdev->id;
	int ret = 0;
	unsigned int cur_freq = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret) {
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}


	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
		goto out;

	if (cpufreq_driver->get)
		cur_freq = cpufreq_driver->get(cpu_policy->cpu);

	if (!cur_freq || !cpu_policy->cur) {
		printk(KERN_ERR "cpufreq: suspend failed to assert current "
		       "frequency is what timing core thinks it is.\n");
		goto out;
	}

	if (unlikely(cur_freq != cpu_policy->cur)) {
		struct cpufreq_freqs freqs;

		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
			dprintk("Warning: CPU frequency is %u, "
			       "cpufreq assumed %u kHz.\n",
			       cur_freq, cpu_policy->cur);

		freqs.cpu = cpu;
		freqs.old = cpu_policy->cur;
		freqs.new = cur_freq;

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				    CPUFREQ_SUSPENDCHANGE, &freqs);
		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

		cpu_policy->cur = cur_freq;
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return 0;
}

/**
 *	cpufreq_resume - restore proper CPU frequency handling after resume
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 *	3.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored.
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu)) {
		cpufreq_cpu_put(cpu_policy);
		return 0;
	}

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			cpufreq_cpu_put(cpu_policy);
			return ret;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert "
					"current frequency is what timing core "
					"thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
				dprintk("Warning: CPU frequency "
				       "is %u, cpufreq assumed %u kHz.\n",
				       cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			srcu_notifier_call_chain(
					&cpufreq_transition_notifier_list,
					CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	schedule_work(&cpu_policy->update);
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};


/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *	cpufreq_register_notifier - register a driver with cpufreq
 *	@nb: notifier function to register
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Add a driver to one of two lists: either a list of drivers that
 *	are notified about clock rate changes (once before and once after
 *	the transition), or a list of drivers that are notified about
 *	changes in cpufreq policy.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
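
/*
 * Example (illustrative): registering a transition notifier; my_freq_cb
 * and my_nb are hypothetical names:
 *
 *	static int my_freq_cb(struct notifier_block *nb,
 *			      unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			printk(KERN_DEBUG "cpu%u: now at %u kHz\n",
 *			       freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_freq_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */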


/**
 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
 *	@nb: notifier block to be unregistered
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Remove a driver from the CPU frequency notifier list.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


/* Must be called with lock_cpu_hotplug held */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	lock_cpu_hotplug();
	mutex_lock(&policy->lock);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	mutex_unlock(&policy->lock);
	unlock_cpu_hotplug();

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
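
/*
 * Example (illustrative): a governor that wants the fastest frequency
 * permitted by the current limits would request
 *
 *	cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *
 * The __cpufreq_driver_target() variant is for callers that already
 * hold lock_cpu_hotplug() (and typically policy->lock).
 */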

int cpufreq_driver_getavg(struct cpufreq_policy *policy)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	mutex_lock(&policy->lock);

	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy->cpu);

	mutex_unlock(&policy->lock);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);

/*
 * Locking: Must be called with the lock_cpu_hotplug() lock held
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this governor */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
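
/*
 * Example (minimal sketch, not a real governor): the ->governor hook
 * multiplexes the start/stop/limits events; my_gov_handler and "mygov"
 * are hypothetical:
 *
 *	static int my_gov_handler(struct cpufreq_policy *policy,
 *				  unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return __cpufreq_driver_target(policy, policy->max,
 *						       CPUFREQ_RELATION_H);
 *		case CPUFREQ_GOV_STOP:
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct cpufreq_governor my_gov = {
 *		.name		= "mygov",
 *		.governor	= my_gov_handler,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&my_gov);
 */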


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);



/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to read the policy of
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	mutex_lock(&cpu_policy->lock);
	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
	mutex_unlock(&cpu_policy->lock);

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
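
/*
 * Example (illustrative): reading a snapshot of CPU 0's policy:
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, 0))
 *		printk(KERN_INFO "cpu0: %u..%u kHz\n", pol.min, pol.max);
 */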


/*
 * data   : current policy.
 * policy : policy to be set.
 * Locking: Must be called with the lock_cpu_hotplug() lock held
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->min && policy->min > policy->max) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 *	cpufreq_set_policy - set a new CPUFreq policy
 *	@policy: policy to be set.
 *
 *	Sets a new CPU frequency and voltage scaling policy.
 */
int cpufreq_set_policy(struct cpufreq_policy *policy)
{
	int ret = 0;
	struct cpufreq_policy *data;

	if (!policy)
		return -EINVAL;

	data = cpufreq_cpu_get(policy->cpu);
	if (!data)
		return -EINVAL;

	lock_cpu_hotplug();

	/* lock this CPU */
	mutex_lock(&data->lock);

	ret = __cpufreq_set_policy(data, policy);
	data->user_policy.min = data->min;
	data->user_policy.max = data->max;
	data->user_policy.policy = data->policy;
	data->user_policy.governor = data->governor;

	mutex_unlock(&data->lock);

	unlock_cpu_hotplug();
	cpufreq_cpu_put(data);

	return ret;
}
EXPORT_SYMBOL(cpufreq_set_policy);
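
/*
 * Example (illustrative sketch): capping CPU 0 at 1 GHz by rewriting its
 * policy; error handling is omitted for brevity and cpufreq works in kHz:
 *
 *	struct cpufreq_policy pol;
 *
 *	cpufreq_get_policy(&pol, 0);
 *	pol.max = 1000000;
 *	cpufreq_set_policy(&pol);
 */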
1551
1552
1553 /**
1554  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1555  *      @cpu: CPU which shall be re-evaluated
1556  *
1557  *      Usefull for policy notifiers which have different necessities
1558  *      at different times.
1559  */
1560 int cpufreq_update_policy(unsigned int cpu)
1561 {
1562         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1563         struct cpufreq_policy policy;
1564         int ret = 0;
1565
1566         if (!data)
1567                 return -ENODEV;
1568
1569         lock_cpu_hotplug();
1570         mutex_lock(&data->lock);
1571
1572         dprintk("updating policy for CPU %u\n", cpu);
1573         memcpy(&policy, data, sizeof(struct cpufreq_policy));
1574         policy.min = data->user_policy.min;
1575         policy.max = data->user_policy.max;
1576         policy.policy = data->user_policy.policy;
1577         policy.governor = data->user_policy.governor;
1578
1579         /* BIOS might change freq behind our back
1580          * -> ask driver for current freq and notify governors about a change */
1581         if (cpufreq_driver->get) {
1582                 policy.cur = cpufreq_driver->get(cpu);
1583                 if (!data->cur) {
1584                         dprintk("Driver did not initialize current freq\n");
1585                         data->cur = policy.cur;
1586                 } else {
1587                         if (data->cur != policy.cur)
1588                                 cpufreq_out_of_sync(cpu, data->cur,
1589                                                                 policy.cur);
1590                 }
1591         }
1592
1593         ret = __cpufreq_set_policy(data, &policy);
1594
1595         mutex_unlock(&data->lock);
1596         unlock_cpu_hotplug();
1597         cpufreq_cpu_put(data);
1598         return ret;
1599 }
1600 EXPORT_SYMBOL(cpufreq_update_policy);
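
Typical users are platform drivers whose firmware may retune the CPU clock behind the kernel's back; after such an event they simply ask the core to re-evaluate each affected CPU. A sketch, where sketch_firmware_notify is a hypothetical handler:

/* Sketch: hypothetical firmware-event handler, not part of this file. */
static void sketch_firmware_notify(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);
}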
1601
1602 #ifdef CONFIG_HOTPLUG_CPU
1603 static int cpufreq_cpu_callback(struct notifier_block *nfb,
1604                                         unsigned long action, void *hcpu)
1605 {
1606         unsigned int cpu = (unsigned long)hcpu;
1607         struct cpufreq_policy *policy;
1608         struct sys_device *sys_dev;
1609
1610         sys_dev = get_cpu_sysdev(cpu);
1611
1612         if (sys_dev) {
1613                 switch (action) {
1614                 case CPU_ONLINE:
1615                         cpufreq_add_dev(sys_dev);
1616                         break;
1617                 case CPU_DOWN_PREPARE:
1618                         /*
1619                          * We attempt to put this cpu in lowest frequency
1620                          * possible before going down. This will permit
1621                          * hardware-managed P-State to switch other related
1622                          * threads to min or higher speeds if possible.
1623                          */
1624                         policy = cpufreq_cpu_data[cpu];
1625                         if (policy) {
1626                                 cpufreq_driver_target(policy, policy->min,
1627                                                 CPUFREQ_RELATION_H);
1628                         }
1629                         break;
1630                 case CPU_DEAD:
1631                         cpufreq_remove_dev(sys_dev);
1632                         break;
1633                 }
1634         }
1635         return NOTIFY_OK;
1636 }
1637
1638 static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
1639 {
1640         .notifier_call = cpufreq_cpu_callback,
1641 };
1642 #endif /* CONFIG_HOTPLUG_CPU */
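
On the relation argument used in the CPU_DOWN_PREPARE case above: CPUFREQ_RELATION_H selects the highest supported frequency at or below the target, CPUFREQ_RELATION_L the lowest at or above it, so targeting policy->min with RELATION_H lands on the lowest supported frequency. With an assumed frequency table (illustrative values only):

/* Available frequencies: { 600000, 800000, 1000000 } kHz (assumed).
 * target 700000, CPUFREQ_RELATION_H -> 600000 (highest <= target)
 * target 700000, CPUFREQ_RELATION_L -> 800000 (lowest  >= target)
 */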
1643
1644 /*********************************************************************
1645  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1646  *********************************************************************/
1647
1648 /**
1649  * cpufreq_register_driver - register a CPU Frequency driver
1650  * @driver_data: A struct cpufreq_driver containing the values
1651  * submitted by the CPU Frequency driver.
1652  *
1653  *   Registers a CPU Frequency driver to this core code. This code
1654  * returns zero on success, -EBUSY when another driver got here first
1655  * (and isn't unregistered in the meantime).
1656  *
1657  */
1658 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1659 {
1660         unsigned long flags;
1661         int ret;
1662
1663         if (!driver_data || !driver_data->verify || !driver_data->init ||
1664             ((!driver_data->setpolicy) && (!driver_data->target)))
1665                 return -EINVAL;
1666
1667         dprintk("trying to register driver %s\n", driver_data->name);
1668
1669         if (driver_data->setpolicy)
1670                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1671
1672         spin_lock_irqsave(&cpufreq_driver_lock, flags);
1673         if (cpufreq_driver) {
1674                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1675                 return -EBUSY;
1676         }
1677         cpufreq_driver = driver_data;
1678         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1679
1680         ret = sysdev_driver_register(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1681
1682         if (!ret && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1683                 int i;
1684                 ret = -ENODEV;
1685
1686                 /* check for at least one working CPU */
1687                 for (i = 0; i < NR_CPUS; i++)
1688                         if (cpufreq_cpu_data[i])
1689                                 ret = 0;
1690
1691                 /* if all ->init() calls failed, unregister */
1692                 if (ret) {
1693                         dprintk("no CPU initialized for driver %s\n",
1694                                                         driver_data->name);
1695                         sysdev_driver_unregister(&cpu_sysdev_class,
1696                                                 &cpufreq_sysdev_driver);
1697
1698                         spin_lock_irqsave(&cpufreq_driver_lock, flags);
1699                         cpufreq_driver = NULL;
1700                         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1701                 }
1702         }
1703
1704         if (!ret) {
1705                 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1706                 dprintk("driver %s up and running\n", driver_data->name);
1707                 cpufreq_debug_enable_ratelimit();
1708         }
1709
1710         return ret;
1711 }
1712 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
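
For reference, a skeleton of how a low-level driver registers itself against this interface. This is a sketch only: the sketch_* names and the fixed 600000-1000000 kHz range are hypothetical, and the hardware programming is left out.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

/* Hypothetical sketch driver -- not a real cpufreq driver. */
static int sketch_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, 600000, 1000000);
	return 0;
}

static int sketch_target(struct cpufreq_policy *policy,
			 unsigned int target_freq, unsigned int relation)
{
	/* program the hardware and send transition notifications here */
	return 0;
}

static int sketch_cpu_init(struct cpufreq_policy *policy)
{
	/* assumed hardware limits, in kHz */
	policy->cpuinfo.min_freq = 600000;
	policy->cpuinfo.max_freq = 1000000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	policy->cur = 1000000;
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	return 0;
}

static struct cpufreq_driver sketch_driver = {
	.name	= "sketch",
	.owner	= THIS_MODULE,
	.verify	= sketch_verify,
	.target	= sketch_target,
	.init	= sketch_cpu_init,
};

static int __init sketch_module_init(void)
{
	return cpufreq_register_driver(&sketch_driver);
}
module_init(sketch_module_init);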
1713
1714
1715 /**
1716  * cpufreq_unregister_driver - unregister the current CPUFreq driver
1717  *
1718  *    Unregister the current CPUFreq driver. Only call this if you have
1719  * the right to do so, i.e. if you have succeeded in initialising before!
1720  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1721  * currently not initialised.
1722  */
1723 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1724 {
1725         unsigned long flags;
1726
1727         cpufreq_debug_disable_ratelimit();
1728
1729         if (!cpufreq_driver || (driver != cpufreq_driver)) {
1730                 cpufreq_debug_enable_ratelimit();
1731                 return -EINVAL;
1732         }
1733
1734         dprintk("unregistering driver %s\n", driver->name);
1735
1736         sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1737         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1738
1739         spin_lock_irqsave(&cpufreq_driver_lock, flags);
1740         cpufreq_driver = NULL;
1741         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1742
1743         return 0;
1744 }
1745 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
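
The matching teardown for the hypothetical sketch driver above would simply hand the same struct back:

/* Continuing the hypothetical sketch driver from above. */
static void __exit sketch_module_exit(void)
{
	cpufreq_unregister_driver(&sketch_driver);
}
module_exit(sketch_module_exit);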