/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
/*
 * dbs is used in this file as a shorthand for demand based switching.
 * It helps to keep variable names short and simple.
 */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 *
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in us (microseconds).
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
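/*
 * Worked example (illustrative only; the numbers assume HZ=250, i.e. a
 * 4 ms tick, and a def_sampling_rate of 200000 us):
 * jiffies_to_usecs(10) = 40000, so MIN_STAT_SAMPLING_RATE = 2 * 40000 =
 * 80000 us, and MIN_SAMPLING_RATE = 200000 / 2 = 100000 us, i.e. at
 * least 0.1 s between load evaluations.
 */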
/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon.
 * Define the minimal settable sampling rate as the greater of:
 *   - "HW transition latency" * 100 (same as default sampling rate / 10)
 *   - MIN_STAT_SAMPLING_RATE
 * so that userspace cannot set a rate too low for correct statistics.
 */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}
/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define LATENCY_MULTIPLIER			(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)	/* 10 ms, in ns */
static void do_dbs_timer(struct work_struct *work);
struct cpu_dbs_info_s {
	struct cpufreq_policy *cur_policy;
	unsigned int prev_cpu_idle_up;
	unsigned int prev_cpu_idle_down;
	unsigned int enable;
	unsigned int down_skip;
	unsigned int requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */
/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock must always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock must be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 */
static DEFINE_MUTEX(dbs_mutex);
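/*
 * Illustrative sketch of that ordering (not code from this file; assumes
 * the get_online_cpus()/put_online_cpus() hotplug API):
 *
 *	get_online_cpus();		// hotplug lock first
 *	mutex_lock(&dbs_mutex);
 *	__cpufreq_driver_target(...);	// may re-take the hotplug lock
 *	mutex_unlock(&dbs_mutex);
 *	put_online_cpus();
 */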
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int ignore_nice;
	unsigned int freq_step;
};
static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	unsigned int add_nice = 0, ret;

	if (dbs_tuners_ins.ignore_nice)
		add_nice = kstat_cpu(cpu).cpustat.nice;
	ret = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		add_nice;

	return ret;
}
/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
							freq->cpu);
	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return 0;

	policy = this_dbs_info->cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the
	 * 'valid' range of frequencies available to us; otherwise we do
	 * not change it
	 */
	if (this_dbs_info->requested_freq > policy->max
			|| this_dbs_info->requested_freq < policy->min)
		this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: conservative sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)				\
static struct freq_attr _name =				\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_conservative governor tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_down_factor = input;
	mutex_unlock(&dbs_mutex);
	return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	mutex_lock(&dbs_mutex);
	if (ret != 1) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}
	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
	mutex_unlock(&dbs_mutex);
	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 ||
			input <= dbs_tuners_ins.down_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);
	return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);
	return count;
}
static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;
	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	mutex_unlock(&dbs_mutex);
	return count;
}
static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.freq_step = input;
	mutex_unlock(&dbs_mutex);
	return count;
}
#define define_one_rw(_name)				\
static struct freq_attr _name =				\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(freq_step);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};
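/*
 * Example usage from userspace (illustrative; the exact sysfs path can
 * vary with the kernel configuration):
 *
 *	# cat /sys/devices/system/cpu/cpu0/cpufreq/conservative/up_threshold
 *	80
 *	# echo 30 > /sys/devices/system/cpu/cpu0/cpufreq/conservative/freq_step
 */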
/************************** sysfs end ************************/
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int tmp_idle_ticks, total_idle_ticks;
	unsigned int freq_target;
	unsigned int freq_down_sampling_rate;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	struct cpufreq_policy *policy;

	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * The default safe range is 20% to 80%.
	 * Every sampling_rate, we check
	 *	- If current idle time is less than 20%, then we try to
	 *	  increase the frequency
	 * Every sampling_rate * sampling_down_factor, we check
	 *	- If current idle time is more than 80%, then we try to
	 *	  decrease the frequency
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of maximum frequency
	 */
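	/*
	 * Worked example (illustrative only; assumes HZ=250 and a
	 * sampling_rate of 80000 us, i.e. 20 jiffies per sample): with
	 * up_threshold = 80, up_idle_ticks = (100 - 80) * 20 = 400. Since
	 * idle_ticks is scaled by 100 below, fewer than 4 idle jiffies out
	 * of 20 (idle < 20%, load > 80%) triggers a frequency increase.
	 */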
	/* Check for frequency increase */
	idle_ticks = UINT_MAX;

	total_idle_ticks = get_cpu_idle_time(cpu);
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	if (idle_ticks < up_idle_ticks) {
		this_dbs_info->down_skip = 0;
		this_dbs_info->prev_cpu_idle_down =
			this_dbs_info->prev_cpu_idle_up;

		/* if we are already at full speed then break out early */
		if (this_dbs_info->requested_freq == policy->max)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq += freq_target;
		if (this_dbs_info->requested_freq > policy->max)
			this_dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
			CPUFREQ_RELATION_H);
		return;
	}
	/* Check for frequency decrease */
	this_dbs_info->down_skip++;
	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
		return;

	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
	tmp_idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

	if (tmp_idle_ticks < idle_ticks)
		idle_ticks = tmp_idle_ticks;

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	this_dbs_info->down_skip = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);
	if (idle_ticks > down_idle_ticks) {
		/*
		 * if we are already at the lowest speed then break out early
		 * or if we 'cannot' reduce the speed as the user might want
		 * freq_target to be zero
		 */
		if (this_dbs_info->requested_freq == policy->min
				|| dbs_tuners_ins.freq_step == 0)
			return;

		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_target == 0))
			freq_target = 5;

		this_dbs_info->requested_freq -= freq_target;
		if (this_dbs_info->requested_freq < policy->min)
			this_dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
				CPUFREQ_RELATION_H);
		return;
	}
}
static void do_dbs_timer(struct work_struct *work)
{
	int i;

	mutex_lock(&dbs_mutex);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	mutex_unlock(&dbs_mutex);
}

static inline void dbs_timer_init(void)
{
	/* deferrable: don't wake an idle CPU just to sample the load */
	init_timer_deferrable(&dbs_work.timer);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		dbs_enable++;
		/*
		 * Start the timer-scheduled work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in ns. Convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate =
				max(10 * latency * LATENCY_MULTIPLIER,
				    MIN_STAT_SAMPLING_RATE);
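			/*
			 * Worked example (illustrative): a transition
			 * latency of 10000 ns gives latency = 10 us, so
			 * def_sampling_rate = max(10 * 10 * 1000,
			 * MIN_STAT_SAMPLING_RATE) = 100000 us (assuming
			 * MIN_STAT_SAMPLING_RATE <= 100000), i.e. the
			 * load is evaluated every 100 ms.
			 */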
			dbs_tuners_ins.sampling_rate = def_sampling_rate;

			dbs_timer_init();
			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);
		break;
	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer-scheduled work when this governor
		 * is no longer in use on any CPU
		 */
		if (dbs_enable == 0) {
			dbs_timer_exit();
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		mutex_unlock(&dbs_mutex);
		break;
	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);