X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fcpufreq%2Fcpufreq_conservative.c;h=526bfbf696112a89784910718ddf3275ce025fef;hb=51ff9ef13366b3752a8ab6229c466fd1bd671d3a;hp=7498f2506adeaa2cffe379509786a24fd8049714;hpb=08a28e2e98aa821cf6f15f8a267beb2f33377bb9;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 7498f25..526bfbf 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -4,7 +4,7 @@ * Copyright (C) 2001 Russell King * (C) 2003 Venkatesh Pallipadi . * Jun Nakajima - * (C) 2004 Alexander Clouter + * (C) 2009 Alexander Clouter * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -13,22 +13,17 @@ #include #include -#include #include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include +#include #include #include -#include #include +#include +#include +#include +#include + /* * dbs is used in this file as a shortform for demandbased switching * It helps to keep variable names smaller, simpler @@ -37,88 +32,159 @@ #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_DOWN_THRESHOLD (20) -/* - * The polling frequency of this governor depends on the capability of +/* + * The polling frequency of this governor depends on the capability of * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling * rate. * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) * this governor will not work. * All times here are in uS. */ -static unsigned int def_sampling_rate; #define MIN_SAMPLING_RATE_RATIO (2) -/* for correct statistics, we need at least 10 ticks between each measure */ -#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10)) -#define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO) -#define MAX_SAMPLING_RATE (500 * def_sampling_rate) -#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) -static void do_dbs_timer(void *data); +static void do_dbs_timer(struct work_struct *work); struct cpu_dbs_info_s { - struct cpufreq_policy *cur_policy; - unsigned int prev_cpu_idle_up; - unsigned int prev_cpu_idle_down; - unsigned int enable; + cputime64_t prev_cpu_idle; + cputime64_t prev_cpu_wall; + cputime64_t prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + unsigned int down_skip; + unsigned int requested_freq; + int cpu; + unsigned int enable:1; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. 
+ */ + struct mutex timer_mutex; }; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ -static DEFINE_MUTEX (dbs_mutex); -static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); +/* + * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on + * different CPUs. It protects dbs_enable in governor start/stop. + */ +static DEFINE_MUTEX(dbs_mutex); -struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - unsigned int freq_step; -}; +static struct workqueue_struct *kconservative_wq; -static struct dbs_tuners dbs_tuners_ins = { - .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, - .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, - .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int ignore_nice; + unsigned int freq_step; +} dbs_tuners_ins = { + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .ignore_nice = 0, + .freq_step = 5, }; -static inline unsigned int get_cpu_idle_time(unsigned int cpu) +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) { - return kstat_cpu(cpu).cpustat.idle + - kstat_cpu(cpu).cpustat.iowait + - ( dbs_tuners_ins.ignore_nice ? - kstat_cpu(cpu).cpustat.nice : - 0); + cputime64_t idle_time; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + + idle_time = cputime64_sub(cur_wall_time, busy_time); + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time);; } -/************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) { - return sprintf (buf, "%u\n", MAX_SAMPLING_RATE); + u64 idle_time = get_cpu_idle_time_us(cpu, wall); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + + return idle_time; } -static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) +/* keep track of frequency transitions */ +static int +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) { - return sprintf (buf, "%u\n", MIN_SAMPLING_RATE); + struct cpufreq_freqs *freq = data; + struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, + freq->cpu); + + struct cpufreq_policy *policy; + + if (!this_dbs_info->enable) + return 0; + + policy = this_dbs_info->cur_policy; + + /* + * we only care if our internally tracked freq moves outside + * the 'valid' ranges of freqency available to us otherwise + * we do not change it + */ + if (this_dbs_info->requested_freq > policy->max + || this_dbs_info->requested_freq < 
policy->min) + this_dbs_info->requested_freq = freq->new; + + return 0; } -#define define_one_ro(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0444, show_##_name, NULL) +static struct notifier_block dbs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier +}; -define_one_ro(sampling_rate_max); -define_one_ro(sampling_rate_min); +/************************** sysfs interface ************************/ +static ssize_t show_sampling_rate_max(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " + "sysfs file is deprecated - used by: %s\n", current->comm); + return sprintf(buf, "%u\n", -1U); +} + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_max); +define_one_global_ro(sampling_rate_min); /* cpufreq_conservative Governor Tunables */ #define show_one(file_name, object) \ static ssize_t show_##file_name \ -(struct cpufreq_policy *unused, char *buf) \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ } @@ -129,12 +195,41 @@ show_one(down_threshold, down_threshold); show_one(ignore_nice_load, ignore_nice); show_one(freq_step, freq_step); -static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, - const char *buf, size_t count) +/*** delete after deprecation time ***/ +#define DEPRECATION_MSG(file_name) \ + printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ + "interface is deprecated - " #file_name "\n"); + +#define show_one_old(file_name) \ +static ssize_t show_##file_name##_old \ +(struct cpufreq_policy *unused, char *buf) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return show_##file_name(NULL, NULL, buf); \ +} +show_one_old(sampling_rate); +show_one_old(sampling_down_factor); +show_one_old(up_threshold); +show_one_old(down_threshold); +show_one_old(ignore_nice_load); +show_one_old(freq_step); +show_one_old(sampling_rate_min); +show_one_old(sampling_rate_max); + +cpufreq_freq_attr_ro_old(sampling_rate_min); +cpufreq_freq_attr_ro_old(sampling_rate_max); + +/*** delete after deprecation time ***/ + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, + const char *buf, size_t count) { unsigned int input; int ret; - ret = sscanf (buf, "%u", &input); + ret = sscanf(buf, "%u", &input); + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; @@ -145,34 +240,32 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, return count; } -static ssize_t store_sampling_rate(struct cpufreq_policy *unused, - const char *buf, size_t count) +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) { unsigned int input; int ret; - ret = sscanf (buf, "%u", &input); + ret = sscanf(buf, "%u", &input); - mutex_lock(&dbs_mutex); - if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) { - mutex_unlock(&dbs_mutex); + if (ret != 1) return -EINVAL; - } - dbs_tuners_ins.sampling_rate = input; + mutex_lock(&dbs_mutex); + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); mutex_unlock(&dbs_mutex); return count; } -static ssize_t store_up_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) +static ssize_t store_up_threshold(struct 
kobject *a, struct attribute *b, + const char *buf, size_t count) { unsigned int input; int ret; - ret = sscanf (buf, "%u", &input); + ret = sscanf(buf, "%u", &input); mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input < 0 || + if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; @@ -184,15 +277,16 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused, return count; } -static ssize_t store_down_threshold(struct cpufreq_policy *unused, - const char *buf, size_t count) +static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) { unsigned int input; int ret; - ret = sscanf (buf, "%u", &input); + ret = sscanf(buf, "%u", &input); mutex_lock(&dbs_mutex); - if (ret != 1 || input > 100 || input < 0 || + /* cannot be lower than 11 otherwise freq will not fall */ + if (ret != 1 || input < 11 || input > 100 || input >= dbs_tuners_ins.up_threshold) { mutex_unlock(&dbs_mutex); return -EINVAL; @@ -204,54 +298,55 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused, return count; } -static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy, - const char *buf, size_t count) +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) { unsigned int input; int ret; unsigned int j; - - ret = sscanf (buf, "%u", &input); - if ( ret != 1 ) + + ret = sscanf(buf, "%u", &input); + if (ret != 1) return -EINVAL; - if ( input > 1 ) + if (input > 1) input = 1; - + mutex_lock(&dbs_mutex); - if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */ + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ mutex_unlock(&dbs_mutex); return count; } dbs_tuners_ins.ignore_nice = input; - /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */ + /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j); - j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up; + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(cs_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } mutex_unlock(&dbs_mutex); return count; } -static ssize_t store_freq_step(struct cpufreq_policy *policy, - const char *buf, size_t count) +static ssize_t store_freq_step(struct kobject *a, struct attribute *b, + const char *buf, size_t count) { unsigned int input; int ret; + ret = sscanf(buf, "%u", &input); - ret = sscanf (buf, "%u", &input); - - if ( ret != 1 ) + if (ret != 1) return -EINVAL; - if ( input > 100 ) + if (input > 100) input = 100; - + /* no need to test here if freq_step is zero as the user might actually * want this, they would be crazy though :) */ mutex_lock(&dbs_mutex); @@ -261,18 +356,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy, return count; } -#define define_one_rw(_name) \ -static struct freq_attr _name = \ -__ATTR(_name, 0644, show_##_name, store_##_name) - -define_one_rw(sampling_rate); -define_one_rw(sampling_down_factor); -define_one_rw(up_threshold); -define_one_rw(down_threshold); -define_one_rw(ignore_nice_load); -define_one_rw(freq_step); +define_one_global_rw(sampling_rate); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(up_threshold); 
+define_one_global_rw(down_threshold); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(freq_step); -static struct attribute * dbs_attributes[] = { +static struct attribute *dbs_attributes[] = { &sampling_rate_max.attr, &sampling_rate_min.attr, &sampling_rate.attr, @@ -289,172 +380,206 @@ static struct attribute_group dbs_attr_group = { .name = "conservative", }; +/*** delete after deprecation time ***/ + +#define write_one_old(file_name) \ +static ssize_t store_##file_name##_old \ +(struct cpufreq_policy *unused, const char *buf, size_t count) \ +{ \ + printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ + "interface is deprecated - " #file_name "\n"); \ + return store_##file_name(NULL, NULL, buf, count); \ +} +write_one_old(sampling_rate); +write_one_old(sampling_down_factor); +write_one_old(up_threshold); +write_one_old(down_threshold); +write_one_old(ignore_nice_load); +write_one_old(freq_step); + +cpufreq_freq_attr_rw_old(sampling_rate); +cpufreq_freq_attr_rw_old(sampling_down_factor); +cpufreq_freq_attr_rw_old(up_threshold); +cpufreq_freq_attr_rw_old(down_threshold); +cpufreq_freq_attr_rw_old(ignore_nice_load); +cpufreq_freq_attr_rw_old(freq_step); + +static struct attribute *dbs_attributes_old[] = { + &sampling_rate_max_old.attr, + &sampling_rate_min_old.attr, + &sampling_rate_old.attr, + &sampling_down_factor_old.attr, + &up_threshold_old.attr, + &down_threshold_old.attr, + &ignore_nice_load_old.attr, + &freq_step_old.attr, + NULL +}; + +static struct attribute_group dbs_attr_group_old = { + .attrs = dbs_attributes_old, + .name = "conservative", +}; + +/*** delete after deprecation time ***/ + /************************** sysfs end ************************/ -static void dbs_check_cpu(int cpu) +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) { - unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; - unsigned int tmp_idle_ticks, total_idle_ticks; - unsigned int freq_step; - unsigned int freq_down_sampling_rate; - static unsigned short down_skip[NR_CPUS]; - static unsigned int requested_freq[NR_CPUS]; - static unsigned int init_flag = NR_CPUS; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + unsigned int load = 0; + unsigned int max_load = 0; + unsigned int freq_target; + struct cpufreq_policy *policy; + unsigned int j; - if (!this_dbs_info->enable) - return; + policy = this_dbs_info->cur_policy; - if ( init_flag != 0 ) { - for_each_cpu(init_flag) { - down_skip[init_flag] = 0; - /* I doubt a CPU exists with a freq of 0hz :) */ - requested_freq[init_flag] = 0; - } - init_flag = 0; - } - /* - * If its a freshly initialised cpu we setup requested_freq. This - * check could be avoided if we did not care about a first time - * stunted increase in CPU speed when there is a load. I feel we - * should be initialising this to something. The removal of a CPU - * is not a problem, after a short time the CPU should settle down - * to a 'natural' frequency. + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate*sampling_down_factor, we check, if current + * idle time is more than 80%, then we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of maximum frequency */ - if (requested_freq[cpu] == 0) - requested_freq[cpu] = this_dbs_info->cur_policy->cur; - policy = this_dbs_info->cur_policy; + /* Get Absolute Load */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + cputime64_t cur_wall_time, cur_idle_time; + unsigned int idle_time, wall_time; - /* - * The default safe range is 20% to 80% - * Every sampling_rate, we check - * - If current idle time is less than 20%, then we try to - * increase frequency - * Every sampling_rate*sampling_down_factor, we check - * - If current idle time is more than 80%, then we try to - * decrease frequency - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of max_frequency - */ + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - /* Check for frequency increase */ - idle_ticks = UINT_MAX; + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - /* Check for frequency increase */ - total_idle_ticks = get_cpu_idle_time(cpu); - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_up; - this_dbs_info->prev_cpu_idle_up = total_idle_ticks; + wall_time = (unsigned int) cputime64_sub(cur_wall_time, + j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) cputime64_sub(cur_idle_time, + j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + if (dbs_tuners_ins.ignore_nice) { + cputime64_t cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, + j_dbs_info->prev_cpu_nice); + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; + load = 100 * (wall_time - idle_time) / wall_time; - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * - usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + if (load > max_load) + max_load = load; + } - if (idle_ticks < up_idle_ticks) { - down_skip[cpu] = 0; - this_dbs_info->prev_cpu_idle_down = - this_dbs_info->prev_cpu_idle_up; + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (dbs_tuners_ins.freq_step == 0) + return; + + /* Check for frequency increase */ + if (max_load > dbs_tuners_ins.up_threshold) { + this_dbs_info->down_skip = 0; /* if we are already at full speed then break out early */ - if (requested_freq[cpu] == policy->max) + if (this_dbs_info->requested_freq == policy->max) return; - - freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; + + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_step == 0)) - freq_step = 5; - - requested_freq[cpu] += freq_step; - if (requested_freq[cpu] > policy->max) - requested_freq[cpu] = policy->max; - - __cpufreq_driver_target(policy, requested_freq[cpu], + if (unlikely(freq_target == 0)) + freq_target = 5; + + this_dbs_info->requested_freq += freq_target; + if (this_dbs_info->requested_freq > policy->max) + this_dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } - /* Check for frequency decrease */ - down_skip[cpu]++; - if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) - return; - - /* Check for frequency decrease */ - total_idle_ticks = this_dbs_info->prev_cpu_idle_up; - tmp_idle_ticks = total_idle_ticks - - this_dbs_info->prev_cpu_idle_down; - this_dbs_info->prev_cpu_idle_down = total_idle_ticks; - - if (tmp_idle_ticks < idle_ticks) - idle_ticks = tmp_idle_ticks; - - /* Scale idle ticks by 100 and compare with up and down ticks */ - idle_ticks *= 100; - down_skip[cpu] = 0; + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load < (dbs_tuners_ins.down_threshold - 10)) { + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * - dbs_tuners_ins.sampling_down_factor; - down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * - usecs_to_jiffies(freq_down_sampling_rate); + this_dbs_info->requested_freq -= freq_target; + if (this_dbs_info->requested_freq < policy->min) + this_dbs_info->requested_freq = policy->min; - if (idle_ticks > down_idle_ticks) { /* - * if we are already at the lowest speed then break out early - * or if we 'cannot' reduce the speed as the user might want - * freq_step to be zero + * if we cannot reduce the frequency anymore, break out early */ - if (requested_freq[cpu] == policy->min - || dbs_tuners_ins.freq_step == 0) + if (policy->cur == policy->min) return; - freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; - - /* max freq cannot be less than 100. But who knows.... 
*/ - if (unlikely(freq_step == 0)) - freq_step = 5; - - requested_freq[cpu] -= freq_step; - if (requested_freq[cpu] < policy->min) - requested_freq[cpu] = policy->min; - - __cpufreq_driver_target(policy, requested_freq[cpu], + __cpufreq_driver_target(policy, this_dbs_info->requested_freq, CPUFREQ_RELATION_H); return; } } -static void do_dbs_timer(void *data) -{ - int i; - mutex_lock(&dbs_mutex); - for_each_online_cpu(i) - dbs_check_cpu(i); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - mutex_unlock(&dbs_mutex); -} +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + delay -= jiffies % delay; + + mutex_lock(&dbs_info->timer_mutex); + + dbs_check_cpu(dbs_info); -static inline void dbs_timer_init(void) + queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) { - INIT_WORK(&dbs_work, do_dbs_timer, NULL); - schedule_delayed_work(&dbs_work, - usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); - return; + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + delay -= jiffies % delay; + + dbs_info->enable = 1; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work, + delay); } -static inline void dbs_timer_exit(void) +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) { - cancel_delayed_work(&dbs_work); - return; + dbs_info->enable = 0; + cancel_delayed_work_sync(&dbs_info->work); } static int cpufreq_governor_dbs(struct cpufreq_policy *policy, @@ -463,33 +588,39 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int cpu = policy->cpu; struct cpu_dbs_info_s *this_dbs_info; unsigned int j; + int rc; - this_dbs_info = &per_cpu(cpu_dbs_info, cpu); + this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); switch (event) { case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || - (!policy->cur)) + if ((!cpu_online(cpu)) || (!policy->cur)) return -EINVAL; - if (policy->cpuinfo.transition_latency > - (TRANSITION_LATENCY_LIMIT * 1000)) - return -EINVAL; - if (this_dbs_info->enable) /* Already enabled */ - break; - mutex_lock(&dbs_mutex); - for_each_cpu_mask(j, policy->cpus) { + + rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cpu_dbs_info, j); + j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu); - j_dbs_info->prev_cpu_idle_down - = j_dbs_info->prev_cpu_idle_up; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall); + if (dbs_tuners_ins.ignore_nice) { + j_dbs_info->prev_cpu_nice = + kstat_cpu(j).cpustat.nice; + } } - this_dbs_info->enable = 1; - sysfs_create_group(&policy->kobj, &dbs_attr_group); + this_dbs_info->down_skip = 0; + this_dbs_info->requested_freq = policy->cur; + + mutex_init(&this_dbs_info->timer_mutex); dbs_enable++; /* * Start the timerschedule work, when this governor @@ -502,79 +633,120 @@ static int 
cpufreq_governor_dbs(struct cpufreq_policy *policy, if (latency == 0) latency = 1; - def_sampling_rate = 10 * latency * - DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; - - if (def_sampling_rate < MIN_STAT_SAMPLING_RATE) - def_sampling_rate = MIN_STAT_SAMPLING_RATE; - - dbs_tuners_ins.sampling_rate = def_sampling_rate; - dbs_tuners_ins.ignore_nice = 0; - dbs_tuners_ins.freq_step = 5; - - dbs_timer_init(); + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + mutex_unlock(&dbs_mutex); + return rc; + } + + /* + * conservative does not implement micro like ondemand + * governor, thus we are bound to jiffes/HZ + */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + dbs_tuners_ins.sampling_rate = + max(min_sampling_rate, + latency * LATENCY_MULTIPLIER); + + cpufreq_register_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); } - mutex_unlock(&dbs_mutex); + + dbs_timer_init(this_dbs_info); + break; case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + mutex_lock(&dbs_mutex); - this_dbs_info->enable = 0; - sysfs_remove_group(&policy->kobj, &dbs_attr_group); + sysfs_remove_group(&policy->kobj, &dbs_attr_group_old); dbs_enable--; + mutex_destroy(&this_dbs_info->timer_mutex); + /* * Stop the timerschedule work, when this governor * is used for first time */ - if (dbs_enable == 0) - dbs_timer_exit(); - + if (dbs_enable == 0) + cpufreq_unregister_notifier( + &dbs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + mutex_unlock(&dbs_mutex); + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); break; case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_mutex); + mutex_lock(&this_dbs_info->timer_mutex); if (policy->max < this_dbs_info->cur_policy->cur) __cpufreq_driver_target( this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); + policy->max, CPUFREQ_RELATION_H); else if (policy->min > this_dbs_info->cur_policy->cur) __cpufreq_driver_target( this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - mutex_unlock(&dbs_mutex); + policy->min, CPUFREQ_RELATION_L); + mutex_unlock(&this_dbs_info->timer_mutex); + break; } return 0; } -static struct cpufreq_governor cpufreq_gov_dbs = { - .name = "conservative", - .governor = cpufreq_governor_dbs, - .owner = THIS_MODULE, +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE +static +#endif +struct cpufreq_governor cpufreq_gov_conservative = { + .name = "conservative", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, }; static int __init cpufreq_gov_dbs_init(void) { - return cpufreq_register_governor(&cpufreq_gov_dbs); + int err; + + kconservative_wq = create_workqueue("kconservative"); + if (!kconservative_wq) { + printk(KERN_ERR "Creation of kconservative failed\n"); + return -EFAULT; + } + + err = cpufreq_register_governor(&cpufreq_gov_conservative); + if (err) + destroy_workqueue(kconservative_wq); + + return err; } static void __exit cpufreq_gov_dbs_exit(void) { - /* Make sure that the scheduled work is indeed not running */ - flush_scheduled_work(); - - cpufreq_unregister_governor(&cpufreq_gov_dbs); + cpufreq_unregister_governor(&cpufreq_gov_conservative); + destroy_workqueue(kconservative_wq); } -MODULE_AUTHOR ("Alexander Clouter "); -MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for " +MODULE_AUTHOR("Alexander Clouter "); 
+MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " "Low Latency Frequency Transition capable processors " "optimised for use in a battery environment"); -MODULE_LICENSE ("GPL"); +MODULE_LICENSE("GPL"); +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE +fs_initcall(cpufreq_gov_dbs_init); +#else module_init(cpufreq_gov_dbs_init); +#endif module_exit(cpufreq_gov_dbs_exit);
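
The rewritten dbs_check_cpu() in this patch derives a per-CPU load from idle/wall-time deltas and then steps requested_freq up or down in freq_step-sized increments. The standalone C program below is a minimal sketch of that decision logic so the thresholds are easy to experiment with; the helper names (load_from_deltas, next_freq) and the sample numbers are hypothetical and not part of the kernel code, which works on cputime64_t counters and calls __cpufreq_driver_target().

/*
 * Simplified userspace model of the conservative governor's per-sample
 * decision, under the assumptions named above.
 */
#include <stdio.h>

struct tuners {
	unsigned int up_threshold;	/* DEF_FREQUENCY_UP_THRESHOLD   (80) */
	unsigned int down_threshold;	/* DEF_FREQUENCY_DOWN_THRESHOLD (20) */
	unsigned int freq_step;		/* percent of policy->max, default 5  */
};

/* Busy percentage over one sampling interval, as in dbs_check_cpu(). */
static unsigned int load_from_deltas(unsigned int wall_us, unsigned int idle_us)
{
	if (!wall_us || wall_us < idle_us)
		return 0;
	return 100 * (wall_us - idle_us) / wall_us;
}

/* Step the requested frequency and clamp it to the policy limits. */
static unsigned int next_freq(const struct tuners *t, unsigned int load,
			      unsigned int requested, unsigned int min,
			      unsigned int max)
{
	unsigned int target;

	if (t->freq_step == 0)		/* user disabled scaling via freq_step */
		return requested;

	target = (t->freq_step * max) / 100;
	if (target == 0)		/* mirrors the unlikely(freq_target == 0) fixup */
		target = 5;

	if (load > t->up_threshold)
		return (requested + target > max) ? max : requested + target;

	/* stay 10 points under down_threshold before stepping down */
	if (load < t->down_threshold - 10)
		return (requested < min + target) ? min : requested - target;

	return requested;		/* inside the hysteresis band: hold */
}

int main(void)
{
	struct tuners t = { .up_threshold = 80, .down_threshold = 20, .freq_step = 5 };
	/* hypothetical 1 GHz..2 GHz policy, one 10 ms sample that was 95% busy */
	unsigned int req = 1000000;

	req = next_freq(&t, load_from_deltas(10000, 500), req, 1000000, 2000000);
	printf("requested_freq -> %u kHz\n", req);	/* steps up by 5% of max */
	return 0;
}

Run as a plain gcc build, the sample steps requested_freq from 1000000 to 1100000 kHz: any increase moves by freq_step percent of policy->max, while decreases are held back until load drops ten points below down_threshold, which is the hysteresis the patch's comment block describes.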
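
The GOV_START hunk also replaces the old def_sampling_rate globals with a min_sampling_rate derived from the jiffy resolution and the hardware transition latency. A minimal sketch of that arithmetic, assuming HZ=250 and a hypothetical 100 us transition latency (the constants mirror the patch; max_u stands in for the kernel's max() macro):

#include <stdio.h>

#define MIN_SAMPLING_RATE_RATIO	2
#define LATENCY_MULTIPLIER	1000
#define MIN_LATENCY_MULTIPLIER	100

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int hz = 250;				/* assumed kernel HZ */
	unsigned int jiffies_to_usecs_10 = 10 * (1000000 / hz);
	unsigned int latency_us = 100;			/* hypothetical transition latency */
	unsigned int min_rate, sampling_rate;

	/* conservative has no micro-accounting, so it is bound to jiffies/HZ */
	min_rate = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs_10;
	/* bring kernel and hardware constraints together */
	min_rate = max_u(min_rate, MIN_LATENCY_MULTIPLIER * latency_us);

	sampling_rate = max_u(min_rate, latency_us * LATENCY_MULTIPLIER);

	printf("min %u us, default %u us\n", min_rate, sampling_rate);
	return 0;
}

With these assumed inputs the floor is 80000 us (two 10-jiffy periods) and the default is 100000 us (1000x the transition latency), showing how the patch lets whichever constraint is stricter win.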