diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 18b016e..338f428 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
 #include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
 * dbs is used in this file as a short form for demand-based switching.
 * It helps to keep variable names smaller and simpler.
  */
 
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL                (10)
 #define DEF_FREQUENCY_UP_THRESHOLD             (80)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL      (3)
+#define MICRO_FREQUENCY_UP_THRESHOLD           (95)
 #define MIN_FREQUENCY_UP_THRESHOLD             (11)
 #define MAX_FREQUENCY_UP_THRESHOLD             (100)
 
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO                        (2)
 /* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE                 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE                      (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MIN_STAT_SAMPLING_RATE                         \
+                       (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE                      \
+                       (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* The MIN_SAMPLING_RATE macro above will vanish with its sysfs file soon.
+ * Define the minimum settable sampling rate as the greater of:
+ *   - "HW transition latency" * 100 (same as default sampling rate / 10)
+ *   - MIN_STAT_SAMPLING_RATE
+ * so that userspace cannot shoot itself in the foot.
+ */
+static unsigned int minimum_sampling_rate(void)
+{
+       return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
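
As a worked illustration of the clamp above, here is a standalone userspace sketch; HZ = 250 and the 10 us transition latency are assumptions for the example, not values from the patch:

    #include <stdio.h>

    /* Userspace mock-up of minimum_sampling_rate(); HZ = 250 and the
     * 10 us transition latency are illustrative assumptions. */
    #define HZ 250
    #define jiffies_to_usecs(j) ((j) * (1000000 / HZ))
    #define MIN_SAMPLING_RATE_RATIO (2)
    #define MIN_STAT_SAMPLING_RATE \
            (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))

    int main(void)
    {
            unsigned int latency = 10;                        /* us */
            unsigned int def_sampling_rate = latency * 1000;  /* LATENCY_MULTIPLIER */
            unsigned int lower = def_sampling_rate / 10;      /* 1000 us */
            unsigned int min_rate = lower > MIN_STAT_SAMPLING_RATE ?
                                    lower : MIN_STAT_SAMPLING_RATE;

            /* jiffies_to_usecs(10) = 40000 us at HZ = 250, so the
             * statistics floor (80000 us) wins over latency * 100. */
            printf("minimum settable sampling rate: %u us\n", min_rate);
            return 0;
    }
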
+
+/* This will also vanish soon, when sampling_rate_max is removed */
 #define MAX_SAMPLING_RATE                      (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER   (1000)
-#define TRANSITION_LATENCY_LIMIT               (10 * 1000)
+#define LATENCY_MULTIPLIER                     (1000)
+#define TRANSITION_LATENCY_LIMIT               (10 * 1000 * 1000)
+
+static void do_dbs_timer(struct work_struct *work);
 
-static void do_dbs_timer(void *data);
+/* Sampling types */
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
+       cputime64_t prev_cpu_nice;
        struct cpufreq_policy *cur_policy;
-       struct work_struct work;
-       unsigned int enable;
+       struct delayed_work work;
+       struct cpufreq_frequency_table *freq_table;
+       unsigned int freq_lo;
+       unsigned int freq_lo_jiffies;
+       unsigned int freq_hi_jiffies;
+       int cpu;
+       unsigned int enable:1,
+               sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -79,44 +99,145 @@ static unsigned int dbs_enable;    /* number of CPUs using this policy */
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
-static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
+static DEFINE_MUTEX(dbs_mutex);
 
 static struct workqueue_struct *kondemand_wq;
 
-struct dbs_tuners {
+static struct dbs_tuners {
        unsigned int sampling_rate;
        unsigned int up_threshold;
+       unsigned int down_differential;
        unsigned int ignore_nice;
-};
-
-static struct dbs_tuners dbs_tuners_ins = {
+       unsigned int powersave_bias;
+} dbs_tuners_ins = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+       .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
        .ignore_nice = 0,
+       .powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+                                                       cputime64_t *wall)
 {
-       cputime64_t retval;
+       cputime64_t idle_time;
+       cputime64_t cur_wall_time;
+       cputime64_t busy_time;
+
+       cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+       busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+                       kstat_cpu(cpu).cpustat.system);
+
+       busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+       busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+       busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+       busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+
+       idle_time = cputime64_sub(cur_wall_time, busy_time);
+       if (wall)
+               *wall = cur_wall_time;
 
-       retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-                       kstat_cpu(cpu).cpustat.iowait);
+       return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+       u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+       if (idle_time == -1ULL)
+               return get_cpu_idle_time_jiffy(cpu, wall);
+
+       return idle_time;
+}
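
get_cpu_idle_time() prefers the tickless idle statistics and falls back to the jiffy-based estimate above when get_cpu_idle_time_us() reports -1ULL, its "not supported" sentinel. A minimal userspace sketch of the same sentinel-and-fallback pattern (the function names here are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the two idle-time sources; these are
     * not kernel functions. */
    static uint64_t fine_grained_idle_us(void)
    {
            return (uint64_t)-1;    /* -1ULL: fine accounting unsupported */
    }

    static uint64_t coarse_idle_us(void)
    {
            return 123456;          /* jiffy-granularity estimate */
    }

    static uint64_t get_idle_us(void)
    {
            uint64_t idle = fine_grained_idle_us();

            if (idle == (uint64_t)-1)       /* sentinel: fall back */
                    return coarse_idle_us();
            return idle;
    }

    int main(void)
    {
            printf("idle: %llu us\n", (unsigned long long)get_idle_us());
            return 0;
    }
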
+
+/*
+ * Find the right frequency to set now, with powersave_bias enabled.
+ * Returns the freq_hi to be used right now; also sets freq_hi_jiffies,
+ * freq_lo and freq_lo_jiffies in the per-CPU area so that the two
+ * frequencies average out to the biased target.
+ */
+static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+                                         unsigned int freq_next,
+                                         unsigned int relation)
+{
+       unsigned int freq_req, freq_reduc, freq_avg;
+       unsigned int freq_hi, freq_lo;
+       unsigned int index = 0;
+       unsigned int jiffies_total, jiffies_hi, jiffies_lo;
+       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+
+       if (!dbs_info->freq_table) {
+               dbs_info->freq_lo = 0;
+               dbs_info->freq_lo_jiffies = 0;
+               return freq_next;
+       }
 
-       if (dbs_tuners_ins.ignore_nice)
-               retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+       cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
+                       relation, &index);
+       freq_req = dbs_info->freq_table[index].frequency;
+       freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+       freq_avg = freq_req - freq_reduc;
+
+       /* Find freq bounds for freq_avg in freq_table */
+       index = 0;
+       cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+                       CPUFREQ_RELATION_H, &index);
+       freq_lo = dbs_info->freq_table[index].frequency;
+       index = 0;
+       cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+                       CPUFREQ_RELATION_L, &index);
+       freq_hi = dbs_info->freq_table[index].frequency;
+
+       /* Find out how long we have to be in hi and lo freqs */
+       if (freq_hi == freq_lo) {
+               dbs_info->freq_lo = 0;
+               dbs_info->freq_lo_jiffies = 0;
+               return freq_lo;
+       }
+       jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+       jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
+       jiffies_hi += ((freq_hi - freq_lo) / 2);
+       jiffies_hi /= (freq_hi - freq_lo);
+       jiffies_lo = jiffies_total - jiffies_hi;
+       dbs_info->freq_lo = freq_lo;
+       dbs_info->freq_lo_jiffies = jiffies_lo;
+       dbs_info->freq_hi_jiffies = jiffies_hi;
+       return freq_hi;
+}
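
A worked example of the dwell-time split, with an assumed two-entry frequency table of 1.6 GHz and 2.0 GHz, powersave_bias = 100 (10.0%), and a 25-jiffy sampling period:

    #include <stdio.h>

    int main(void)
    {
            /* All numbers are illustrative, not from a real freq_table. */
            unsigned int freq_req = 2000000;           /* kHz, requested */
            unsigned int bias = 100;                   /* powersave_bias, 0..1000 */
            unsigned int freq_lo = 1600000, freq_hi = 2000000;
            unsigned int jiffies_total = 25;           /* sampling period */

            unsigned int freq_avg = freq_req - freq_req * bias / 1000;

            /* Same rounded division as powersave_bias_target() */
            unsigned int jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
            jiffies_hi += (freq_hi - freq_lo) / 2;
            jiffies_hi /= (freq_hi - freq_lo);
            unsigned int jiffies_lo = jiffies_total - jiffies_hi;

            /* freq_avg = 1800000 kHz sits midway, so the CPU dwells
             * ~13 jiffies at 2.0 GHz and ~12 at 1.6 GHz per period,
             * averaging out near the biased 1.8 GHz target. */
            printf("hi: %u jiffies, lo: %u jiffies\n", jiffies_hi, jiffies_lo);
            return 0;
    }
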
 
-       return retval;
+static void ondemand_powersave_bias_init(void)
+{
+       int i;
+       for_each_online_cpu(i) {
+               struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+               dbs_info->freq_table = cpufreq_frequency_get_table(i);
+               dbs_info->freq_lo = 0;
+       }
 }
 
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-       return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+       static int print_once;
+
+       if (!print_once) {
+               printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
+                      "sysfs file is deprecated - used by: %s\n",
+                      current->comm);
+               print_once = 1;
+       }
+       return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
 }
 
 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-       return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+       static int print_once;
+
+       if (!print_once) {
+               printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
+                      "sysfs file is deprecated - used by: %s\n",
+                      current->comm);
+               print_once = 1;
+       }
+       return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)           \
@@ -136,21 +257,21 @@ static ssize_t show_##file_name                                           \
 show_one(sampling_rate, sampling_rate);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
+show_one(powersave_bias, powersave_bias);
 
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
 {
        unsigned int input;
        int ret;
-       ret = sscanf (buf, "%u", &input);
+       ret = sscanf(buf, "%u", &input);
 
        mutex_lock(&dbs_mutex);
-       if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+       if (ret != 1) {
                mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }
-
-       dbs_tuners_ins.sampling_rate = input;
+       dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
        mutex_unlock(&dbs_mutex);
 
        return count;
@@ -161,7 +282,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 {
        unsigned int input;
        int ret;
-       ret = sscanf (buf, "%u", &input);
+       ret = sscanf(buf, "%u", &input);
 
        mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
@@ -184,15 +305,15 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 
        unsigned int j;
 
-       ret = sscanf (buf, "%u", &input);
-       if ( ret != 1 )
+       ret = sscanf(buf, "%u", &input);
+       if (ret != 1)
                return -EINVAL;
 
-       if ( input > 1 )
+       if (input > 1)
                input = 1;
 
        mutex_lock(&dbs_mutex);
-       if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+       if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                mutex_unlock(&dbs_mutex);
                return count;
        }
@@ -202,14 +323,38 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
        for_each_online_cpu(j) {
                struct cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cpu_dbs_info, j);
-               dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-               dbs_info->prev_cpu_wall = get_jiffies_64();
+               dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                                               &dbs_info->prev_cpu_wall);
+               if (dbs_tuners_ins.ignore_nice)
+                       dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
        }
        mutex_unlock(&dbs_mutex);
 
        return count;
 }
 
+static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
+               const char *buf, size_t count)
+{
+       unsigned int input;
+       int ret;
+       ret = sscanf(buf, "%u", &input);
+
+       if (ret != 1)
+               return -EINVAL;
+
+       if (input > 1000)
+               input = 1000;
+
+       mutex_lock(&dbs_mutex);
+       dbs_tuners_ins.powersave_bias = input;
+       ondemand_powersave_bias_init();
+       mutex_unlock(&dbs_mutex);
+
+       return count;
+}
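
powersave_bias is expressed in units of 0.1%, and values above 1000 are clamped by the store routine above. A userspace sketch of setting a 10% bias; the sysfs path is an assumption (the per-policy directory created against policy->kobj) and should be adjusted for the policy in question:

    #include <stdio.h>

    /* Set a 10% powersave bias from userspace; path is an assumption. */
    int main(void)
    {
            const char *path =
                "/sys/devices/system/cpu/cpu0/cpufreq/ondemand/powersave_bias";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "%u\n", 100);    /* units of 0.1%: 100 -> 10% bias */
            fclose(f);
            return 0;
    }
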
+
 #define define_one_rw(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
@@ -217,13 +362,15 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_rw(sampling_rate);
 define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
+define_one_rw(powersave_bias);
 
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &up_threshold.attr,
        &ignore_nice_load.attr,
+       &powersave_bias.attr,
        NULL
 };
 
@@ -236,9 +383,7 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-       unsigned int idle_ticks, total_ticks;
-       unsigned int load;
-       cputime64_t cur_jiffies;
+       unsigned int max_load_freq;
 
        struct cpufreq_policy *policy;
        unsigned int j;
@@ -246,11 +391,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
        if (!this_dbs_info->enable)
                return;
 
+       this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;
-       cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-       total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-                       this_dbs_info->prev_cpu_wall);
-       this_dbs_info->prev_cpu_wall = cur_jiffies;
+
        /*
 * Every sampling_rate, we check if current idle time is less
         * than 20% (default), then we try to increase frequency
@@ -263,32 +406,74 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         * 5% (default) of current frequency
         */
 
-       /* Get Idle Time */
-       idle_ticks = UINT_MAX;
-       for_each_cpu_mask(j, policy->cpus) {
-               cputime64_t total_idle_ticks;
-               unsigned int tmp_idle_ticks;
+       /* Get Absolute Load - in terms of freq */
+       max_load_freq = 0;
+
+       for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
+               cputime64_t cur_wall_time, cur_idle_time;
+               unsigned int idle_time, wall_time;
+               unsigned int load, load_freq;
+               int freq_avg;
 
                j_dbs_info = &per_cpu(cpu_dbs_info, j);
-               total_idle_ticks = get_cpu_idle_time(j);
-               tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+
+               cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+               wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                               j_dbs_info->prev_cpu_wall);
+               j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+               idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                j_dbs_info->prev_cpu_idle);
-               j_dbs_info->prev_cpu_idle = total_idle_ticks;
+               j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+               if (dbs_tuners_ins.ignore_nice) {
+                       cputime64_t cur_nice;
+                       unsigned long cur_nice_jiffies;
+
+                       cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+                                        j_dbs_info->prev_cpu_nice);
+                       /*
+                        * Assumption: nice time between sampling periods will
+                        * be less than 2^32 jiffies on a 32-bit system
+                        */
+                       cur_nice_jiffies = (unsigned long)
+                                       cputime64_to_jiffies64(cur_nice);
+
+                       j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+                       idle_time += jiffies_to_usecs(cur_nice_jiffies);
+               }
+
+               if (unlikely(!wall_time || wall_time < idle_time))
+                       continue;
 
-               if (tmp_idle_ticks < idle_ticks)
-                       idle_ticks = tmp_idle_ticks;
+               load = 100 * (wall_time - idle_time) / wall_time;
+
+               freq_avg = __cpufreq_driver_getavg(policy, j);
+               if (freq_avg <= 0)
+                       freq_avg = policy->cur;
+
+               load_freq = load * freq_avg;
+               if (load_freq > max_load_freq)
+                       max_load_freq = load_freq;
        }
-       load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
        /* Check for frequency increase */
-       if (load > dbs_tuners_ins.up_threshold) {
+       if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                /* if we are already at full speed then break out early */
-               if (policy->cur == policy->max)
-                       return;
-
-               __cpufreq_driver_target(policy, policy->max,
-                       CPUFREQ_RELATION_H);
+               if (!dbs_tuners_ins.powersave_bias) {
+                       if (policy->cur == policy->max)
+                               return;
+
+                       __cpufreq_driver_target(policy, policy->max,
+                               CPUFREQ_RELATION_H);
+               } else {
+                       int freq = powersave_bias_target(policy, policy->max,
+                                       CPUFREQ_RELATION_H);
+                       __cpufreq_driver_target(policy, freq,
+                               CPUFREQ_RELATION_L);
+               }
                return;
        }
 
@@ -302,40 +487,82 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         * can support the current CPU usage without triggering the up
 * policy. To be safe, we aim 10 points under the threshold.
         */
-       if (load < (dbs_tuners_ins.up_threshold - 10)) {
+       if (max_load_freq <
+           (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
+            policy->cur) {
                unsigned int freq_next;
-               freq_next = (policy->cur * load) /
-                       (dbs_tuners_ins.up_threshold - 10);
-
-               __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+               freq_next = max_load_freq /
+                               (dbs_tuners_ins.up_threshold -
+                                dbs_tuners_ins.down_differential);
+
+               if (!dbs_tuners_ins.powersave_bias) {
+                       __cpufreq_driver_target(policy, freq_next,
+                                       CPUFREQ_RELATION_L);
+               } else {
+                       int freq = powersave_bias_target(policy, freq_next,
+                                       CPUFREQ_RELATION_L);
+                       __cpufreq_driver_target(policy, freq,
+                               CPUFREQ_RELATION_L);
+               }
        }
 }
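
dbs_check_cpu() now compares loads scaled by frequency, so the thresholds act on "frequency demand" rather than bare percentages. A worked example with assumed numbers (up_threshold = 80, down_differential = 10, current frequency 2.0 GHz, measured load 60% at an average 1.6 GHz):

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative numbers only. */
            unsigned int up_threshold = 80, down_differential = 10;
            unsigned int cur = 2000000;         /* kHz, current frequency */
            unsigned int load = 60;             /* percent busy */
            unsigned int freq_avg = 1600000;    /* kHz, measured average */

            unsigned int load_freq = load * freq_avg;   /* 96,000,000 */

            if (load_freq > up_threshold * cur) {       /* vs 160,000,000 */
                    printf("scale to policy->max\n");
            } else if (load_freq <
                       (up_threshold - down_differential) * cur) {
                    /* vs 140,000,000: pick the lowest frequency that keeps
                     * projected load under threshold minus differential */
                    unsigned int freq_next =
                            load_freq / (up_threshold - down_differential);
                    printf("scale down toward %u kHz\n", freq_next);
            }
            return 0;
    }

Here freq_next works out to about 1371 MHz; CPUFREQ_RELATION_L then rounds it up to the nearest supported table frequency.
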
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
-       unsigned int cpu = smp_processor_id();
-       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       struct cpu_dbs_info_s *dbs_info =
+               container_of(work, struct cpu_dbs_info_s, work.work);
+       unsigned int cpu = dbs_info->cpu;
+       int sample_type = dbs_info->sample_type;
 
-       dbs_check_cpu(dbs_info);
-       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-}
+       /* We want all CPUs to do sampling nearly on the same jiffy */
+       int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
-static inline void dbs_timer_init(unsigned int cpu)
-{
-       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       delay -= jiffies % delay;
+
+       if (lock_policy_rwsem_write(cpu) < 0)
+               return;
 
-       INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
-       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
-                       usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-       return;
+       if (!dbs_info->enable) {
+               unlock_policy_rwsem_write(cpu);
+               return;
+       }
+
+       /* Common NORMAL_SAMPLE setup */
+       dbs_info->sample_type = DBS_NORMAL_SAMPLE;
+       if (!dbs_tuners_ins.powersave_bias ||
+           sample_type == DBS_NORMAL_SAMPLE) {
+               dbs_check_cpu(dbs_info);
+               if (dbs_info->freq_lo) {
+                       /* Setup timer for SUB_SAMPLE */
+                       dbs_info->sample_type = DBS_SUB_SAMPLE;
+                       delay = dbs_info->freq_hi_jiffies;
+               }
+       } else {
+               __cpufreq_driver_target(dbs_info->cur_policy,
+                       dbs_info->freq_lo, CPUFREQ_RELATION_H);
+       }
+       queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+       unlock_policy_rwsem_write(cpu);
 }
 
-static inline void dbs_timer_exit(unsigned int cpu)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-       struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       /* We want all CPUs to do sampling nearly on the same jiffy */
+       int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+       delay -= jiffies % delay;
+
+       dbs_info->enable = 1;
+       ondemand_powersave_bias_init();
+       dbs_info->sample_type = DBS_NORMAL_SAMPLE;
+       INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+       queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
+               delay);
+}
 
-       cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+{
+       dbs_info->enable = 0;
+       cancel_delayed_work(&dbs_info->work);
 }
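
Both do_dbs_timer() and dbs_timer_init() shorten the first delay with "delay -= jiffies % delay" so that every CPU's timer expires near the same jiffy boundary. A standalone illustration of that arithmetic, with assumed jiffy counts:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed values: a 25-jiffy sampling period and two CPUs
             * arming their timers at different jiffy counts. */
            unsigned long period = 25;
            unsigned long now_cpu0 = 1003, now_cpu1 = 1011;

            unsigned long delay0 = period - now_cpu0 % period;  /* 22 */
            unsigned long delay1 = period - now_cpu1 % period;  /* 14 */

            /* Both expire at jiffy 1025, a multiple of the period, so the
             * per-CPU samples line up on the same jiffy boundary. */
            printf("cpu0 fires at %lu, cpu1 at %lu\n",
                   now_cpu0 + delay0, now_cpu1 + delay1);
            return 0;
    }
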
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -344,45 +571,41 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;
+       int rc;
 
        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
        switch (event) {
        case CPUFREQ_GOV_START:
-               if ((!cpu_online(cpu)) ||
-                   (!policy->cur))
+               if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;
 
-               if (policy->cpuinfo.transition_latency >
-                               (TRANSITION_LATENCY_LIMIT * 1000)) {
-                       printk(KERN_WARNING "ondemand governor failed to load "
-                              "due to too long transition latency\n");
-                       return -EINVAL;
-               }
                if (this_dbs_info->enable) /* Already enabled */
                        break;
 
                mutex_lock(&dbs_mutex);
                dbs_enable++;
-               if (dbs_enable == 1) {
-                       kondemand_wq = create_workqueue("kondemand");
-                       if (!kondemand_wq) {
-                               printk(KERN_ERR "Creation of kondemand failed\n");
-                               dbs_enable--;
-                               mutex_unlock(&dbs_mutex);
-                               return -ENOSPC;
-                       }
+
+               rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+               if (rc) {
+                       dbs_enable--;
+                       mutex_unlock(&dbs_mutex);
+                       return rc;
                }
-               for_each_cpu_mask(j, policy->cpus) {
+
+               for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;
 
-                       j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-                       j_dbs_info->prev_cpu_wall = get_jiffies_64();
+                       j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                                               &j_dbs_info->prev_cpu_wall);
+                       if (dbs_tuners_ins.ignore_nice) {
+                               j_dbs_info->prev_cpu_nice =
+                                               kstat_cpu(j).cpustat.nice;
+                       }
                }
-               this_dbs_info->enable = 1;
-               sysfs_create_group(&policy->kobj, &dbs_attr_group);
+               this_dbs_info->cpu = cpu;
                /*
                 * Start the timerschedule work, when this governor
                 * is used for first time
@@ -394,71 +617,94 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        if (latency == 0)
                                latency = 1;
 
-                       def_sampling_rate = latency *
-                                       DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
-                       if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
-                               def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+                       def_sampling_rate =
+                               max(latency * LATENCY_MULTIPLIER,
+                                   MIN_STAT_SAMPLING_RATE);
 
                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
                }
-               dbs_timer_init(policy->cpu);
+               dbs_timer_init(this_dbs_info);
 
                mutex_unlock(&dbs_mutex);
                break;
 
        case CPUFREQ_GOV_STOP:
                mutex_lock(&dbs_mutex);
-               dbs_timer_exit(policy->cpu);
-               this_dbs_info->enable = 0;
+               dbs_timer_exit(this_dbs_info);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
-               if (dbs_enable == 0)
-                       destroy_workqueue(kondemand_wq);
-
                mutex_unlock(&dbs_mutex);
 
                break;
 
        case CPUFREQ_GOV_LIMITS:
-               lock_cpu_hotplug();
                mutex_lock(&dbs_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
-                       __cpufreq_driver_target(
-                                       this_dbs_info->cur_policy,
-                                       policy->max, CPUFREQ_RELATION_H);
+                       __cpufreq_driver_target(this_dbs_info->cur_policy,
+                               policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
-                       __cpufreq_driver_target(
-                                       this_dbs_info->cur_policy,
-                                       policy->min, CPUFREQ_RELATION_L);
+                       __cpufreq_driver_target(this_dbs_info->cur_policy,
+                               policy->min, CPUFREQ_RELATION_L);
                mutex_unlock(&dbs_mutex);
-               unlock_cpu_hotplug();
                break;
        }
        return 0;
 }
 
-static struct cpufreq_governor cpufreq_gov_dbs = {
-       .name           = "ondemand",
-       .governor       = cpufreq_governor_dbs,
-       .owner          = THIS_MODULE,
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemand = {
+       .name                   = "ondemand",
+       .governor               = cpufreq_governor_dbs,
+       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+       .owner                  = THIS_MODULE,
 };
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-       return cpufreq_register_governor(&cpufreq_gov_dbs);
+       int err;
+       cputime64_t wall;
+       u64 idle_time;
+       int cpu = get_cpu();
+
+       idle_time = get_cpu_idle_time_us(cpu, &wall);
+       put_cpu();
+       if (idle_time != -1ULL) {
+               /* Idle micro accounting is supported. Use finer thresholds */
+               dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+               dbs_tuners_ins.down_differential =
+                                       MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+       }
+
+       kondemand_wq = create_workqueue("kondemand");
+       if (!kondemand_wq) {
+               printk(KERN_ERR "Creation of kondemand failed\n");
+               return -EFAULT;
+       }
+       err = cpufreq_register_governor(&cpufreq_gov_ondemand);
+       if (err)
+               destroy_workqueue(kondemand_wq);
+
+       return err;
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-       cpufreq_unregister_governor(&cpufreq_gov_dbs);
+       cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+       destroy_workqueue(kondemand_wq);
 }
 
 
-MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-               "Low Latency Frequency Transition capable processors");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+       "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+fs_initcall(cpufreq_gov_dbs_init);
+#else
 module_init(cpufreq_gov_dbs_init);
+#endif
 module_exit(cpufreq_gov_dbs_exit);