- * also protects the cpufreq_cpu_data array.
+ * also protects the cpufreq_cpu_data per-cpu variable.
*/
static struct cpufreq_driver *cpufreq_driver;
-static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+#ifdef CONFIG_HOTPLUG_CPU
+/* This one keeps track of the previously set governor of a removed CPU */
+static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
+#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
+/*
+ * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
+ * all cpufreq/hotplug/workqueue/etc related lock issues.
+ *
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ * do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ * the policy altogether (e.g. CPU hotplug) will hold this lock in write
+ * mode before doing so.
+ *
+ * Additional rules:
+ * - All holders of the lock should check to make sure that the CPU they
+ * are concerned with is still online after they get the lock.
+ * - Governor routines that can be called in the cpufreq hotplug path must
+ * not take this semaphore, as the top-level hotplug notifier handler
+ * already takes it.
+ */
+static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
+
+#define lock_policy_rwsem(mode, cpu) \
+int lock_policy_rwsem_##mode \
+(int cpu) \
+{ \
+ int policy_cpu = per_cpu(policy_cpu, cpu); \
+ BUG_ON(policy_cpu == -1); \
+ down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+ if (unlikely(!cpu_online(cpu))) { \
+ up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+ return -1; \
+ } \
+ \
+ return 0; \
+}
+
+lock_policy_rwsem(read, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
+
+lock_policy_rwsem(write, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
+
+void unlock_policy_rwsem_read(int cpu)
+{
+ int policy_cpu = per_cpu(policy_cpu, cpu);
+ BUG_ON(policy_cpu == -1);
+ up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
+
+void unlock_policy_rwsem_write(int cpu)
+{
+ int policy_cpu = per_cpu(policy_cpu, cpu);
+ BUG_ON(policy_cpu == -1);
+ up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
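
For illustration, a minimal sketch of the read-side pattern these rules imply; example_read_cur_freq() is hypothetical, but the in-tree readers below (show(), cpufreq_get()) follow the same shape:

    static unsigned int example_read_cur_freq(unsigned int cpu)
    {
    	struct cpufreq_policy *policy;
    	unsigned int cur = 0;

    	policy = cpufreq_cpu_get(cpu);		/* take a refcount */
    	if (!policy)
    		return 0;

    	/* fails (and releases the sem) if the CPU went offline meanwhile */
    	if (lock_policy_rwsem_read(cpu) < 0)
    		goto out_put;

    	cur = policy->cur;			/* policy fields now stable */
    	unlock_policy_rwsem_read(cpu);

    out_put:
    	cpufreq_cpu_put(policy);
    	return cur;
    }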
+
+
/* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+ unsigned int event);
+static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;
+static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
srcu_init_notifier_head(&cpufreq_transition_notifier_list);
+ init_cpufreq_transition_notifier_list_called = true;
return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
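
Because the SRCU head is only set up by this pure_initcall, registering earlier would be a bug; the new init_cpufreq_transition_notifier_list_called flag lets cpufreq_register_notifier() WARN in that case. A hedged usage sketch (the callback below is hypothetical; the notifier interfaces are the real ones):

    static int example_transition_cb(struct notifier_block *nb,
    				 unsigned long state, void *data)
    {
    	struct cpufreq_freqs *freqs = data;

    	if (state == CPUFREQ_POSTCHANGE)
    		printk(KERN_DEBUG "cpu %u is now at %u kHz\n",
    		       freqs->cpu, freqs->new);
    	return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
    	.notifier_call	= example_transition_cb,
    };

    /* from some initcall that runs after the pure_initcall above: */
    /* cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER); */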
static LIST_HEAD(cpufreq_governor_list);
-static DEFINE_MUTEX (cpufreq_governor_mutex);
+static DEFINE_MUTEX(cpufreq_governor_mutex);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *data;
unsigned long flags;
- if (cpu >= NR_CPUS)
+ if (cpu >= nr_cpu_ids)
goto err_out;
/* get the cpufreq driver */
/* get the CPU */
- data = cpufreq_cpu_data[cpu];
+ data = per_cpu(cpufreq_cpu_data, cpu);
if (!data)
goto err_out_put_module;
}
void cpufreq_debug_printk(unsigned int type, const char *prefix,
- const char *fmt, ...)
+ const char *fmt, ...)
{
char s[256];
va_list args;
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- dprintk("saving %lu as reference value for loops_per_jiffy;"
+ dprintk("saving %lu as reference value for loops_per_jiffy; "
"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
- dprintk("scaling loops_per_jiffy to %lu"
+ dprintk("scaling loops_per_jiffy to %lu "
"for frequency %u kHz\n", loops_per_jiffy, ci->new);
}
}
dprintk("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
- policy = cpufreq_cpu_data[freqs->cpu];
+ policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
switch (state) {
case CPUFREQ_PRECHANGE:
struct cpufreq_governor *t;
list_for_each_entry(t, &cpufreq_governor_list, governor_list)
- if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
+ if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
return NULL;
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
+static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
struct cpufreq_governor **governor)
{
int err = -EINVAL;
int ret;
mutex_unlock(&cpufreq_governor_mutex);
- ret = request_module(name);
+ ret = request_module("%s", name);
mutex_lock(&cpufreq_governor_mutex);
if (ret == 0)
mutex_unlock(&cpufreq_governor_mutex);
}
- out:
+out:
return err;
}
-/* drivers/base/cpu.c */
-extern struct sysdev_class cpu_sysdev_class;
-
-
/**
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
#define show_one(file_name, object) \
static ssize_t show_##file_name \
-(struct cpufreq_policy * policy, char *buf) \
+(struct cpufreq_policy *policy, char *buf) \
{ \
- return sprintf (buf, "%u\n", policy->object); \
+ return sprintf(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
*/
#define store_one(file_name, object) \
static ssize_t store_##file_name \
-(struct cpufreq_policy * policy, const char *buf, size_t count) \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
unsigned int ret = -EINVAL; \
struct cpufreq_policy new_policy; \
if (ret) \
return -EINVAL; \
\
- ret = sscanf (buf, "%u", &new_policy.object); \
+ ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
return -EINVAL; \
\
- mutex_lock(&policy->lock); \
ret = __cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
- mutex_unlock(&policy->lock); \
\
return ret ? ret : count; \
}
-store_one(scaling_min_freq,min);
-store_one(scaling_max_freq,max);
+store_one(scaling_min_freq, min);
+store_one(scaling_max_freq, max);
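
With policy->lock gone, these store_one() writers depend on the sysfs store() entry point (further down) taking the policy rwsem in write mode before they run. A hypothetical direct caller would have to follow the same protocol:

    /* hypothetical direct invocation; normally sysfs' store() locks */
    if (lock_policy_rwsem_write(policy->cpu) == 0) {
    	store_scaling_max_freq(policy, "2000000", 7);
    	unlock_policy_rwsem_write(policy->cpu);
    }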
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
*/
-static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
- char *buf)
+static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+ char *buf)
{
- unsigned int cur_freq = cpufreq_get(policy->cpu);
+ unsigned int cur_freq = __cpufreq_get(policy->cpu);
if (!cur_freq)
return sprintf(buf, "<unknown>");
return sprintf(buf, "%u\n", cur_freq);
/**
* show_scaling_governor - show the current policy for the specified CPU
*/
-static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
- char *buf)
+static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
- if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
+ if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
return sprintf(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
+ return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+ policy->governor->name);
return -EINVAL;
}
/**
* store_scaling_governor - store policy for the specified CPU
*/
-static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
- const char *buf, size_t count)
+static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
{
unsigned int ret = -EINVAL;
char str_governor[16];
if (ret)
return ret;
- ret = sscanf (buf, "%15s", str_governor);
+ ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
/* Do not use cpufreq_set_policy here or the user_policy.max
will be wrongly overridden */
- mutex_lock(&policy->lock);
ret = __cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
- mutex_unlock(&policy->lock);
if (ret)
return ret;
/**
* show_scaling_driver - show the cpufreq driver currently loaded
*/
-static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
/**
* show_scaling_available_governors - show the available CPUfreq governors
*/
-static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
- char *buf)
+static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
+ char *buf)
{
ssize_t i = 0;
struct cpufreq_governor *t;
}
list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
- if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
+ if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+ - (CPUFREQ_NAME_LEN + 2)))
goto out;
i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
}
i += sprintf(&buf[i], "\n");
return i;
}
-/**
- * show_affected_cpus - show the CPUs affected by each transition
- */
-static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
+
+static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
ssize_t i = 0;
unsigned int cpu;
- for_each_cpu_mask(cpu, policy->cpus) {
+ for_each_cpu(cpu, mask) {
if (i)
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
if (i >= (PAGE_SIZE - 5))
- break;
+ break;
}
i += sprintf(&buf[i], "\n");
return i;
}
+/**
+ * show_related_cpus - show the CPUs affected by each transition even if
+ * hw coordination is in use
+ */
+static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ if (cpumask_empty(policy->related_cpus))
+ return show_cpus(policy->cpus, buf);
+ return show_cpus(policy->related_cpus, buf);
+}
+
+/**
+ * show_affected_cpus - show the CPUs affected by each transition
+ */
+static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ return show_cpus(policy->cpus, buf);
+}
+
+static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int freq = 0;
+ unsigned int ret;
+
+ if (!policy->governor || !policy->governor->store_setspeed)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%u", &freq);
+ if (ret != 1)
+ return -EINVAL;
+
+ policy->governor->store_setspeed(policy, freq);
+
+ return count;
+}
+
+static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+{
+ if (!policy->governor || !policy->governor->show_setspeed)
+ return sprintf(buf, "<unsupported>\n");
+
+ return policy->governor->show_setspeed(policy, buf);
+}
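
A hedged sketch of the governor side of the new setspeed hooks; the "example" governor is hypothetical (in-tree, the userspace governor is the intended provider), and it assumes the hook signatures used above:

    static int example_store_setspeed(struct cpufreq_policy *policy,
    				  unsigned int freq)
    {
    	/* store() already holds this policy's rwsem in write mode */
    	return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
    }

    static ssize_t example_show_setspeed(struct cpufreq_policy *policy,
    				     char *buf)
    {
    	return sprintf(buf, "%u\n", policy->cur);
    }

    static struct cpufreq_governor example_governor = {
    	.name		= "example",
    	.store_setspeed	= example_store_setspeed,
    	.show_setspeed	= example_show_setspeed,
    	/* .governor event handler omitted from this sketch */
    };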
#define define_one_ro(_name) \
static struct freq_attr _name = \
define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
+define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
+define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
+define_one_rw(scaling_setspeed);
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
+ &cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
+ &related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
&scaling_available_governors.attr,
+ &scaling_setspeed.attr,
NULL
};
-#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
-#define to_attr(a) container_of(a,struct freq_attr,attr)
+#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
+#define to_attr(a) container_of(a, struct freq_attr, attr)
-static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
- struct cpufreq_policy * policy = to_policy(kobj);
- struct freq_attr * fattr = to_attr(attr);
- ssize_t ret;
+ struct cpufreq_policy *policy = to_policy(kobj);
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
- return -EINVAL;
+ goto no_policy;
+
+ if (lock_policy_rwsem_read(policy->cpu) < 0)
+ goto fail;
+
if (fattr->show)
ret = fattr->show(policy, buf);
else
ret = -EIO;
+ unlock_policy_rwsem_read(policy->cpu);
+fail:
cpufreq_cpu_put(policy);
+no_policy:
return ret;
}
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
- const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
- struct cpufreq_policy * policy = to_policy(kobj);
- struct freq_attr * fattr = to_attr(attr);
- ssize_t ret;
+ struct cpufreq_policy *policy = to_policy(kobj);
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
- return -EINVAL;
+ goto no_policy;
+
+ if (lock_policy_rwsem_write(policy->cpu) < 0)
+ goto fail;
+
if (fattr->store)
ret = fattr->store(policy, buf, count);
else
ret = -EIO;
+ unlock_policy_rwsem_write(policy->cpu);
+fail:
cpufreq_cpu_put(policy);
+no_policy:
return ret;
}
-static void cpufreq_sysfs_release(struct kobject * kobj)
+static void cpufreq_sysfs_release(struct kobject *kobj)
{
- struct cpufreq_policy * policy = to_policy(kobj);
+ struct cpufreq_policy *policy = to_policy(kobj);
dprintk("last reference is dropped\n");
complete(&policy->kobj_unregister);
}
*
* Adds the cpufreq interface for a CPU device.
*/
-static int cpufreq_add_dev (struct sys_device * sys_dev)
+static int cpufreq_add_dev(struct sys_device *sys_dev)
{
unsigned int cpu = sys_dev->id;
int ret = 0;
ret = -ENOMEM;
goto nomem_out;
}
+ if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
+ kfree(policy);
+ ret = -ENOMEM;
+ goto nomem_out;
+ }
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
+ ret = -ENOMEM;
+ goto nomem_out;
+ }
policy->cpu = cpu;
- policy->cpus = cpumask_of_cpu(cpu);
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
+
+ /* Initially set CPU itself as the policy_cpu */
+ per_cpu(policy_cpu, cpu) = cpu;
+ lock_policy_rwsem_write(cpu);
- mutex_init(&policy->lock);
- mutex_lock(&policy->lock);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
+ /* Set the governor before ->init, so that the driver can check it */
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
ret = cpufreq_driver->init(policy);
if (ret) {
dprintk("initialization failed\n");
- mutex_unlock(&policy->lock);
goto err_out;
}
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
#ifdef CONFIG_SMP
- for_each_cpu_mask(j, policy->cpus) {
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (per_cpu(cpufreq_cpu_governor, cpu)) {
+ policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+ dprintk("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
+ }
+#endif
+
+ for_each_cpu(j, policy->cpus) {
if (cpu == j)
continue;
- /* check for existing affected CPUs. They may not be aware
- * of it due to CPU Hotplug.
+ /* Check for existing affected CPUs.
+ * They may not be aware of it due to CPU Hotplug.
*/
- managed_policy = cpufreq_cpu_get(j);
+ /* FIXME: Where is this released? What about error paths? */
+ managed_policy = cpufreq_cpu_get(j);
if (unlikely(managed_policy)) {
+
+ /* Set proper policy_cpu */
+ unlock_policy_rwsem_write(cpu);
+ per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+
+ if (lock_policy_rwsem_write(cpu) < 0)
+ goto err_out_driver_exit;
+
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- managed_policy->cpus = policy->cpus;
- cpufreq_cpu_data[cpu] = managed_policy;
+ cpumask_copy(managed_policy->cpus, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
dprintk("CPU already managed, adding link\n");
ret = sysfs_create_link(&sys_dev->kobj,
&managed_policy->kobj,
"cpufreq");
- if (ret) {
- mutex_unlock(&policy->lock);
+ if (ret)
goto err_out_driver_exit;
- }
cpufreq_debug_enable_ratelimit();
- mutex_unlock(&policy->lock);
ret = 0;
goto err_out_driver_exit; /* call driver->exit() */
}
memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
/* prepare interface data */
- policy->kobj.parent = &sys_dev->kobj;
- policy->kobj.ktype = &ktype_cpufreq;
- strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
-
- ret = kobject_register(&policy->kobj);
- if (ret) {
- mutex_unlock(&policy->lock);
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ &sys_dev->kobj, "cpufreq");
+ if (ret)
goto err_out_driver_exit;
- }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
- sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+ ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+ if (ret)
+ goto err_out_driver_exit;
drv_attr++;
}
- if (cpufreq_driver->get)
- sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
- if (cpufreq_driver->target)
- sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+ if (cpufreq_driver->get) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+ if (ret)
+ goto err_out_driver_exit;
+ }
+ if (cpufreq_driver->target) {
+ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+ if (ret)
+ goto err_out_driver_exit;
+ }
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus)
- cpufreq_cpu_data[j] = policy;
+ for_each_cpu(j, policy->cpus) {
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ per_cpu(policy_cpu, j) = policy->cpu;
+ }
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* symlink affected CPUs */
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu(j, policy->cpus) {
if (j == cpu)
continue;
if (!cpu_online(j))
cpu_sys_dev = get_cpu_sysdev(j);
ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
"cpufreq");
- if (ret) {
- mutex_unlock(&policy->lock);
+ if (ret)
goto err_out_unregister;
- }
}
- policy->governor = NULL; /* to assure that the starting sequence is
- * run in cpufreq_set_policy */
+ policy->governor = NULL; /* to ensure that the starting sequence is
+ * run in __cpufreq_set_policy */
- mutex_unlock(&policy->lock);
/* set default policy */
- ret = cpufreq_set_policy(&new_policy);
+ ret = __cpufreq_set_policy(policy, &new_policy);
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+
if (ret) {
dprintk("setting policy failed\n");
goto err_out_unregister;
}
+ unlock_policy_rwsem_write(cpu);
+
+ kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
dprintk("initialization complete\n");
cpufreq_debug_enable_ratelimit();
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus)
- cpufreq_cpu_data[j] = NULL;
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobject_unregister(&policy->kobj);
+ kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
err_out_driver_exit:
cpufreq_driver->exit(policy);
err_out:
+ unlock_policy_rwsem_write(cpu);
kfree(policy);
nomem_out:
/**
- * cpufreq_remove_dev - remove a CPU device
+ * __cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
+ * Caller should already have policy_rwsem in write mode for this CPU.
+ * This routine releases the rwsem before returning.
*/
-static int cpufreq_remove_dev (struct sys_device * sys_dev)
+static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long flags;
dprintk("unregistering CPU %u\n", cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- data = cpufreq_cpu_data[cpu];
+ data = per_cpu(cpufreq_cpu_data, cpu);
if (!data) {
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpufreq_debug_enable_ratelimit();
+ unlock_policy_rwsem_write(cpu);
return -EINVAL;
}
- cpufreq_cpu_data[cpu] = NULL;
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
#ifdef CONFIG_SMP
*/
if (unlikely(cpu != data->cpu)) {
dprintk("removing link\n");
- cpu_clear(cpu, data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
sysfs_remove_link(&sys_dev->kobj, "cpufreq");
cpufreq_cpu_put(data);
cpufreq_debug_enable_ratelimit();
+ unlock_policy_rwsem_write(cpu);
return 0;
}
#endif
+#ifdef CONFIG_SMP
- if (!kobject_get(&data->kobj)) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- cpufreq_debug_enable_ratelimit();
- return -EFAULT;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
+#endif
-#ifdef CONFIG_SMP
/* if we have other CPUs still registered, we need to unlink them,
* or else wait_for_completion below will lock up. Clean the
- * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
- * links afterwards.
+ * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
+ * the sysfs links afterwards.
*/
- if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ if (unlikely(cpumask_weight(data->cpus) > 1)) {
+ for_each_cpu(j, data->cpus) {
if (j == cpu)
continue;
- cpufreq_cpu_data[j] = NULL;
+ per_cpu(cpufreq_cpu_data, j) = NULL;
}
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ if (unlikely(cpumask_weight(data->cpus) > 1)) {
+ for_each_cpu(j, data->cpus) {
if (j == cpu)
continue;
dprintk("removing link for cpu %u\n", j);
+#ifdef CONFIG_HOTPLUG_CPU
+ per_cpu(cpufreq_cpu_governor, j) = data->governor;
+#endif
cpu_sys_dev = get_cpu_sysdev(j);
sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
cpufreq_cpu_put(data);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
- mutex_lock(&data->lock);
+ unlock_policy_rwsem_write(cpu);
+
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
- mutex_unlock(&data->lock);
-
- kobject_unregister(&data->kobj);
kobject_put(&data->kobj);
if (cpufreq_driver->exit)
cpufreq_driver->exit(data);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
kfree(data);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
cpufreq_debug_enable_ratelimit();
return 0;
}
+static int cpufreq_remove_dev(struct sys_device *sys_dev)
+{
+ unsigned int cpu = sys_dev->id;
+ int retval;
+
+ if (cpu_is_offline(cpu))
+ return 0;
+
+ if (unlikely(lock_policy_rwsem_write(cpu)))
+ BUG();
+
+ retval = __cpufreq_remove_dev(sys_dev);
+ return retval;
+}
+
+
static void handle_update(struct work_struct *work)
{
struct cpufreq_policy *policy =
* @old_freq: CPU frequency the kernel thinks the CPU runs at
* @new_freq: CPU frequency the CPU actually runs at
*
- * We adjust to current frequency first, and need to clean up later. So either call
- * to cpufreq_update_policy() or schedule handle_update()).
+ * We adjust to the current frequency first, and need to clean up later.
+ * So either call cpufreq_update_policy() or schedule handle_update().
*/
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
unsigned int new_freq)
unsigned int ret_freq = 0;
if (policy) {
- mutex_lock(&policy->lock);
ret_freq = policy->cur;
- mutex_unlock(&policy->lock);
cpufreq_cpu_put(policy);
}
- return (ret_freq);
+ return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
-/**
- * cpufreq_get - get the current CPU frequency (in kHz)
- * @cpu: CPU number
- *
- * Get the CPU current (static) CPU frequency
- */
-unsigned int cpufreq_get(unsigned int cpu)
+static unsigned int __cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
unsigned int ret_freq = 0;
- if (!policy)
- return 0;
-
if (!cpufreq_driver->get)
- goto out;
-
- mutex_lock(&policy->lock);
+ return ret_freq;
ret_freq = cpufreq_driver->get(cpu);
}
}
- mutex_unlock(&policy->lock);
+ return ret_freq;
+}
-out:
- cpufreq_cpu_put(policy);
+/**
+ * cpufreq_get - get the current CPU frequency (in kHz)
+ * @cpu: CPU number
+ *
+ * Get the CPU current (static) CPU frequency
+ */
+unsigned int cpufreq_get(unsigned int cpu)
+{
+ unsigned int ret_freq = 0;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- return (ret_freq);
+ if (!policy)
+ goto out;
+
+ if (unlikely(lock_policy_rwsem_read(cpu)))
+ goto out_policy;
+
+ ret_freq = __cpufreq_get(cpu);
+
+ unlock_policy_rwsem_read(cpu);
+
+out_policy:
+ cpufreq_cpu_put(policy);
+out:
+ return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
* cpufreq_suspend - let the low level driver prepare for suspend
*/
-static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
+static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
int cpu = sysdev->id;
int ret = 0;
return -EINVAL;
/* only handle each CPU group once */
- if (unlikely(cpu_policy->cpu != cpu)) {
- cpufreq_cpu_put(cpu_policy);
- return 0;
- }
+ if (unlikely(cpu_policy->cpu != cpu))
+ goto out;
if (cpufreq_driver->suspend) {
ret = cpufreq_driver->suspend(cpu_policy, pmsg);
if (ret) {
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
- cpufreq_cpu_put(cpu_policy);
- return ret;
+ goto out;
}
}
-
if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
goto out;
out:
cpufreq_cpu_put(cpu_policy);
- return 0;
+ return ret;
}
/**
* 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
* restored.
*/
-static int cpufreq_resume(struct sys_device * sysdev)
+static int cpufreq_resume(struct sys_device *sysdev)
{
int cpu = sysdev->id;
int ret = 0;
return -EINVAL;
/* only handle each CPU group once */
- if (unlikely(cpu_policy->cpu != cpu)) {
- cpufreq_cpu_put(cpu_policy);
- return 0;
- }
+ if (unlikely(cpu_policy->cpu != cpu))
+ goto fail;
if (cpufreq_driver->resume) {
ret = cpufreq_driver->resume(cpu_policy);
if (ret) {
printk(KERN_ERR "cpufreq: resume failed in ->resume "
"step on CPU %u\n", cpu_policy->cpu);
- cpufreq_cpu_put(cpu_policy);
- return ret;
+ goto fail;
}
}
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- dprintk("Warning: CPU frequency"
+ dprintk("Warning: CPU frequency "
"is %u, cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
out:
schedule_work(&cpu_policy->update);
+fail:
cpufreq_cpu_put(cpu_policy);
return ret;
}
{
int ret;
+ WARN_ON(!init_cpufreq_transition_notifier_list_called);
+
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_register(
unsigned int target_freq,
unsigned int relation)
{
- int ret;
+ int ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
- return -EINVAL;
+ goto no_policy;
- mutex_lock(&policy->lock);
+ if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+ goto fail;
ret = __cpufreq_driver_target(policy, target_freq, relation);
- mutex_unlock(&policy->lock);
+ unlock_policy_rwsem_write(policy->cpu);
+fail:
cpufreq_cpu_put(policy);
+no_policy:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-int cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
if (!policy)
return -EINVAL;
- mutex_lock(&policy->lock);
-
- if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
- ret = cpufreq_driver->getavg(policy->cpu);
-
- mutex_unlock(&policy->lock);
+ if (cpu_online(cpu) && cpufreq_driver->getavg)
+ ret = cpufreq_driver->getavg(policy, cpu);
cpufreq_cpu_put(policy);
return ret;
}
-EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
+EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
* when "event" is CPUFREQ_GOV_LIMITS
{
int ret;
+ /* This must only be defined when the default governor is known to have
+ * latency restrictions, e.g. conservative or ondemand.
+ * That this is the case is already ensured in Kconfig.
+ */
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+ struct cpufreq_governor *gov = &cpufreq_gov_performance;
+#else
+ struct cpufreq_governor *gov = NULL;
+#endif
+
+ if (policy->governor->max_transition_latency &&
+ policy->cpuinfo.transition_latency >
+ policy->governor->max_transition_latency) {
+ if (!gov)
+ return -EINVAL;
+
+ printk(KERN_WARNING "%s governor failed: the HW transition "
+ "latency is too long, falling back to the %s "
+ "governor\n",
+ policy->governor->name,
+ gov->name);
+ policy->governor = gov;
+ }
+
if (!try_module_get(policy->governor->owner))
return -EINVAL;
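
For instance (assuming the in-tree ondemand governor as a user of this field), a governor opts in to the fallback above by declaring its latency ceiling; a driver whose cpuinfo.transition_latency exceeds it is then silently switched to "performance":

    /* from cpufreq_ondemand.c; TRANSITION_LATENCY_LIMIT is 10 ms in ns */
    static struct cpufreq_governor cpufreq_gov_dbs = {
    	.name			= "ondemand",
    	.governor		= cpufreq_governor_dbs,
    	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
    	.owner			= THIS_MODULE,
    };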
/**
* cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
+ * @policy: struct cpufreq_policy into which the current cpufreq_policy
+ * is written
*
* Reads the current cpufreq policy.
*/
if (!cpu_policy)
return -EINVAL;
- mutex_lock(&cpu_policy->lock);
memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
- mutex_unlock(&cpu_policy->lock);
cpufreq_cpu_put(cpu_policy);
return 0;
memcpy(&policy->cpuinfo, &data->cpuinfo,
sizeof(struct cpufreq_cpuinfo));
- if (policy->min > data->min && policy->min > policy->max) {
+ if (policy->min > data->max || policy->max < data->min) {
ret = -EINVAL;
goto error_out;
}
}
/**
- * cpufreq_set_policy - set a new CPUFreq policy
- * @policy: policy to be set.
- *
- * Sets a new CPU frequency and voltage scaling policy.
- */
-int cpufreq_set_policy(struct cpufreq_policy *policy)
-{
- int ret = 0;
- struct cpufreq_policy *data;
-
- if (!policy)
- return -EINVAL;
-
- data = cpufreq_cpu_get(policy->cpu);
- if (!data)
- return -EINVAL;
-
- /* lock this CPU */
- mutex_lock(&data->lock);
-
- ret = __cpufreq_set_policy(data, policy);
- data->user_policy.min = data->min;
- data->user_policy.max = data->max;
- data->user_policy.policy = data->policy;
- data->user_policy.governor = data->governor;
-
- mutex_unlock(&data->lock);
-
- cpufreq_cpu_put(data);
-
- return ret;
-}
-EXPORT_SYMBOL(cpufreq_set_policy);
-
-
-/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
*
{
struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
struct cpufreq_policy policy;
- int ret = 0;
+ int ret;
- if (!data)
- return -ENODEV;
+ if (!data) {
+ ret = -ENODEV;
+ goto no_policy;
+ }
- mutex_lock(&data->lock);
+ if (unlikely(lock_policy_rwsem_write(cpu))) {
+ ret = -EINVAL;
+ goto fail;
+ }
dprintk("updating policy for CPU %u\n", cpu);
memcpy(&policy, data, sizeof(struct cpufreq_policy));
ret = __cpufreq_set_policy(data, &policy);
- mutex_unlock(&data->lock);
+ unlock_policy_rwsem_write(cpu);
+
+fail:
cpufreq_cpu_put(data);
+no_policy:
return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
-static int cpufreq_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct cpufreq_policy *policy;
struct sys_device *sys_dev;
sys_dev = get_cpu_sysdev(cpu);
-
if (sys_dev) {
switch (action) {
case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
cpufreq_add_dev(sys_dev);
break;
case CPU_DOWN_PREPARE:
- /*
- * We attempt to put this cpu in lowest frequency
- * possible before going down. This will permit
- * hardware-managed P-State to switch other related
- * threads to min or higher speeds if possible.
- */
- policy = cpufreq_cpu_data[cpu];
- if (policy) {
- cpufreq_driver_target(policy, policy->min,
- CPUFREQ_RELATION_H);
- }
+ case CPU_DOWN_PREPARE_FROZEN:
+ if (unlikely(lock_policy_rwsem_write(cpu)))
+ BUG();
+
+ __cpufreq_remove_dev(sys_dev);
break;
- case CPU_DEAD:
- cpufreq_remove_dev(sys_dev);
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ cpufreq_add_dev(sys_dev);
break;
}
}
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
+static struct notifier_block __refdata cpufreq_cpu_notifier =
{
.notifier_call = cpufreq_cpu_callback,
};
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
+ ret = sysdev_driver_register(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
/* check for at least one working CPU */
- for (i=0; i<NR_CPUS; i++)
- if (cpufreq_cpu_data[i])
+ for (i = 0; i < nr_cpu_ids; i++)
+ if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
ret = 0;
+ break;
+ }
/* if all ->init() calls failed, unregister */
if (ret) {
cpufreq_debug_enable_ratelimit();
}
- return (ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+static int __init cpufreq_core_init(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu(policy_cpu, cpu) = -1;
+ init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
+ }
+ return 0;
+}
+
+core_initcall(cpufreq_core_init);