- * also protects the cpufreq_cpu_data array.
+ * also protects the per-CPU cpufreq_cpu_data.
*/
static struct cpufreq_driver *cpufreq_driver;
-static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
-static struct cpufreq_governor *cpufreq_cpu_governor[NR_CPUS];
+static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
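
For orientation: the NR_CPUS-sized arrays above become per-CPU variables, read and written through per_cpu(). A minimal sketch of the resulting access pattern, with a hypothetical example_data variable, assuming cpufreq_driver_lock still serializes writers as the comment above requires:

/* Sketch only: the access pattern that replaces indexing a NR_CPUS array. */
static DEFINE_PER_CPU(struct cpufreq_policy *, example_data);

static void example_set(unsigned int cpu, struct cpufreq_policy *p)
{
	unsigned long flags;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	per_cpu(example_data, cpu) = p;	/* was: example_data[cpu] = p */
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
}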
/* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+ unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
pure_initcall(init_cpufreq_transition_notifier_list);
static LIST_HEAD(cpufreq_governor_list);
-static DEFINE_MUTEX (cpufreq_governor_mutex);
+static DEFINE_MUTEX(cpufreq_governor_mutex);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *data;
unsigned long flags;
- if (cpu >= NR_CPUS)
+ if (cpu >= nr_cpu_ids)
goto err_out;
/* get the cpufreq driver */
/* get the CPU */
- data = cpufreq_cpu_data[cpu];
+ data = per_cpu(cpufreq_cpu_data, cpu);
if (!data)
goto err_out_put_module;
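
cpufreq_cpu_get() pins both the driver module and the policy, so every successful call must be balanced by cpufreq_cpu_put(). A hedged caller sketch (example_read_cur is hypothetical):

/* Sketch: pairing cpufreq_cpu_get() with cpufreq_cpu_put(). */
static unsigned int example_read_cur(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	if (!policy)
		return 0;	/* no driver, or no policy for this CPU yet */
	cur = policy->cur;	/* safe while the reference is held */
	cpufreq_cpu_put(policy);
	return cur;
}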
dprintk("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
- policy = cpufreq_cpu_data[freqs->cpu];
+ policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
switch (state) {
case CPUFREQ_PRECHANGE:
struct cpufreq_governor *t;
list_for_each_entry(t, &cpufreq_governor_list, governor_list)
- if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
+ if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
return t;
return NULL;
mutex_unlock(&cpufreq_governor_mutex);
}
- out:
+out:
return err;
}
-/* drivers/base/cpu.c */
-extern struct sysdev_class cpu_sysdev_class;
-
-
/**
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
static ssize_t show_##file_name \
(struct cpufreq_policy *policy, char *buf) \
{ \
- return sprintf (buf, "%u\n", policy->object); \
+ return sprintf(buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
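
For reference, each show_one() invocation expands via the macro above into a one-line accessor; show_one(scaling_min_freq, min), for instance, becomes roughly:

/* Approximate expansion of show_one(scaling_min_freq, min): */
static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->min);
}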
if (ret) \
return -EINVAL; \
\
- ret = sscanf (buf, "%u", &new_policy.object); \
+ ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
return -EINVAL; \
\
return ret ? ret : count; \
}
-store_one(scaling_min_freq,min);
-store_one(scaling_max_freq,max);
+store_one(scaling_min_freq, min);
+store_one(scaling_max_freq, max);
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
- if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
+ if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
return sprintf(buf, "powersave\n");
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
+ return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+ policy->governor->name);
return -EINVAL;
}
if (ret)
return ret;
- ret = sscanf (buf, "%15s", str_governor);
+ ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;
}
list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
- if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
+ if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+ - (CPUFREQ_NAME_LEN + 2)))
goto out;
i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
}
return i;
}
-static ssize_t show_cpus(cpumask_t mask, char *buf)
+static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
ssize_t i = 0;
unsigned int cpu;
- for_each_cpu_mask(cpu, mask) {
+ for_each_cpu(cpu, mask) {
if (i)
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
if (i >= (PAGE_SIZE - 5))
- break;
+ break;
}
i += sprintf(&buf[i], "\n");
return i;
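
The iterator change matters beyond style: for_each_cpu_mask() took a cpumask_t by value, copying NR_CPUS bits onto the stack, while for_each_cpu() takes a const struct cpumask * and still visits only the set bits. A small self-contained sketch (example_count_cpus is hypothetical):

/* Sketch: for_each_cpu() visits only the set bits of the mask. */
static unsigned int example_count_cpus(const struct cpumask *mask)
{
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, mask)
		n++;		/* same result as cpumask_weight(mask) */
	return n;
}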
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- if (cpus_empty(policy->related_cpus))
+ if (cpumask_empty(policy->related_cpus))
return show_cpus(policy->cpus, buf);
return show_cpus(policy->related_cpus, buf);
}
define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
+define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
+ &cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
NULL
};
-#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
-#define to_attr(a) container_of(a,struct freq_attr,attr)
+#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
+#define to_attr(a) container_of(a, struct freq_attr, attr)
-static ssize_t show(struct kobject *kobj, struct attribute *attr ,char *buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ret = -ENOMEM;
goto nomem_out;
}
+ if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
+ kfree(policy);
+ ret = -ENOMEM;
+ goto nomem_out;
+ }
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
+ ret = -ENOMEM;
+ goto nomem_out;
+ }
policy->cpu = cpu;
- policy->cpus = cpumask_of_cpu(cpu);
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
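
The policy masks become cpumask_var_t, which is a plain on-stack bitmap when CONFIG_CPUMASK_OFFSTACK=n but a real allocation otherwise, so it must always go through alloc/free. A hedged sketch of the lifecycle, mirroring the unwinding just added above (example_alloc_masks is hypothetical):

/* Sketch: cpumask_var_t allocation with unwinding on failure. */
static int example_alloc_masks(struct cpufreq_policy *policy)
{
	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
		free_cpumask_var(policy->cpus);	/* unwind the first alloc */
		return -ENOMEM;
	}
	cpumask_copy(policy->cpus, cpumask_of(policy->cpu));
	return 0;
}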
/* Initially set CPU itself as the policy_cpu */
per_cpu(policy_cpu, cpu) = cpu;
dprintk("initialization failed\n");
goto err_out;
}
- policy->user_policy.min = policy->cpuinfo.min_freq;
- policy->user_policy.max = policy->cpuinfo.max_freq;
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
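
The new CPUFREQ_START notification goes out on the policy notifier chain before the governor is chosen below, giving platform code an early hook into policy creation. A hedged sketch of a listener (names hypothetical):

/* Sketch: observing the new CPUFREQ_START policy event. */
static int example_policy_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_START)
		dprintk("policy starting on cpu %u\n", policy->cpu);
	return NOTIFY_OK;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_cb,
};

/* registered elsewhere with:
 * cpufreq_register_notifier(&example_policy_nb, CPUFREQ_POLICY_NOTIFIER); */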
#ifdef CONFIG_SMP
#ifdef CONFIG_HOTPLUG_CPU
- if (cpufreq_cpu_governor[cpu]){
- policy->governor = cpufreq_cpu_governor[cpu];
+ if (per_cpu(cpufreq_cpu_governor, cpu)) {
+ policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
dprintk("Restoring governor %s for cpu %d\n",
policy->governor->name, cpu);
}
#endif
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu(j, policy->cpus) {
if (cpu == j)
continue;
- /* check for existing affected CPUs. They may not be aware
- * of it due to CPU Hotplug.
+ /* Check for existing affected CPUs.
+ * They may not be aware of it due to CPU Hotplug.
*/
- managed_policy = cpufreq_cpu_get(j); // FIXME: Where is this released? What about error paths?
+ managed_policy = cpufreq_cpu_get(j);
+ /* FIXME: Where is this released? What about error paths? */
if (unlikely(managed_policy)) {
/* Set proper policy_cpu */
goto err_out_driver_exit;
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- managed_policy->cpus = policy->cpus;
- cpufreq_cpu_data[cpu] = managed_policy;
+ cpumask_copy(managed_policy->cpus, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
dprintk("CPU already managed, adding link\n");
}
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus) {
- cpufreq_cpu_data[j] = policy;
+ for_each_cpu(j, policy->cpus) {
+ per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(policy_cpu, j) = policy->cpu;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* symlink affected CPUs */
- for_each_cpu_mask(j, policy->cpus) {
+ for_each_cpu(j, policy->cpus) {
if (j == cpu)
continue;
if (!cpu_online(j))
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu_mask(j, policy->cpus)
- cpufreq_cpu_data[j] = NULL;
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = NULL;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
kobject_put(&policy->kobj);
dprintk("unregistering CPU %u\n", cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
- data = cpufreq_cpu_data[cpu];
+ data = per_cpu(cpufreq_cpu_data, cpu);
if (!data) {
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
unlock_policy_rwsem_write(cpu);
return -EINVAL;
}
- cpufreq_cpu_data[cpu] = NULL;
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
#ifdef CONFIG_SMP
*/
if (unlikely(cpu != data->cpu)) {
dprintk("removing link\n");
- cpu_clear(cpu, data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
sysfs_remove_link(&sys_dev->kobj, "cpufreq");
cpufreq_cpu_put(data);
#ifdef CONFIG_SMP
#ifdef CONFIG_HOTPLUG_CPU
- cpufreq_cpu_governor[cpu] = data->governor;
+ per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
#endif
/* if we have other CPUs still registered, we need to unlink them,
* or else wait_for_completion below will lock up. Clean the
- * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
- * links afterwards.
+ * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
+ * the sysfs links afterwards.
*/
- if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ if (unlikely(cpumask_weight(data->cpus) > 1)) {
+ for_each_cpu(j, data->cpus) {
if (j == cpu)
continue;
- cpufreq_cpu_data[j] = NULL;
+ per_cpu(cpufreq_cpu_data, j) = NULL;
}
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (unlikely(cpus_weight(data->cpus) > 1)) {
- for_each_cpu_mask(j, data->cpus) {
+ if (unlikely(cpumask_weight(data->cpus) > 1)) {
+ for_each_cpu(j, data->cpus) {
if (j == cpu)
continue;
dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
- cpufreq_cpu_governor[j] = data->governor;
+ per_cpu(cpufreq_cpu_governor, j) = data->governor;
#endif
cpu_sys_dev = get_cpu_sysdev(j);
sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
+ unlock_policy_rwsem_write(cpu);
+
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
- unlock_policy_rwsem_write(cpu);
-
kobject_put(&data->kobj);
/* we need to make sure that the underlying kobj is actually
if (cpufreq_driver->exit)
cpufreq_driver->exit(data);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
kfree(data);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
cpufreq_debug_enable_ratelimit();
return 0;
* @old_freq: CPU frequency the kernel thinks the CPU runs at
* @new_freq: CPU frequency the CPU actually runs at
*
- * We adjust to current frequency first, and need to clean up later. So either call
- * to cpufreq_update_policy() or schedule handle_update()).
+ * We adjust to current frequency first, and need to clean up later.
+ * So either call cpufreq_update_policy() or schedule handle_update().
*/
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
unsigned int new_freq)
static unsigned int __cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
unsigned int ret_freq = 0;
if (!cpufreq_driver->get)
unsigned int target_freq,
unsigned int relation)
{
- int ret;
+ int ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
- return -EINVAL;
+ goto no_policy;
if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- return -EINVAL;
+ goto fail;
ret = __cpufreq_driver_target(policy, target_freq, relation);
unlock_policy_rwsem_write(policy->cpu);
+fail:
cpufreq_cpu_put(policy);
+no_policy:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
if (!policy)
return -EINVAL;
- if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
- ret = cpufreq_driver->getavg(policy->cpu);
+ if (cpu_online(cpu) && cpufreq_driver->getavg)
+ ret = cpufreq_driver->getavg(policy, cpu);
cpufreq_cpu_put(policy);
return ret;
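
Since __cpufreq_driver_getavg() now forwards both the policy and an explicit CPU, a driver's ->getavg hook takes the same pair. A hedged driver-side sketch (driver name and the returned value are placeholders):

/* Sketch: a ->getavg implementation under the new signature. */
static unsigned int example_getavg(struct cpufreq_policy *policy,
				   unsigned int cpu)
{
	/* a real driver would sample hardware feedback for 'cpu';
	 * falling back to the last requested frequency here */
	return policy->cur;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.getavg	= example_getavg,
};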
/**
* cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
+ * @policy: struct cpufreq_policy into which the current cpufreq_policy
+ * is written
*
* Reads the current cpufreq policy.
*/
{
struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
struct cpufreq_policy policy;
- int ret = 0;
+ int ret;
- if (!data)
- return -ENODEV;
+ if (!data) {
+ ret = -ENODEV;
+ goto no_policy;
+ }
- if (unlikely(lock_policy_rwsem_write(cpu)))
- return -EINVAL;
+ if (unlikely(lock_policy_rwsem_write(cpu))) {
+ ret = -EINVAL;
+ goto fail;
+ }
dprintk("updating policy for CPU %u\n", cpu);
memcpy(&policy, data, sizeof(struct cpufreq_policy));
unlock_policy_rwsem_write(cpu);
+fail:
cpufreq_cpu_put(data);
+no_policy:
return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
+ ret = sysdev_driver_register(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
/* check for at least one working CPU */
- for (i=0; i<NR_CPUS; i++)
- if (cpufreq_cpu_data[i])
+ for_each_possible_cpu(i)
+ if (per_cpu(cpufreq_cpu_data, i)) {
ret = 0;
+ break;
+ }
/* if all ->init() calls failed, unregister */
if (ret) {