cpufreq: remove cpufreq_policy_cpu per-cpu variable

The cpufreq_policy_cpu per-cpu variable is used to store the ID of
the CPU that manages the given CPU's policy.  However, we also store
a policy pointer for each CPU in cpufreq_cpu_data, so the
cpufreq_policy_cpu information is simply redundant.

It is better to use cpufreq_cpu_data to retrieve a policy and get
policy->cpu from there, so make that happen everywhere and drop the
cpufreq_policy_cpu per-cpu variable, which isn't necessary any more.

[rjw: Changelog]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Viresh Kumar 2013-08-20 12:08:25 +05:30 committed by Rafael J. Wysocki
parent 9e9fd80167
commit 474deff744
1 changed file with 10 additions and 35 deletions
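
As a minimal standalone C sketch (not code from the patch) of the idea described
in the changelog above: instead of keeping a separate per-CPU integer that mirrors
the managing CPU's ID, look the policy up through the per-CPU policy pointer and
read policy->cpu from it.  The plain arrays and the policy_cpu_of() helper below
are illustrative stand-ins for the kernel's per-cpu variables.

    #include <assert.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct cpufreq_policy {
            unsigned int cpu;       /* CPU that manages this policy */
    };

    /* stand-in for per_cpu(cpufreq_cpu_data, cpu) */
    static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];

    /* old scheme (removed by this patch): a redundant per-CPU copy of the ID */
    /* static int cpufreq_policy_cpu[NR_CPUS]; */

    /* new scheme: derive the managing CPU from the shared policy object */
    static unsigned int policy_cpu_of(int cpu)
    {
            struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];

            assert(policy);         /* the kernel code uses BUG_ON(!policy) */
            return policy->cpu;
    }

    int main(void)
    {
            static struct cpufreq_policy shared = { .cpu = 0 };

            /* CPUs 0 and 1 share one policy; CPU 0 manages it */
            cpufreq_cpu_data[0] = &shared;
            cpufreq_cpu_data[1] = &shared;

            printf("CPU 1 is managed by CPU %u\n", policy_cpu_of(1));
            return 0;
    }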

drivers/cpufreq/cpufreq.c

@@ -64,15 +64,14 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
  * - Lock should not be held across
  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
-static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)                                    \
 static int lock_policy_rwsem_##mode(int cpu)                            \
 {                                                                       \
-        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
-        BUG_ON(policy_cpu == -1);                                       \
-        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
+        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+        BUG_ON(!policy);                                                \
+        down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));           \
                                                                         \
         return 0;                                                       \
 }
@@ -83,9 +82,9 @@ lock_policy_rwsem(write, cpu);
 #define unlock_policy_rwsem(mode, cpu)                                  \
 static void unlock_policy_rwsem_##mode(int cpu)                         \
 {                                                                       \
-        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
-        BUG_ON(policy_cpu == -1);                                       \
-        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
+        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+        BUG_ON(!policy);                                                \
+        up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));             \
 }
 
 unlock_policy_rwsem(read, cpu);
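
For readability, the function generated by the rewritten macro for the read case
is shown below; this is only a mechanical by-hand expansion of
lock_policy_rwsem(read, cpu) from the hunk above, not additional kernel code.

    static int lock_policy_rwsem_read(int cpu)
    {
            struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

            BUG_ON(!policy);
            down_read(&per_cpu(cpu_policy_rwsem, policy->cpu));

            return 0;
    }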
@@ -887,7 +886,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
         write_lock_irqsave(&cpufreq_driver_lock, flags);
 
         cpumask_set_cpu(cpu, policy->cpus);
-        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
         per_cpu(cpufreq_cpu_data, cpu) = policy;
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1013,9 +1011,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
         cpumask_copy(policy->cpus, cpumask_of(cpu));
 
-        /* Initially set CPU itself as the policy_cpu */
-        per_cpu(cpufreq_policy_cpu, cpu) = cpu;
-
         init_completion(&policy->kobj_unregister);
         INIT_WORK(&policy->update, handle_update);
@@ -1053,10 +1048,8 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 #endif
 
         write_lock_irqsave(&cpufreq_driver_lock, flags);
-        for_each_cpu(j, policy->cpus) {
+        for_each_cpu(j, policy->cpus)
                 per_cpu(cpufreq_cpu_data, j) = policy;
-                per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
-        }
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
         if (!frozen) {
@@ -1080,15 +1073,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
 err_out_unregister:
         write_lock_irqsave(&cpufreq_driver_lock, flags);
-        for_each_cpu(j, policy->cpus) {
+        for_each_cpu(j, policy->cpus)
                 per_cpu(cpufreq_cpu_data, j) = NULL;
-                if (j != cpu)
-                        per_cpu(cpufreq_policy_cpu, j) = -1;
-        }
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 err_set_policy_cpu:
-        per_cpu(cpufreq_policy_cpu, cpu) = -1;
         cpufreq_policy_free(policy);
 nomem_out:
         up_read(&cpufreq_rwsem);
@@ -1112,14 +1101,9 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
-        int j;
-
         policy->last_cpu = policy->cpu;
         policy->cpu = cpu;
 
-        for_each_cpu(j, policy->cpus)
-                per_cpu(cpufreq_policy_cpu, j) = cpu;
-
 #ifdef CONFIG_CPU_FREQ_TABLE
         cpufreq_frequency_table_update_policy_cpu(policy);
 #endif
@@ -1131,7 +1115,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
                                            unsigned int old_cpu, bool frozen)
 {
         struct device *cpu_dev;
-        unsigned long flags;
         int ret;
 
         /* first sibling now owns the new sysfs dir */
@@ -1148,11 +1131,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 
                 WARN_ON(lock_policy_rwsem_write(old_cpu));
                 cpumask_set_cpu(old_cpu, policy->cpus);
-
-                write_lock_irqsave(&cpufreq_driver_lock, flags);
-                per_cpu(cpufreq_cpu_data, old_cpu) = policy;
-                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
                 unlock_policy_rwsem_write(old_cpu);
 
                 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
@@ -1186,7 +1164,6 @@ static int __cpufreq_remove_dev(struct device *dev,
         write_lock_irqsave(&cpufreq_driver_lock, flags);
 
         policy = per_cpu(cpufreq_cpu_data, cpu);
-        per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
         /* Save the policy somewhere when doing a light-weight tear-down */
         if (frozen)
@@ -1292,7 +1269,7 @@ static int __cpufreq_remove_dev(struct device *dev,
                 }
         }
 
-        per_cpu(cpufreq_policy_cpu, cpu) = -1;
+        per_cpu(cpufreq_cpu_data, cpu) = NULL;
         return 0;
 }
@@ -2148,10 +2125,8 @@ static int __init cpufreq_core_init(void)
         if (cpufreq_disabled())
                 return -ENODEV;
 
-        for_each_possible_cpu(cpu) {
-                per_cpu(cpufreq_policy_cpu, cpu) = -1;
+        for_each_possible_cpu(cpu)
                 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-        }
 
         cpufreq_global_kobject = kobject_create();
         BUG_ON(!cpufreq_global_kobject);