
cpufreq: Create for_each_{in}active_policy()

policy->cpus is now cleared unconditionally on hotplug-out of a CPU, so it
can be checked to tell whether a policy is active or not. Create helper
routines to iterate over all active/inactive policies, based on the
policy->cpus field.

Replace all instances of for_each_policy() with for_each_active_policy()
so that they iterate over active policies only. (Inactive policies are not
yet kept on the same list; that will be done in a later patch.)
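
For illustration, here is a minimal usage sketch of the new iterators (the
function and messages below are hypothetical, not part of this patch); it
assumes the caller takes cpufreq_driver_lock for reading, as the
cpufreq_add_dev() hunk below does:

	/* Hypothetical example: walk policies by activity state. */
	static void example_walk_policies(void)
	{
		struct cpufreq_policy *policy;
		unsigned long flags;

		read_lock_irqsave(&cpufreq_driver_lock, flags);

		/* Policies whose policy->cpus mask is still non-empty */
		for_each_active_policy(policy)
			pr_debug("CPU%u: policy is active\n", policy->cpu);

		/*
		 * Policies whose last CPU went offline; this matches nothing
		 * until a later patch keeps inactive policies on the list.
		 */
		for_each_inactive_policy(policy)
			pr_debug("CPU%u: policy is inactive\n", policy->cpu);

		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}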

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Viresh Kumar 2015-05-12 12:20:11 +05:30 committed by Rafael J. Wysocki
parent 1473014502
commit f963735a3c
1 changed file with 59 additions and 7 deletions


@@ -31,10 +31,62 @@
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
-/* Macros to iterate over lists */
-/* Iterate over online CPUs policies */
 static LIST_HEAD(cpufreq_policy_list);
-#define for_each_policy(__policy)			\
+
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+	return cpumask_empty(policy->cpus);
+}
+
+static bool suitable_policy(struct cpufreq_policy *policy, bool active)
+{
+	return active == !policy_is_inactive(policy);
+}
+
+/* Finds Next Acive/Inactive policy */
+static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
+					  bool active)
+{
+	do {
+		policy = list_next_entry(policy, policy_list);
+
+		/* No more policies in the list */
+		if (&policy->policy_list == &cpufreq_policy_list)
+			return NULL;
+	} while (!suitable_policy(policy, active));
+
+	return policy;
+}
+
+static struct cpufreq_policy *first_policy(bool active)
+{
+	struct cpufreq_policy *policy;
+
+	/* No policies in the list */
+	if (list_empty(&cpufreq_policy_list))
+		return NULL;
+
+	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
+				  policy_list);
+
+	if (!suitable_policy(policy, active))
+		policy = next_policy(policy, active);
+
+	return policy;
+}
+
+/* Macros to iterate over CPU policies */
+#define for_each_suitable_policy(__policy, __active)	\
+	for (__policy = first_policy(__active);		\
+	     __policy;					\
+	     __policy = next_policy(__policy, __active))
+
+#define for_each_active_policy(__policy)		\
+	for_each_suitable_policy(__policy, true)
+#define for_each_inactive_policy(__policy)		\
+	for_each_suitable_policy(__policy, false)
+
+#define for_each_policy(__policy)			\
 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
 
 /* Iterate over governors */
@@ -1156,7 +1208,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
 	/* Check if this CPU already has a policy to manage it */
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
@@ -1674,7 +1726,7 @@ void cpufreq_suspend(void)
 
 	pr_debug("%s: Suspending Governors\n", __func__);
 
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
 			pr_err("%s: Failed to stop governor for policy: %p\n",
 				__func__, policy);
@@ -1708,7 +1760,7 @@ void cpufreq_resume(void)
 
 	pr_debug("%s: Resuming Governors\n", __func__);
 
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
 			pr_err("%s: Failed to resume driver: %p\n", __func__,
 				policy);
@@ -2354,7 +2406,7 @@ static int cpufreq_boost_set_sw(int state)
 	struct cpufreq_policy *policy;
 	int ret = -EINVAL;
 
-	for_each_policy(policy) {
+	for_each_active_policy(policy) {
 		freq_table = cpufreq_frequency_get_table(policy->cpu);
 		if (freq_table) {
 			ret = cpufreq_frequency_table_cpuinfo(policy,