Merge branches 'pm-cpufreq', 'intel_pstate' and 'pm-cpuidle'

* pm-cpufreq:
  cpufreq / CPPC: Initialize policy->min to lowest nonlinear performance
  cpufreq: sfi: make freq_table static
  cpufreq: exynos5440: Fix inconsistent indenting
  cpufreq: imx6q: imx6ull should use the same flow as imx6ul
  cpufreq: dt: Add support for hi3660

* intel_pstate:
  cpufreq: Update scaling_cur_freq documentation
  cpufreq: intel_pstate: Clean up after performance governor changes
  intel_pstate: skip scheduler hook when in "performance" mode
  intel_pstate: delete scheduler hook in HWP mode
  x86: use common aperfmperf_khz_on_cpu() to calculate KHz using APERF/MPERF
  cpufreq: intel_pstate: Remove max/min fractions to limit performance
  x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz"

* pm-cpuidle:
  cpuidle: menu: allow state 0 to be disabled
  intel_idle: Use more common logging style
  x86/ACPI/cstate: Allow ACPI C1 FFH MWAIT use on AMD systems
  ARM: cpuidle: Support asymmetric idle definition
Rafael J. Wysocki 2017-07-03 14:21:18 +02:00
commit f1c7842e5f
18 changed files with 268 additions and 144 deletions

@ -269,16 +269,16 @@ are the following:
``scaling_cur_freq``
Current frequency of all of the CPUs belonging to this policy (in kHz).
For the majority of scaling drivers, this is the frequency of the last
P-state requested by the driver from the hardware using the scaling
In the majority of cases, this is the frequency of the last P-state
requested by the scaling driver from the hardware using the scaling
interface provided by it, which may or may not reflect the frequency
the CPU is actually running at (due to hardware design and other
limitations).
Some scaling drivers (e.g. |intel_pstate|) attempt to provide
information more precisely reflecting the current CPU frequency through
this attribute, but that still may not be the exact current CPU
frequency as seen by the hardware at the moment.
Some architectures (e.g. ``x86``) may attempt to provide information
more precisely reflecting the current CPU frequency through this
attribute, but that still may not be the exact current CPU frequency as
seen by the hardware at the moment.
``scaling_driver``
The scaling driver currently in use.
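A minimal user-space sketch of reading the ``scaling_cur_freq`` attribute described above; the sysfs path and CPU number are illustrative assumptions, not part of the patch:

#include <stdio.h>

int main(void)
{
	/* Illustrative path; each policy exposes the attribute under cpuN/cpufreq/. */
	const char *path = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq";
	unsigned int khz;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &khz) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* On x86 this may be an APERF/MPERF based estimate, as noted above. */
	printf("cpu0: %u kHz (%u MHz)\n", khz, khz / 1000);
	return 0;
}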

@ -157,10 +157,8 @@ Without HWP, this P-state selection algorithm is always the same regardless of
the processor model and platform configuration.
It selects the maximum P-state it is allowed to use, subject to limits set via
``sysfs``, every time the P-state selection computations are carried out by the
driver's utilization update callback for the given CPU (that does not happen
more often than every 10 ms), but the hardware configuration will not be changed
if the new P-state is the same as the current one.
``sysfs``, every time the driver configuration for the given CPU is updated
(e.g. via ``sysfs``).
This is the default P-state selection algorithm if the
:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option

@ -167,7 +167,8 @@ static int __init ffh_cstate_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_INTEL)
if (c->x86_vendor != X86_VENDOR_INTEL &&
c->x86_vendor != X86_VENDOR_AMD)
return -1;
cpu_cstate_entry = alloc_percpu(struct cstate_entry);

@ -21,6 +21,7 @@ obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o

@ -0,0 +1,79 @@
/*
* x86 APERF/MPERF KHz calculation for
* /sys/.../cpufreq/scaling_cur_freq
*
* Copyright (C) 2017 Intel Corp.
* Author: Len Brown <len.brown@intel.com>
*
* This file is licensed under GPLv2.
*/
#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/smp.h>
struct aperfmperf_sample {
unsigned int khz;
unsigned long jiffies;
u64 aperf;
u64 mperf;
};
static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
/*
* aperfmperf_snapshot_khz()
* On the current CPU, snapshot APERF, MPERF, and jiffies
* unless we already did it within 10ms
* calculate kHz, save snapshot
*/
static void aperfmperf_snapshot_khz(void *dummy)
{
u64 aperf, aperf_delta;
u64 mperf, mperf_delta;
struct aperfmperf_sample *s = this_cpu_ptr(&samples);
/* Don't bother re-computing within 10 ms */
if (time_before(jiffies, s->jiffies + HZ/100))
return;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
aperf_delta = aperf - s->aperf;
mperf_delta = mperf - s->mperf;
/*
* There is no architectural guarantee that MPERF
* increments faster than we can read it.
*/
if (mperf_delta == 0)
return;
/*
* if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then
* khz = (cpu_khz * aperf_delta) / mperf_delta
*/
if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta)
s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
else /* khz = aperf_delta / (mperf_delta / cpu_khz) */
s->khz = div64_u64(aperf_delta,
div64_u64(mperf_delta, cpu_khz));
s->jiffies = jiffies;
s->aperf = aperf;
s->mperf = mperf;
}
unsigned int arch_freq_get_on_cpu(int cpu)
{
if (!cpu_khz)
return 0;
if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
return per_cpu(samples.khz, cpu);
}
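The two-branch division above keeps cpu_khz * aperf_delta from overflowing 64 bits. A self-contained sketch of the same arithmetic, using invented sample deltas rather than real MSR readings:

#include <stdint.h>
#include <stdio.h>

static uint64_t khz_from_deltas(uint64_t cpu_khz, uint64_t aperf_delta,
				uint64_t mperf_delta)
{
	if (mperf_delta == 0)
		return 0;
	/* Use the more precise form when cpu_khz * aperf_delta cannot overflow. */
	if (UINT64_MAX / cpu_khz > aperf_delta)
		return cpu_khz * aperf_delta / mperf_delta;
	return aperf_delta / (mperf_delta / cpu_khz);
}

int main(void)
{
	/* 3 GHz base clock; the CPU ran at 2/3 of it during the sample window. */
	printf("%llu kHz\n",
	       (unsigned long long)khz_from_deltas(3000000, 20000000, 30000000));
	return 0;	/* prints 2000000 kHz */
}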

@ -2,7 +2,6 @@
#include <linux/timex.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/cpufreq.h>
/*
* Get CPU information for use by the procfs.
@ -76,14 +75,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (c->microcode)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) {
unsigned int freq = cpufreq_quick_get(cpu);
if (!freq)
freq = cpu_khz;
if (cpu_has(c, X86_FEATURE_TSC))
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
freq / 1000, (freq % 1000));
}
cpu_khz / 1000, (cpu_khz % 1000));
/* Cache size */
if (c->x86_cache_size >= 0)

@ -144,10 +144,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cppc_dmi_max_khz = cppc_get_dmi_max_khz();
policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / cpu->perf_caps.highest_perf;
/*
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
cpu->perf_caps.highest_perf;
policy->max = cppc_dmi_max_khz;
policy->cpuinfo.min_freq = policy->min;
policy->cpuinfo.max_freq = policy->max;
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
* available if userspace wants to use any perf between lowest & lowest
* nonlinear perf
*/
policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
cpu->perf_caps.highest_perf;
policy->cpuinfo.max_freq = cppc_dmi_max_khz;
policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
policy->shared_type = cpu->shared_type;
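The kHz values above come from scaling cppc_dmi_max_khz by the ratio of a CPPC performance level to highest_perf. A worked example with assumed capability numbers (purely illustrative, not from any real platform):

#include <stdio.h>

int main(void)
{
	/* Assumed CPPC capabilities, chosen only for illustration. */
	unsigned int highest_perf = 300, lowest_perf = 30;
	unsigned int lowest_nonlinear_perf = 90;
	unsigned int dmi_max_khz = 3000000;	/* as returned by cppc_get_dmi_max_khz() */

	printf("policy->min      = %u kHz\n",
	       lowest_nonlinear_perf * dmi_max_khz / highest_perf);	/* 900000 */
	printf("cpuinfo.min_freq = %u kHz\n",
	       lowest_perf * dmi_max_khz / highest_perf);		/* 300000 */
	printf("policy->max     = %u kHz\n", dmi_max_khz);		/* 3000000 */
	return 0;
}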

@ -31,6 +31,7 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "arm,integrator-ap", },
{ .compatible = "arm,integrator-cp", },
{ .compatible = "hisilicon,hi3660", },
{ .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },

@ -632,11 +632,21 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
return 0;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
unsigned int freq;
if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
freq = arch_freq_get_on_cpu(policy->cpu);
if (freq)
ret = sprintf(buf, "%u\n", freq);
else if (cpufreq_driver && cpufreq_driver->setpolicy &&
cpufreq_driver->get)
ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
else
ret = sprintf(buf, "%u\n", policy->cur);
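Because the __weak arch_freq_get_on_cpu() stub returns 0, the reworked show_scaling_cur_freq() falls back from the architecture estimate to the driver's ->get() callback and finally to policy->cur. A user-space sketch of that ordering, with invented frequencies:

#include <stdio.h>

static unsigned int arch_estimate_khz;		/* 0: no APERF/MPERF style estimate */
static unsigned int driver_get_khz = 1800000;	/* what cpufreq_driver->get() would return */
static unsigned int policy_cur_khz = 1600000;	/* cached policy->cur */

static unsigned int scaling_cur_freq(int setpolicy_driver_with_get)
{
	unsigned int freq = 0;

	if (setpolicy_driver_with_get)
		freq = arch_estimate_khz;	/* arch_freq_get_on_cpu() */
	if (freq)
		return freq;
	if (setpolicy_driver_with_get)
		return driver_get_khz;		/* cpufreq_driver->get() */
	return policy_cur_khz;			/* policy->cur */
}

int main(void)
{
	printf("%u\n", scaling_cur_freq(1));	/* 1800000: driver ->get() used */
	arch_estimate_khz = 2000000;
	printf("%u\n", scaling_cur_freq(1));	/* 2000000: arch estimate wins */
	return 0;
}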

@ -173,12 +173,12 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
/* Enable PSTATE Change Event */
tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
/* Enable PSTATE Change IRQ */
tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
/* Set initial performance index */
cpufreq_for_each_entry(pos, freq_table)
@ -330,7 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
struct resource res;
unsigned int cur_frequency;
np = pdev->dev.of_node;
np = pdev->dev.of_node;
if (!np)
return -ENODEV;

@ -101,7 +101,8 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
* - Disable pll2_pfd2_396m_clk
*/
if (of_machine_is_compatible("fsl,imx6ul")) {
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
/*
* When changing pll1_sw_clk's parent to pll1_sys_clk,
* CPU may run at higher than 528MHz, this will lead to
@ -215,7 +216,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_clk;
}
if (of_machine_is_compatible("fsl,imx6ul")) {
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {

@ -231,10 +231,8 @@ struct global_params {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
* @min_perf: Minimum capacity limit as a fraction of the maximum
* turbo P-state capacity.
* @max_perf: Maximum capacity limit as a fraction of the maximum
* turbo P-state capacity.
* @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
* @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
* @epp_powersave: Last saved HWP energy performance preference
@ -266,8 +264,8 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
int32_t min_perf;
int32_t max_perf;
int32_t min_perf_ratio;
int32_t max_perf_ratio;
#ifdef CONFIG_ACPI
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
@ -790,25 +788,32 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
int *current_max)
{
u64 cap;
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
if (global.no_turbo)
*current_max = HWP_GUARANTEED_PERF(cap);
else
*current_max = HWP_HIGHEST_PERF(cap);
*phy_max = HWP_HIGHEST_PERF(cap);
}
static void intel_pstate_hwp_set(unsigned int cpu)
{
struct cpudata *cpu_data = all_cpu_data[cpu];
int min, hw_min, max, hw_max;
u64 value, cap;
int max, min;
u64 value;
s16 epp;
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap);
if (global.no_turbo)
hw_max = HWP_GUARANTEED_PERF(cap);
else
hw_max = HWP_HIGHEST_PERF(cap);
max = cpu_data->max_perf_ratio;
min = cpu_data->min_perf_ratio;
max = fp_ext_toint(hw_max * cpu_data->max_perf);
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
min = max;
else
min = fp_ext_toint(hw_max * cpu_data->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
@ -1524,8 +1529,7 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
update_turbo_state();
pstate = intel_pstate_get_base_pstate(cpu);
pstate = max(cpu->pstate.min_pstate,
fp_ext_toint(pstate * cpu->max_perf));
pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
intel_pstate_set_pstate(cpu, pstate);
}
@ -1612,9 +1616,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
int32_t busy_frac, boost;
int target, avg_pstate;
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
return cpu->pstate.turbo_pstate;
busy_frac = div_fp(sample->mperf, sample->tsc);
boost = cpu->iowait_boost;
@ -1651,9 +1652,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
u64 duration_ns;
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
return cpu->pstate.turbo_pstate;
/*
* perf_scaled is the ratio of the average P-state during the last
* sampling period to the P-state requested last time (in percent).
@ -1691,9 +1689,8 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
int max_pstate = intel_pstate_get_base_pstate(cpu);
int min_pstate;
min_pstate = max(cpu->pstate.min_pstate,
fp_ext_toint(max_pstate * cpu->min_perf));
max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
max_pstate = max(min_pstate, cpu->max_perf_ratio);
return clamp_t(int, pstate, min_pstate, max_pstate);
}
@ -1729,16 +1726,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
fp_toint(cpu->iowait_boost * 100));
}
static void intel_pstate_update_util_hwp(struct update_util_data *data,
u64 time, unsigned int flags)
{
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns = time - cpu->sample.time;
if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
intel_pstate_sample(cpu, time);
}
static void intel_pstate_update_util_pid(struct update_util_data *data,
u64 time, unsigned int flags)
{
@ -1930,6 +1917,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
if (hwp_active)
return;
if (cpu->update_util_set)
return;
@ -1963,52 +1953,61 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
{
int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
int max_state, turbo_max;
max_policy_perf = div_ext_fp(policy->max, max_freq);
max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
/*
* HWP needs some special consideration, because on BDX the
* HWP_REQUEST uses abstract value to represent performance
* rather than pure ratios.
*/
if (hwp_active) {
intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
} else {
max_state = intel_pstate_get_base_pstate(cpu);
turbo_max = cpu->pstate.turbo_pstate;
}
max_policy_perf = max_state * policy->max / max_freq;
if (policy->max == policy->min) {
min_policy_perf = max_policy_perf;
} else {
min_policy_perf = div_ext_fp(policy->min, max_freq);
min_policy_perf = max_state * policy->min / max_freq;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}
pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
policy->cpu, max_state,
min_policy_perf, max_policy_perf);
/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
cpu->min_perf = min_policy_perf;
cpu->max_perf = max_policy_perf;
cpu->min_perf_ratio = min_policy_perf;
cpu->max_perf_ratio = max_policy_perf;
} else {
int32_t global_min, global_max;
/* Global limits are in percent of the maximum turbo P-state. */
global_max = percent_ext_fp(global.max_perf_pct);
global_min = percent_ext_fp(global.min_perf_pct);
if (max_freq != cpu->pstate.turbo_freq) {
int32_t turbo_factor;
turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
cpu->pstate.max_pstate);
global_min = mul_ext_fp(global_min, turbo_factor);
global_max = mul_ext_fp(global_max, turbo_factor);
}
global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);
cpu->min_perf = max(min_policy_perf, global_min);
cpu->min_perf = min(cpu->min_perf, max_policy_perf);
cpu->max_perf = min(max_policy_perf, global_max);
cpu->max_perf = max(min_policy_perf, cpu->max_perf);
pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
global_min, global_max);
cpu->min_perf_ratio = max(min_policy_perf, global_min);
cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
cpu->max_perf_ratio = min(max_policy_perf, global_max);
cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
/* Make sure min_perf <= max_perf */
cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
cpu->min_perf_ratio = min(cpu->min_perf_ratio,
cpu->max_perf_ratio);
}
cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
fp_ext_toint(cpu->max_perf * 100),
fp_ext_toint(cpu->min_perf * 100));
pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
cpu->max_perf_ratio,
cpu->min_perf_ratio);
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
@ -2035,10 +2034,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
*/
intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_max_within_limits(cpu);
} else {
intel_pstate_set_update_util_hook(policy->cpu);
}
intel_pstate_set_update_util_hook(policy->cpu);
if (hwp_active)
intel_pstate_hwp_set(policy->cpu);
@ -2111,8 +2110,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
cpu->max_perf = int_ext_tofp(1);
cpu->min_perf = 0;
cpu->max_perf_ratio = 0xFF;
cpu->min_perf_ratio = 0;
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@ -2554,7 +2553,6 @@ static int __init intel_pstate_init(void)
} else {
hwp_active++;
intel_pstate.attr = hwp_cpufreq_attrs;
pstate_funcs.update_util = intel_pstate_update_util_hwp;
goto hwp_cpu_matched;
}
} else {
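The limits rework above replaces fixed-point fractions of the turbo P-state with plain integer P-state/HWP ratios. A worked example with assumed values, mirroring the main clamp steps of intel_pstate_update_perf_limits():

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed CPU: turbo P-state 32, 100000 kHz per ratio step, turbo on,
	 * so max_state equals turbo_max and max_freq is the turbo frequency. */
	unsigned int max_state = 32, turbo_max = 32;
	unsigned int max_freq = 3200000;
	unsigned int policy_min = 1600000, policy_max = 3200000;	/* from sysfs */
	unsigned int global_min_pct = 20, global_max_pct = 75;		/* min/max_perf_pct */

	unsigned int max_policy_perf = max_state * policy_max / max_freq;	/* 32 */
	unsigned int min_policy_perf = max_state * policy_min / max_freq;	/* 16 */
	unsigned int global_max = DIV_ROUND_UP(turbo_max * global_max_pct, 100);	/* 24 */
	unsigned int global_min = DIV_ROUND_UP(turbo_max * global_min_pct, 100);	/* 7 */
	unsigned int min_ratio = min_policy_perf > global_min ? min_policy_perf : global_min;
	unsigned int max_ratio = max_policy_perf < global_max ? max_policy_perf : global_max;

	printf("min_perf_ratio %u, max_perf_ratio %u\n", min_ratio, max_ratio);	/* 16, 24 */
	return 0;
}

The remaining clamps in the patch (keeping min_perf_ratio no larger than max_perf_ratio and within the policy range) leave these particular values unchanged.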

@ -24,7 +24,7 @@
#include <asm/msr.h>
struct cpufreq_frequency_table *freq_table;
static struct cpufreq_frequency_table *freq_table;
static struct sfi_freq_table_entry *sfi_cpufreq_array;
static int num_freq_table_entries;

@ -4,6 +4,7 @@
config ARM_CPUIDLE
bool "Generic ARM/ARM64 CPU idle Driver"
select DT_IDLE_STATES
select CPU_IDLE_MULTIPLE_DRIVERS
help
Select this to enable generic cpuidle driver for ARM.
It provides a generic idle driver whose idle states are configured

@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <asm/cpuidle.h>
@ -44,7 +45,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}
static struct cpuidle_driver arm_idle_driver = {
static struct cpuidle_driver arm_idle_driver __initdata = {
.name = "arm_idle",
.owner = THIS_MODULE,
/*
@ -80,30 +81,42 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
static int __init arm_idle_init(void)
{
int cpu, ret;
struct cpuidle_driver *drv = &arm_idle_driver;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
/*
* Initialize idle states data, starting at index 1.
* This driver is DT only, if no DT idle states are detected (ret == 0)
* let the driver initialization fail accordingly since there is no
* reason to initialize the idle driver if only wfi is supported.
*/
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0)
return ret ? : -ENODEV;
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("Failed to register cpuidle driver\n");
return ret;
}
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
*/
for_each_possible_cpu(cpu) {
drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
if (!drv) {
ret = -ENOMEM;
goto out_fail;
}
drv->cpumask = (struct cpumask *)cpumask_of(cpu);
/*
* Initialize idle states data, starting at index 1. This
* driver is DT only, if no DT idle states are detected (ret
* == 0) let the driver initialization fail accordingly since
* there is no reason to initialize the idle driver if only
* wfi is supported.
*/
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0) {
ret = ret ? : -ENODEV;
goto out_fail;
}
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("Failed to register cpuidle driver\n");
goto out_fail;
}
/*
* Call arch CPU operations in order to initialize
* idle states suspend back-end specific data
*/
ret = arm_cpuidle_init(cpu);
/*
@ -141,10 +154,11 @@ out_fail:
dev = per_cpu(cpuidle_devices, cpu);
cpuidle_unregister_device(dev);
kfree(dev);
drv = cpuidle_get_driver();
cpuidle_unregister_driver(drv);
kfree(drv);
}
cpuidle_unregister_driver(drv);
return ret;
}
device_initcall(arm_idle_init);

@ -286,6 +286,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct device *device = get_cpu_device(dev->cpu);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
int first_idx;
int idx;
unsigned int interactivity_req;
unsigned int expected_interval;
unsigned long nr_iowaiters, cpu_load;
@ -335,11 +337,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (data->next_timer_us > polling_threshold &&
latency_req > s->exit_latency && !s->disabled &&
!dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
first_idx = CPUIDLE_DRIVER_STATE_START;
else
data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
first_idx = CPUIDLE_DRIVER_STATE_START - 1;
} else {
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
first_idx = 0;
}
/*
@ -359,20 +361,28 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
idx = -1;
for (i = first_idx; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
struct cpuidle_state_usage *su = &dev->states_usage[i];
if (s->disabled || su->disable)
continue;
if (idx == -1)
idx = i; /* first enabled state */
if (s->target_residency > data->predicted_us)
break;
if (s->exit_latency > latency_req)
break;
data->last_state_idx = i;
idx = i;
}
if (idx == -1)
idx = 0; /* No states enabled. Must use 0. */
data->last_state_idx = idx;
return data->last_state_idx;
}
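The loop above records the first enabled state as the initial candidate, so state 0 can now be disabled without still being selected by default. A toy model with an invented three-state table, assuming first_idx is 0:

#include <stdio.h>

struct state { unsigned int residency_us, latency_us; int disabled; };

static int menu_pick(const struct state *s, int count,
		     unsigned int predicted_us, unsigned int latency_req)
{
	int i, idx = -1;

	for (i = 0; i < count; i++) {
		if (s[i].disabled)
			continue;
		if (idx == -1)
			idx = i;			/* first enabled state */
		if (s[i].residency_us > predicted_us)
			break;
		if (s[i].latency_us > latency_req)
			break;
		idx = i;
	}
	return idx == -1 ? 0 : idx;			/* nothing enabled: use 0 */
}

int main(void)
{
	struct state tbl[] = {
		{ 0,   0,  1 },		/* state 0 (polling) disabled */
		{ 20,  10, 0 },
		{ 200, 80, 0 },
	};

	printf("%d\n", menu_pick(tbl, 3, 50, 100));	/* prints 1 */
	return 0;
}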

@ -51,6 +51,8 @@
/* un-comment DEBUG to enable pr_debug() statements */
#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
@ -65,7 +67,6 @@
#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4.1"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
@ -1111,7 +1112,7 @@ static int __init intel_idle_probe(void)
const struct x86_cpu_id *id;
if (max_cstate == 0) {
pr_debug(PREFIX "disabled\n");
pr_debug("disabled\n");
return -EPERM;
}
@ -1119,8 +1120,8 @@ static int __init intel_idle_probe(void)
if (!id) {
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6)
pr_debug(PREFIX "does not run on family %d model %d\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
pr_debug("does not run on family %d model %d\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
return -ENODEV;
}
@ -1134,13 +1135,13 @@ static int __init intel_idle_probe(void)
!mwait_substates)
return -ENODEV;
pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
icpu = (const struct idle_cpu *)id->driver_data;
cpuidle_state_table = icpu->state_table;
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
boot_cpu_data.x86_model);
return 0;
}
@ -1340,8 +1341,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
break;
if (cstate + 1 > max_cstate) {
printk(PREFIX "max_cstate %d reached\n",
max_cstate);
pr_info("max_cstate %d reached\n", max_cstate);
break;
}
@ -1358,8 +1358,8 @@ static void __init intel_idle_cpuidle_driver_init(void)
/* if state marked as disabled, skip it */
if (cpuidle_state_table[cstate].disabled != 0) {
pr_debug(PREFIX "state %s is disabled",
cpuidle_state_table[cstate].name);
pr_debug("state %s is disabled\n",
cpuidle_state_table[cstate].name);
continue;
}
@ -1395,7 +1395,7 @@ static int intel_idle_cpu_init(unsigned int cpu)
dev->cpu = cpu;
if (cpuidle_register_device(dev)) {
pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
pr_debug("cpuidle_register_device %d failed!\n", cpu);
return -EIO;
}
@ -1447,8 +1447,8 @@ static int __init intel_idle_init(void)
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
drv ? drv->name : "none");
goto init_driver_fail;
}
@ -1460,8 +1460,8 @@ static int __init intel_idle_init(void)
if (retval < 0)
goto hp_setup_fail;
pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
lapic_timer_reliable_states);
pr_debug("lapic_timer_reliable_states 0x%x\n",
lapic_timer_reliable_states);
return 0;

@ -883,6 +883,8 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
}
#endif
extern unsigned int arch_freq_get_on_cpu(int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;