Merge branches 'pm-cpufreq', 'intel_pstate' and 'pm-cpuidle'
* pm-cpufreq:
  cpufreq / CPPC: Initialize policy->min to lowest nonlinear performance
  cpufreq: sfi: make freq_table static
  cpufreq: exynos5440: Fix inconsistent indenting
  cpufreq: imx6q: imx6ull should use the same flow as imx6ul
  cpufreq: dt: Add support for hi3660

* intel_pstate:
  cpufreq: Update scaling_cur_freq documentation
  cpufreq: intel_pstate: Clean up after performance governor changes
  intel_pstate: skip scheduler hook when in "performance" mode
  intel_pstate: delete scheduler hook in HWP mode
  x86: use common aperfmperf_khz_on_cpu() to calculate KHz using APERF/MPERF
  cpufreq: intel_pstate: Remove max/min fractions to limit performance
  x86: do not use cpufreq_quick_get() for /proc/cpuinfo "cpu MHz"

* pm-cpuidle:
  cpuidle: menu: allow state 0 to be disabled
  intel_idle: Use more common logging style
  x86/ACPI/cstate: Allow ACPI C1 FFH MWAIT use on AMD systems
  ARM: cpuidle: Support asymmetric idle definition
commit f1c7842e5f
@@ -269,16 +269,16 @@ are the following:
 ``scaling_cur_freq``
     Current frequency of all of the CPUs belonging to this policy (in kHz).
 
-    For the majority of scaling drivers, this is the frequency of the last
-    P-state requested by the driver from the hardware using the scaling
+    In the majority of cases, this is the frequency of the last P-state
+    requested by the scaling driver from the hardware using the scaling
     interface provided by it, which may or may not reflect the frequency
     the CPU is actually running at (due to hardware design and other
     limitations).
 
-    Some scaling drivers (e.g. |intel_pstate|) attempt to provide
-    information more precisely reflecting the current CPU frequency through
-    this attribute, but that still may not be the exact current CPU
-    frequency as seen by the hardware at the moment.
+    Some architectures (e.g. ``x86``) may attempt to provide information
+    more precisely reflecting the current CPU frequency through this
+    attribute, but that still may not be the exact current CPU frequency as
+    seen by the hardware at the moment.
 
 ``scaling_driver``
     The scaling driver currently in use.
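Aside (not part of the patch): the attribute documented above is exposed per policy under sysfs and reads back a plain kHz value. A minimal userspace sketch, assuming the usual /sys/devices/system/cpu/cpu0/cpufreq/ layout:

#include <stdio.h>

int main(void)
{
    unsigned int khz = 0;
    FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", "r");

    if (!f) {
        perror("scaling_cur_freq");
        return 1;
    }
    if (fscanf(f, "%u", &khz) == 1)
        printf("cpu0: %u kHz (~%u MHz)\n", khz, khz / 1000);
    fclose(f);
    return 0;
}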
@@ -157,10 +157,8 @@ Without HWP, this P-state selection algorithm is always the same regardless of
 the processor model and platform configuration.
 
 It selects the maximum P-state it is allowed to use, subject to limits set via
-``sysfs``, every time the P-state selection computations are carried out by the
-driver's utilization update callback for the given CPU (that does not happen
-more often than every 10 ms), but the hardware configuration will not be changed
-if the new P-state is the same as the current one.
+``sysfs``, every time the driver configuration for the given CPU is updated
+(e.g. via ``sysfs``).
 
 This is the default P-state selection algorithm if the
 :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
@@ -167,7 +167,8 @@ static int __init ffh_cstate_init(void)
 {
     struct cpuinfo_x86 *c = &boot_cpu_data;
 
-    if (c->x86_vendor != X86_VENDOR_INTEL)
+    if (c->x86_vendor != X86_VENDOR_INTEL &&
+        c->x86_vendor != X86_VENDOR_AMD)
         return -1;
 
     cpu_cstate_entry = alloc_percpu(struct cstate_entry);
@@ -21,6 +21,7 @@ obj-y += common.o
 obj-y += rdrand.o
 obj-y += match.o
 obj-y += bugs.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
 
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
@@ -0,0 +1,79 @@
+/*
+ * x86 APERF/MPERF KHz calculation for
+ * /sys/.../cpufreq/scaling_cur_freq
+ *
+ * Copyright (C) 2017 Intel Corp.
+ * Author: Len Brown <len.brown@intel.com>
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+struct aperfmperf_sample {
+    unsigned int khz;
+    unsigned long jiffies;
+    u64 aperf;
+    u64 mperf;
+};
+
+static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+
+/*
+ * aperfmperf_snapshot_khz()
+ * On the current CPU, snapshot APERF, MPERF, and jiffies
+ * unless we already did it within 10ms
+ * calculate kHz, save snapshot
+ */
+static void aperfmperf_snapshot_khz(void *dummy)
+{
+    u64 aperf, aperf_delta;
+    u64 mperf, mperf_delta;
+    struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+
+    /* Don't bother re-computing within 10 ms */
+    if (time_before(jiffies, s->jiffies + HZ/100))
+        return;
+
+    rdmsrl(MSR_IA32_APERF, aperf);
+    rdmsrl(MSR_IA32_MPERF, mperf);
+
+    aperf_delta = aperf - s->aperf;
+    mperf_delta = mperf - s->mperf;
+
+    /*
+     * There is no architectural guarantee that MPERF
+     * increments faster than we can read it.
+     */
+    if (mperf_delta == 0)
+        return;
+
+    /*
+     * if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then
+     * khz = (cpu_khz * aperf_delta) / mperf_delta
+     */
+    if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta)
+        s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
+    else    /* khz = aperf_delta / (mperf_delta / cpu_khz) */
+        s->khz = div64_u64(aperf_delta,
+                           div64_u64(mperf_delta, cpu_khz));
+    s->jiffies = jiffies;
+    s->aperf = aperf;
+    s->mperf = mperf;
+}
+
+unsigned int arch_freq_get_on_cpu(int cpu)
+{
+    if (!cpu_khz)
+        return 0;
+
+    if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+        return 0;
+
+    smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+
+    return per_cpu(samples.khz, cpu);
+}
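Aside (illustration only): the new file estimates kHz as cpu_khz * aperf_delta / mperf_delta, falling back to aperf_delta / (mperf_delta / cpu_khz) when the multiplication could overflow 64 bits. A standalone userspace sketch of the same arithmetic, with invented sample deltas:

#include <stdint.h>
#include <stdio.h>

static uint64_t est_khz(uint64_t cpu_khz, uint64_t aperf_delta, uint64_t mperf_delta)
{
    if (mperf_delta == 0)                       /* nothing elapsed, no estimate */
        return 0;
    if (UINT64_MAX / cpu_khz > aperf_delta)     /* product fits in 64 bits */
        return cpu_khz * aperf_delta / mperf_delta;
    /* otherwise trade a little precision for safety */
    return aperf_delta / (mperf_delta / cpu_khz);
}

int main(void)
{
    /* a 2400000 kHz CPU that ran at 1.5x its base clock during the sample */
    printf("%llu kHz\n",
           (unsigned long long)est_khz(2400000, 3000000, 2000000)); /* prints 3600000 */
    return 0;
}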
@@ -2,7 +2,6 @@
 #include <linux/timex.h>
 #include <linux/string.h>
 #include <linux/seq_file.h>
-#include <linux/cpufreq.h>
 
 /*
  * Get CPU information for use by the procfs.
@@ -76,14 +75,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
     if (c->microcode)
         seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
 
-    if (cpu_has(c, X86_FEATURE_TSC)) {
-        unsigned int freq = cpufreq_quick_get(cpu);
-
-        if (!freq)
-            freq = cpu_khz;
+    if (cpu_has(c, X86_FEATURE_TSC))
         seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-                   freq / 1000, (freq % 1000));
-    }
+                   cpu_khz / 1000, (cpu_khz % 1000));
 
     /* Cache size */
     if (c->x86_cache_size >= 0)
@@ -144,10 +144,23 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
     cppc_dmi_max_khz = cppc_get_dmi_max_khz();
 
-    policy->min = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz / cpu->perf_caps.highest_perf;
+    /*
+     * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
+     * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
+     */
+    policy->min = cpu->perf_caps.lowest_nonlinear_perf * cppc_dmi_max_khz /
+        cpu->perf_caps.highest_perf;
     policy->max = cppc_dmi_max_khz;
-    policy->cpuinfo.min_freq = policy->min;
-    policy->cpuinfo.max_freq = policy->max;
+
+    /*
+     * Set cpuinfo.min_freq to Lowest to make the full range of performance
+     * available if userspace wants to use any perf between lowest & lowest
+     * nonlinear perf
+     */
+    policy->cpuinfo.min_freq = cpu->perf_caps.lowest_perf * cppc_dmi_max_khz /
+        cpu->perf_caps.highest_perf;
+    policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+
     policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
     policy->shared_type = cpu->shared_type;
 
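Aside (invented numbers, for orientation only): with highest_perf = 300, lowest_perf = 30, lowest_nonlinear_perf = 120 and cppc_dmi_max_khz = 3000000, the new initialization gives policy->min = 120 * 3000000 / 300 = 1200000 kHz, while cpuinfo.min_freq is still derived from lowest_perf as 30 * 3000000 / 300 = 300000 kHz, so the governor avoids the less efficient sub-nonlinear range by default while userspace may still request it explicitly.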
@@ -31,6 +31,7 @@ static const struct of_device_id machines[] __initconst = {
     { .compatible = "arm,integrator-ap", },
     { .compatible = "arm,integrator-cp", },
 
+    { .compatible = "hisilicon,hi3660", },
     { .compatible = "hisilicon,hi6220", },
 
     { .compatible = "fsl,imx27", },
@@ -632,11 +632,21 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 
+__weak unsigned int arch_freq_get_on_cpu(int cpu)
+{
+    return 0;
+}
+
 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 {
     ssize_t ret;
+    unsigned int freq;
 
-    if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+    freq = arch_freq_get_on_cpu(policy->cpu);
+    if (freq)
+        ret = sprintf(buf, "%u\n", freq);
+    else if (cpufreq_driver && cpufreq_driver->setpolicy &&
+             cpufreq_driver->get)
         ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
     else
         ret = sprintf(buf, "%u\n", policy->cur);
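Aside (toy example, separate from the kernel build): the hunk above relies on weak linkage; __weak expands to __attribute__((weak)), so the stub returning 0 is used only when no architecture supplies a strong arch_freq_get_on_cpu() of its own (x86 now does, in the new aperfmperf.c). The pattern in plain C, split across translation units:

/* default.c -- weak default; any strong definition elsewhere wins at link time */
__attribute__((weak)) unsigned int arch_freq_get_on_cpu(int cpu)
{
    (void)cpu;
    return 0;               /* "no better information available" */
}

/* override.c -- strong definition, e.g. built only on a capable architecture */
unsigned int arch_freq_get_on_cpu(int cpu)
{
    (void)cpu;
    return 2400000;         /* made-up kHz value, illustration only */
}

/* main.c */
#include <stdio.h>

unsigned int arch_freq_get_on_cpu(int cpu);

int main(void)
{
    /* prints 2400000 when override.c is linked in, 0 otherwise */
    printf("%u\n", arch_freq_get_on_cpu(0));
    return 0;
}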
@@ -173,12 +173,12 @@ static void exynos_enable_dvfs(unsigned int cur_frequency)
     /* Enable PSTATE Change Event */
     tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
     tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
     __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
 
     /* Enable PSTATE Change IRQ */
     tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
     tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
     __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
 
     /* Set initial performance index */
     cpufreq_for_each_entry(pos, freq_table)
@@ -330,7 +330,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
     struct resource res;
     unsigned int cur_frequency;
 
     np = pdev->dev.of_node;
     if (!np)
         return -ENODEV;
 
@@ -101,7 +101,8 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
      * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
      * - Disable pll2_pfd2_396m_clk
      */
-    if (of_machine_is_compatible("fsl,imx6ul")) {
+    if (of_machine_is_compatible("fsl,imx6ul") ||
+        of_machine_is_compatible("fsl,imx6ull")) {
         /*
          * When changing pll1_sw_clk's parent to pll1_sys_clk,
          * CPU may run at higher than 528MHz, this will lead to
@@ -215,7 +216,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
         goto put_clk;
     }
 
-    if (of_machine_is_compatible("fsl,imx6ul")) {
+    if (of_machine_is_compatible("fsl,imx6ul") ||
+        of_machine_is_compatible("fsl,imx6ull")) {
         pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
         secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
         if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
@@ -231,10 +231,8 @@ struct global_params {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  * current sample
  * @sample: Storage for storing last Sample data
- * @min_perf: Minimum capacity limit as a fraction of the maximum
- * turbo P-state capacity.
- * @max_perf: Maximum capacity limit as a fraction of the maximum
- * turbo P-state capacity.
+ * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios
+ * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios
  * @acpi_perf_data: Stores ACPI perf information read from _PSS
  * @valid_pss_table: Set to true for valid ACPI _PSS entries found
  * @epp_powersave: Last saved HWP energy performance preference
@@ -266,8 +264,8 @@ struct cpudata {
     u64 prev_tsc;
     u64 prev_cummulative_iowait;
     struct sample sample;
-    int32_t min_perf;
-    int32_t max_perf;
+    int32_t min_perf_ratio;
+    int32_t max_perf_ratio;
 #ifdef CONFIG_ACPI
     struct acpi_processor_performance acpi_perf_data;
     bool valid_pss_table;
@@ -790,25 +788,32 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
     NULL,
 };
 
+static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
+                                     int *current_max)
+{
+    u64 cap;
+
+    rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+    if (global.no_turbo)
+        *current_max = HWP_GUARANTEED_PERF(cap);
+    else
+        *current_max = HWP_HIGHEST_PERF(cap);
+
+    *phy_max = HWP_HIGHEST_PERF(cap);
+}
+
 static void intel_pstate_hwp_set(unsigned int cpu)
 {
     struct cpudata *cpu_data = all_cpu_data[cpu];
-    int min, hw_min, max, hw_max;
-    u64 value, cap;
+    int max, min;
+    u64 value;
     s16 epp;
 
-    rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
-    hw_min = HWP_LOWEST_PERF(cap);
-    if (global.no_turbo)
-        hw_max = HWP_GUARANTEED_PERF(cap);
-    else
-        hw_max = HWP_HIGHEST_PERF(cap);
+    max = cpu_data->max_perf_ratio;
+    min = cpu_data->min_perf_ratio;
 
-    max = fp_ext_toint(hw_max * cpu_data->max_perf);
     if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
         min = max;
-    else
-        min = fp_ext_toint(hw_max * cpu_data->min_perf);
 
     rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 
@@ -1524,8 +1529,7 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
 
     update_turbo_state();
     pstate = intel_pstate_get_base_pstate(cpu);
-    pstate = max(cpu->pstate.min_pstate,
-                 fp_ext_toint(pstate * cpu->max_perf));
+    pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
     intel_pstate_set_pstate(cpu, pstate);
 }
 
@@ -1612,9 +1616,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
     int32_t busy_frac, boost;
     int target, avg_pstate;
 
-    if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
-        return cpu->pstate.turbo_pstate;
-
     busy_frac = div_fp(sample->mperf, sample->tsc);
 
     boost = cpu->iowait_boost;
@@ -1651,9 +1652,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
     int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
     u64 duration_ns;
 
-    if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
-        return cpu->pstate.turbo_pstate;
-
     /*
      * perf_scaled is the ratio of the average P-state during the last
      * sampling period to the P-state requested last time (in percent).
@@ -1691,9 +1689,8 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
     int max_pstate = intel_pstate_get_base_pstate(cpu);
     int min_pstate;
 
-    min_pstate = max(cpu->pstate.min_pstate,
-                     fp_ext_toint(max_pstate * cpu->min_perf));
-    max_pstate = max(min_pstate, fp_ext_toint(max_pstate * cpu->max_perf));
+    min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
+    max_pstate = max(min_pstate, cpu->max_perf_ratio);
     return clamp_t(int, pstate, min_pstate, max_pstate);
 }
 
@@ -1729,16 +1726,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
              fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_pstate_update_util_hwp(struct update_util_data *data,
-                                         u64 time, unsigned int flags)
-{
-    struct cpudata *cpu = container_of(data, struct cpudata, update_util);
-    u64 delta_ns = time - cpu->sample.time;
-
-    if ((s64)delta_ns >= INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
-        intel_pstate_sample(cpu, time);
-}
-
 static void intel_pstate_update_util_pid(struct update_util_data *data,
                                          u64 time, unsigned int flags)
 {
@@ -1930,6 +1917,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
     struct cpudata *cpu = all_cpu_data[cpu_num];
 
+    if (hwp_active)
+        return;
+
     if (cpu->update_util_set)
         return;
 
@@ -1963,52 +1953,61 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 {
     int max_freq = intel_pstate_get_max_freq(cpu);
     int32_t max_policy_perf, min_policy_perf;
+    int max_state, turbo_max;
 
-    max_policy_perf = div_ext_fp(policy->max, max_freq);
-    max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
+    /*
+     * HWP needs some special consideration, because on BDX the
+     * HWP_REQUEST uses abstract value to represent performance
+     * rather than pure ratios.
+     */
+    if (hwp_active) {
+        intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+    } else {
+        max_state = intel_pstate_get_base_pstate(cpu);
+        turbo_max = cpu->pstate.turbo_pstate;
+    }
+
+    max_policy_perf = max_state * policy->max / max_freq;
     if (policy->max == policy->min) {
         min_policy_perf = max_policy_perf;
     } else {
-        min_policy_perf = div_ext_fp(policy->min, max_freq);
+        min_policy_perf = max_state * policy->min / max_freq;
         min_policy_perf = clamp_t(int32_t, min_policy_perf,
                                   0, max_policy_perf);
     }
 
+    pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
+             policy->cpu, max_state,
+             min_policy_perf, max_policy_perf);
+
     /* Normalize user input to [min_perf, max_perf] */
     if (per_cpu_limits) {
-        cpu->min_perf = min_policy_perf;
-        cpu->max_perf = max_policy_perf;
+        cpu->min_perf_ratio = min_policy_perf;
+        cpu->max_perf_ratio = max_policy_perf;
     } else {
         int32_t global_min, global_max;
 
         /* Global limits are in percent of the maximum turbo P-state. */
-        global_max = percent_ext_fp(global.max_perf_pct);
-        global_min = percent_ext_fp(global.min_perf_pct);
-        if (max_freq != cpu->pstate.turbo_freq) {
-            int32_t turbo_factor;
-
-            turbo_factor = div_ext_fp(cpu->pstate.turbo_pstate,
-                                      cpu->pstate.max_pstate);
-            global_min = mul_ext_fp(global_min, turbo_factor);
-            global_max = mul_ext_fp(global_max, turbo_factor);
-        }
+        global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
+        global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
         global_min = clamp_t(int32_t, global_min, 0, global_max);
 
-        cpu->min_perf = max(min_policy_perf, global_min);
-        cpu->min_perf = min(cpu->min_perf, max_policy_perf);
-        cpu->max_perf = min(max_policy_perf, global_max);
-        cpu->max_perf = max(min_policy_perf, cpu->max_perf);
+        pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+                 global_min, global_max);
+
+        cpu->min_perf_ratio = max(min_policy_perf, global_min);
+        cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
+        cpu->max_perf_ratio = min(max_policy_perf, global_max);
+        cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
 
         /* Make sure min_perf <= max_perf */
-        cpu->min_perf = min(cpu->min_perf, cpu->max_perf);
+        cpu->min_perf_ratio = min(cpu->min_perf_ratio,
+                                  cpu->max_perf_ratio);
+
     }
-
-    cpu->max_perf = round_up(cpu->max_perf, EXT_FRAC_BITS);
-    cpu->min_perf = round_up(cpu->min_perf, EXT_FRAC_BITS);
-
-    pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
-             fp_ext_toint(cpu->max_perf * 100),
-             fp_ext_toint(cpu->min_perf * 100));
+    pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+             cpu->max_perf_ratio,
+             cpu->min_perf_ratio);
 }
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
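Aside (invented numbers, to trace the new ratio arithmetic): limits are now kept as plain P-state/HWP ratios instead of extended fixed-point fractions. A small standalone sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    int turbo_max = 40, max_state = 40;             /* ratios, 100 MHz units */
    int max_freq = 4000000;                         /* kHz */
    int policy_max = 3000000, policy_min = 1200000; /* kHz, from the policy */
    int max_perf_pct = 75, min_perf_pct = 20;       /* global sysfs limits */

    int max_policy_perf = max_state * policy_max / max_freq;       /* 30 */
    int min_policy_perf = max_state * policy_min / max_freq;       /* 12 */
    int global_max = DIV_ROUND_UP(turbo_max * max_perf_pct, 100);  /* 30 */
    int global_min = DIV_ROUND_UP(turbo_max * min_perf_pct, 100);  /*  8 */

    printf("policy [%d..%d], global [%d..%d]\n",
           min_policy_perf, max_policy_perf, global_min, global_max);
    return 0;
}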
@@ -2035,10 +2034,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
          */
         intel_pstate_clear_update_util_hook(policy->cpu);
         intel_pstate_max_within_limits(cpu);
+    } else {
+        intel_pstate_set_update_util_hook(policy->cpu);
     }
 
-    intel_pstate_set_update_util_hook(policy->cpu);
-
     if (hwp_active)
         intel_pstate_hwp_set(policy->cpu);
 
@@ -2111,8 +2110,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
     cpu = all_cpu_data[policy->cpu];
 
-    cpu->max_perf = int_ext_tofp(1);
-    cpu->min_perf = 0;
+    cpu->max_perf_ratio = 0xFF;
+    cpu->min_perf_ratio = 0;
 
     policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
     policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2554,7 +2553,6 @@ static int __init intel_pstate_init(void)
         } else {
             hwp_active++;
             intel_pstate.attr = hwp_cpufreq_attrs;
-            pstate_funcs.update_util = intel_pstate_update_util_hwp;
             goto hwp_cpu_matched;
         }
     } else {
@@ -24,7 +24,7 @@
 
 #include <asm/msr.h>
 
-struct cpufreq_frequency_table *freq_table;
+static struct cpufreq_frequency_table *freq_table;
 static struct sfi_freq_table_entry *sfi_cpufreq_array;
 static int num_freq_table_entries;
 
@@ -4,6 +4,7 @@
 config ARM_CPUIDLE
     bool "Generic ARM/ARM64 CPU idle Driver"
     select DT_IDLE_STATES
+    select CPU_IDLE_MULTIPLE_DRIVERS
     help
       Select this to enable generic cpuidle driver for ARM.
       It provides a generic idle driver whose idle states are configured
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/topology.h>
 
 #include <asm/cpuidle.h>
 
@@ -44,7 +45,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
     return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
 }
 
-static struct cpuidle_driver arm_idle_driver = {
+static struct cpuidle_driver arm_idle_driver __initdata = {
     .name = "arm_idle",
     .owner = THIS_MODULE,
     /*
@@ -80,30 +81,42 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
 static int __init arm_idle_init(void)
 {
     int cpu, ret;
-    struct cpuidle_driver *drv = &arm_idle_driver;
+    struct cpuidle_driver *drv;
     struct cpuidle_device *dev;
 
-    /*
-     * Initialize idle states data, starting at index 1.
-     * This driver is DT only, if no DT idle states are detected (ret == 0)
-     * let the driver initialization fail accordingly since there is no
-     * reason to initialize the idle driver if only wfi is supported.
-     */
-    ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
-    if (ret <= 0)
-        return ret ? : -ENODEV;
-
-    ret = cpuidle_register_driver(drv);
-    if (ret) {
-        pr_err("Failed to register cpuidle driver\n");
-        return ret;
-    }
-
-    /*
-     * Call arch CPU operations in order to initialize
-     * idle states suspend back-end specific data
-     */
     for_each_possible_cpu(cpu) {
+
+        drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+        if (!drv) {
+            ret = -ENOMEM;
+            goto out_fail;
+        }
+
+        drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+        /*
+         * Initialize idle states data, starting at index 1. This
+         * driver is DT only, if no DT idle states are detected (ret
+         * == 0) let the driver initialization fail accordingly since
+         * there is no reason to initialize the idle driver if only
+         * wfi is supported.
+         */
+        ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+        if (ret <= 0) {
+            ret = ret ? : -ENODEV;
+            goto out_fail;
+        }
+
+        ret = cpuidle_register_driver(drv);
+        if (ret) {
+            pr_err("Failed to register cpuidle driver\n");
+            goto out_fail;
+        }
+
+        /*
+         * Call arch CPU operations in order to initialize
+         * idle states suspend back-end specific data
+         */
         ret = arm_cpuidle_init(cpu);
 
         /*
@@ -141,10 +154,11 @@ out_fail:
         dev = per_cpu(cpuidle_devices, cpu);
         cpuidle_unregister_device(dev);
         kfree(dev);
+        drv = cpuidle_get_driver();
+        cpuidle_unregister_driver(drv);
+        kfree(drv);
     }
 
-    cpuidle_unregister_driver(drv);
-
     return ret;
 }
 device_initcall(arm_idle_init);
@@ -286,6 +286,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
     struct device *device = get_cpu_device(dev->cpu);
     int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
     int i;
+    int first_idx;
+    int idx;
     unsigned int interactivity_req;
     unsigned int expected_interval;
     unsigned long nr_iowaiters, cpu_load;
@@ -335,11 +337,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         if (data->next_timer_us > polling_threshold &&
             latency_req > s->exit_latency && !s->disabled &&
             !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
-            data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+            first_idx = CPUIDLE_DRIVER_STATE_START;
         else
-            data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+            first_idx = CPUIDLE_DRIVER_STATE_START - 1;
     } else {
-        data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+        first_idx = 0;
     }
 
     /*
@@ -359,20 +361,28 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
      * Find the idle state with the lowest power while satisfying
      * our constraints.
      */
-    for (i = data->last_state_idx + 1; i < drv->state_count; i++) {
+    idx = -1;
+    for (i = first_idx; i < drv->state_count; i++) {
         struct cpuidle_state *s = &drv->states[i];
         struct cpuidle_state_usage *su = &dev->states_usage[i];
 
         if (s->disabled || su->disable)
             continue;
+        if (idx == -1)
+            idx = i; /* first enabled state */
         if (s->target_residency > data->predicted_us)
             break;
         if (s->exit_latency > latency_req)
             break;
 
-        data->last_state_idx = i;
+        idx = i;
     }
 
+    if (idx == -1)
+        idx = 0; /* No states enabled. Must use 0. */
+
+    data->last_state_idx = idx;
+
     return data->last_state_idx;
 }
 
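Aside (worked example): with the loop above, if states 0 and 1 are disabled and state 2 is the shallowest enabled state, idx is first set to 2 and the governor can legitimately return it, whereas the old code assumed the starting state was always usable; only when every state is disabled does the final idx == -1 check force state 0 as the last resort.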
@@ -51,6 +51,8 @@
 /* un-comment DEBUG to enable pr_debug() statements */
 #define DEBUG
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/tick.h>
@@ -65,7 +67,6 @@
 #include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4.1"
-#define PREFIX "intel_idle: "
 
 static struct cpuidle_driver intel_idle_driver = {
     .name = "intel_idle",
@@ -1111,7 +1112,7 @@ static int __init intel_idle_probe(void)
     const struct x86_cpu_id *id;
 
     if (max_cstate == 0) {
-        pr_debug(PREFIX "disabled\n");
+        pr_debug("disabled\n");
         return -EPERM;
     }
 
@@ -1119,8 +1120,8 @@
     if (!id) {
         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
             boot_cpu_data.x86 == 6)
-            pr_debug(PREFIX "does not run on family %d model %d\n",
+            pr_debug("does not run on family %d model %d\n",
                      boot_cpu_data.x86, boot_cpu_data.x86_model);
         return -ENODEV;
     }
 
@@ -1134,13 +1135,13 @@
         !mwait_substates)
         return -ENODEV;
 
-    pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+    pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
 
     icpu = (const struct idle_cpu *)id->driver_data;
     cpuidle_state_table = icpu->state_table;
 
-    pr_debug(PREFIX "v" INTEL_IDLE_VERSION
-             " model 0x%X\n", boot_cpu_data.x86_model);
+    pr_debug("v" INTEL_IDLE_VERSION " model 0x%X\n",
+             boot_cpu_data.x86_model);
 
     return 0;
 }
@@ -1340,8 +1341,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
             break;
 
         if (cstate + 1 > max_cstate) {
-            printk(PREFIX "max_cstate %d reached\n",
-                   max_cstate);
+            pr_info("max_cstate %d reached\n", max_cstate);
             break;
         }
 
@@ -1358,8 +1358,8 @@
 
         /* if state marked as disabled, skip it */
         if (cpuidle_state_table[cstate].disabled != 0) {
-            pr_debug(PREFIX "state %s is disabled",
+            pr_debug("state %s is disabled\n",
                      cpuidle_state_table[cstate].name);
             continue;
         }
 
@@ -1395,7 +1395,7 @@ static int intel_idle_cpu_init(unsigned int cpu)
     dev->cpu = cpu;
 
     if (cpuidle_register_device(dev)) {
-        pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+        pr_debug("cpuidle_register_device %d failed!\n", cpu);
         return -EIO;
     }
 
@@ -1447,8 +1447,8 @@ static int __init intel_idle_init(void)
     retval = cpuidle_register_driver(&intel_idle_driver);
     if (retval) {
         struct cpuidle_driver *drv = cpuidle_get_driver();
-        printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
+        printk(KERN_DEBUG pr_fmt("intel_idle yielding to %s\n"),
                drv ? drv->name : "none");
         goto init_driver_fail;
     }
 
@@ -1460,8 +1460,8 @@
     if (retval < 0)
         goto hp_setup_fail;
 
-    pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+    pr_debug("lapic_timer_reliable_states 0x%x\n",
             lapic_timer_reliable_states);
 
     return 0;
 
@@ -883,6 +883,8 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
 }
 #endif
 
+extern unsigned int arch_freq_get_on_cpu(int cpu);
+
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;