1
0
Fork 0

cpufreq: intel_pstate: Rework iowait boosting to be less aggressive

The current iowait boosting mechanism in intel_pstate_update_util()
is quite aggressive, as it goes to the maximum P-state right away,
and may cause excessive amounts of energy to be used, which is not
desirable and arguably isn't necessary either.

Follow commit a5a0809bc5 ("cpufreq: schedutil: Make iowait boost
more energy efficient") that reworked the analogous iowait boost
mechanism in the schedutil governor and make the iowait boosting
in intel_pstate_update_util() work along the same lines.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
hifive-unleashed-5.1
Rafael J. Wysocki 2019-02-07 12:51:04 +01:00
parent a8e1942d97
commit b8bd1581aa
1 changed file with 18 additions and 18 deletions

View File

@@ -50,6 +50,8 @@
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
 
+#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
+
 #define EXT_BITS 6
 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -1671,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
 static inline int32_t get_target_pstate(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
-	int32_t busy_frac, boost;
+	int32_t busy_frac;
 	int target, avg_pstate;
 
 	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
 			   sample->tsc);
 
-	boost = cpu->iowait_boost;
-	cpu->iowait_boost >>= 1;
-
-	if (busy_frac < boost)
-		busy_frac = boost;
+	if (busy_frac < cpu->iowait_boost)
+		busy_frac = cpu->iowait_boost;
 
 	sample->busy_scaled = busy_frac * 100;
@@ -1758,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	if (smp_processor_id() != cpu->cpu)
 		return;
 
+	delta_ns = time - cpu->last_update;
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		cpu->iowait_boost = int_tofp(1);
-		cpu->last_update = time;
-		/*
-		 * The last time the busy was 100% so P-state was max anyway
-		 * so avoid overhead of computation.
-		 */
-		if (fp_toint(cpu->sample.busy_scaled) == 100)
-			return;
-
-		goto set_pstate;
+		/* Start over if the CPU may have been idle. */
+		if (delta_ns > TICK_NSEC) {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		} else if (cpu->iowait_boost) {
+			cpu->iowait_boost <<= 1;
+			if (cpu->iowait_boost > int_tofp(1))
+				cpu->iowait_boost = int_tofp(1);
+		} else {
+			cpu->iowait_boost = ONE_EIGHTH_FP;
+		}
 	} else if (cpu->iowait_boost) {
 		/* Clear iowait_boost if the CPU may have been idle. */
-		delta_ns = time - cpu->last_update;
 		if (delta_ns > TICK_NSEC)
 			cpu->iowait_boost = 0;
+		else
+			cpu->iowait_boost >>= 1;
 	}
 	cpu->last_update = time;
 	delta_ns = time - cpu->sample.time;
 	if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
 		return;
 
-set_pstate:
 	if (intel_pstate_sample(cpu, time))
 		intel_pstate_adjust_pstate(cpu);
 }