Merge branch 'pm-cpufreq'

* pm-cpufreq:
  intel_pstate: skip the driver if ACPI has power mgmt option
  cpufreq: ondemand: Remove redundant return statement
  cpufreq: move freq change notifications to cpufreq core
  cpufreq: distinguish drivers that do asynchronous notifications
  cpufreq/intel_pstate: Add static declarations to internal functions
  cpufreq: arm_big_little: reconfigure switcher behavior at run time
  cpufreq: arm_big_little: add in-kernel switching (IKS) support
  ARM: vexpress/TC2: register vexpress-spc cpufreq device
  cpufreq: arm_big_little: add vexpress SPC interface driver
  ARM: vexpress/TC2: add cpu clock support
  ARM: vexpress/TC2: add support for CPU DVFS
Rafael J. Wysocki 2013-11-07 19:26:02 +01:00
commit 15d4cb9013
54 changed files with 1128 additions and 660 deletions
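The centerpiece of the series: frequency-change notifications move out of the individual drivers and into the cpufreq core's __cpufreq_driver_target(), and drivers whose frequency switches complete asynchronously set the new CPUFREQ_ASYNC_NOTIFICATION flag so they keep issuing the notifications themselves. A minimal sketch of what a synchronous ->target_index() driver looks like after this change (illustrative only; the driver name and the trivial clock-based switch are not from this commit):

#include <linux/clk.h>
#include <linux/cpufreq.h>

/* Hypothetical driver state, assumed to be set up in ->init(). */
static struct clk *example_cpu_clk;
static struct cpufreq_frequency_table *example_freq_table;

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/*
	 * No cpufreq_notify_transition() calls are needed any more: the
	 * core sends CPUFREQ_PRECHANGE before invoking this callback and
	 * CPUFREQ_POSTCHANGE after it returns (restoring the old
	 * frequency in the notification if the switch failed), unless
	 * the driver sets CPUFREQ_ASYNC_NOTIFICATION in its flags.
	 */
	return clk_set_rate(example_cpu_clk,
			    example_freq_table[index].frequency * 1000);
}

This is why most of the diff below is deletions: each driver's PRECHANGE/POSTCHANGE boilerplate, including the error path that rolled freqs.new back to freqs.old, collapses into a plain return of the hardware operation's result.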

@@ -66,10 +66,22 @@ config ARCH_VEXPRESS_DCSCB
This is needed to provide CPU and cluster power management
on RTSM implementing big.LITTLE.
config ARCH_VEXPRESS_SPC
bool "Versatile Express Serial Power Controller (SPC)"
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
select PM_OPP
help
The TC2 (A15x2 A7x3) Versatile Express core tile integrates a logic
block called Serial Power Controller (SPC) that provides the interface
between the dual cluster test-chip and the M3 microcontroller that
carries out power management.
config ARCH_VEXPRESS_TC2_PM
bool "Versatile Express TC2 power management"
depends on MCPM
select ARM_CCI
select ARCH_VEXPRESS_SPC
help
Support for CPU and cluster power management on Versatile Express
with a TC2 (A15x2 A7x3) big.LITTLE core tile.

@@ -8,7 +8,8 @@ obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o
CFLAGS_dcscb.o += -march=armv7-a
obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o spc.o
obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o
obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o
CFLAGS_tc2_pm.o += -march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o

@@ -17,14 +17,31 @@
* GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <asm/cacheflush.h>
#define SPCLOG "vexpress-spc: "
#define PERF_LVL_A15 0x00
#define PERF_REQ_A15 0x04
#define PERF_LVL_A7 0x08
#define PERF_REQ_A7 0x0c
#define COMMS 0x10
#define COMMS_REQ 0x14
#define PWC_STATUS 0x18
#define PWC_FLAG 0x1c
/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK 0x24
#define WAKE_INT_RAW 0x28
@@ -36,12 +53,45 @@
#define A15_BX_ADDR0 0x68
#define A7_BX_ADDR0 0x78
/* SPC system config interface registers */
#define SYSCFG_WDATA 0x70
#define SYSCFG_RDATA 0x74
/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE 0xC10
#define A7_PERFVAL_BASE 0xC30
/* Config interface control bits */
#define SYSCFG_START (1 << 31)
#define SYSCFG_SCC (6 << 20)
#define SYSCFG_STAT (14 << 20)
/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK (0x3 << 10)
/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS 2
/*
* Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
* operation, the operation could start just before the jiffies counter
* is incremented, so use a timeout of 20 ms (2 jiffies at HZ=100).
*/
#define TIMEOUT_US 20000
#define MAX_OPPS 8
#define CA15_DVFS 0
#define CA7_DVFS 1
#define SPC_SYS_CFG 2
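/*
 * Each request type above owns a 4-bit field in the SPC status word:
 * bit 0 of the field reports completion and bit 1 an error, hence the
 * masks below shift the bit pair left by (type * 4).
 */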
#define STAT_COMPLETE(type) ((1 << 0) << (type << 2))
#define STAT_ERR(type) ((1 << 1) << (type << 2))
#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type))
struct ve_spc_opp {
unsigned long freq;
unsigned long u_volt;
};
struct ve_spc_drvdata {
void __iomem *baseaddr;
/*
@@ -49,6 +99,12 @@ struct ve_spc_drvdata {
* It corresponds to the A15 processors' MPIDR[15:8] bitfield
*/
u32 a15_clusid;
uint32_t cur_rsp_mask;
uint32_t cur_rsp_stat;
struct semaphore sem;
struct completion done;
struct ve_spc_opp *opps[MAX_CLUSTERS];
int num_opps[MAX_CLUSTERS];
};
static struct ve_spc_drvdata *info;
@@ -157,8 +213,197 @@ void ve_spc_powerdown(u32 cluster, bool enable)
writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
static int ve_spc_get_performance(int cluster, u32 *freq)
{
struct ve_spc_opp *opps = info->opps[cluster];
u32 perf_cfg_reg = 0;
u32 perf;
perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;
perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
if (perf >= info->num_opps[cluster])
return -EINVAL;
opps += perf;
*freq = opps->freq;
return 0;
}
/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
int idx, max_opp = info->num_opps[cluster];
struct ve_spc_opp *opps = info->opps[cluster];
u32 fmin = 0, fmax = ~0, ftmp;
freq /= 1000; /* OPP entries in kHz */
for (idx = 0; idx < max_opp; idx++, opps++) {
ftmp = opps->freq;
if (ftmp >= freq) {
if (ftmp <= fmax)
fmax = ftmp;
} else {
if (ftmp >= fmin)
fmin = ftmp;
}
}
if (fmax != ~0)
return fmax * 1000;
else
return fmin * 1000;
}
static int ve_spc_find_performance_index(int cluster, u32 freq)
{
int idx, max_opp = info->num_opps[cluster];
struct ve_spc_opp *opps = info->opps[cluster];
for (idx = 0; idx < max_opp; idx++, opps++)
if (opps->freq == freq)
break;
return (idx == max_opp) ? -EINVAL : idx;
}
static int ve_spc_waitforcompletion(int req_type)
{
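/*
 * wait_for_completion_interruptible_timeout() returns <0 when
 * interrupted, 0 on timeout and >0 on completion; on completion the
 * status word latched by the IRQ handler tells a successful request
 * (STAT_COMPLETE) apart from a device error (STAT_ERR).
 */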
int ret = wait_for_completion_interruptible_timeout(
&info->done, usecs_to_jiffies(TIMEOUT_US));
if (ret == 0)
ret = -ETIMEDOUT;
else if (ret > 0)
ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
return ret;
}
static int ve_spc_set_performance(int cluster, u32 freq)
{
u32 perf_cfg_reg, perf_stat_reg;
int ret, perf, req_type;
if (cluster_is_a15(cluster)) {
req_type = CA15_DVFS;
perf_cfg_reg = PERF_LVL_A15;
perf_stat_reg = PERF_REQ_A15;
} else {
req_type = CA7_DVFS;
perf_cfg_reg = PERF_LVL_A7;
perf_stat_reg = PERF_REQ_A7;
}
perf = ve_spc_find_performance_index(cluster, freq);
if (perf < 0)
return perf;
if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
return -ETIME;
init_completion(&info->done);
info->cur_rsp_mask = RESPONSE_MASK(req_type);
writel(perf, info->baseaddr + perf_cfg_reg);
ret = ve_spc_waitforcompletion(req_type);
info->cur_rsp_mask = 0;
up(&info->sem);
return ret;
}
static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
int ret;
if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
return -ETIME;
init_completion(&info->done);
info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);
/* Set the control value */
writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
ret = ve_spc_waitforcompletion(SPC_SYS_CFG);
if (ret == 0)
*data = readl(info->baseaddr + SYSCFG_RDATA);
info->cur_rsp_mask = 0;
up(&info->sem);
return ret;
}
static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
struct ve_spc_drvdata *drv_data = data;
uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);
if (info->cur_rsp_mask & status) {
info->cur_rsp_stat = status;
complete(&drv_data->done);
}
return IRQ_HANDLED;
}
/*
* +--------------------------+
* | 31 20 | 19 0 |
* +--------------------------+
* | u_volt | freq(kHz) |
* +--------------------------+
*/
#define MULT_FACTOR 20
#define VOLT_SHIFT 20
#define FREQ_MASK (0xFFFFF)
static int ve_spc_populate_opps(uint32_t cluster)
{
uint32_t data = 0, off, ret, idx;
struct ve_spc_opp *opps;
opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
if (!opps)
return -ENOMEM;
info->opps[cluster] = opps;
off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
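/*
 * Read successive PERFVAL words until the first failed read; idx then
 * holds the number of OPP entries actually populated.
 */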
for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
if (!ret) {
opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
opps->u_volt = data >> VOLT_SHIFT;
} else {
break;
}
}
info->num_opps[cluster] = idx;
return ret;
}
static int ve_init_opp_table(struct device *cpu_dev)
{
int cluster = topology_physical_package_id(cpu_dev->id);
int idx, ret = 0, max_opp = info->num_opps[cluster];
struct ve_spc_opp *opps = info->opps[cluster];
for (idx = 0; idx < max_opp; idx++, opps++) {
ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
if (ret) {
dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
opps->freq, opps->u_volt);
return ret;
}
}
return ret;
}
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_err(SPCLOG "unable to allocate mem\n");
@@ -168,6 +413,25 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
info->baseaddr = baseaddr;
info->a15_clusid = a15_clusid;
if (irq <= 0) {
pr_err(SPCLOG "Invalid IRQ %d\n", irq);
kfree(info);
return -EINVAL;
}
init_completion(&info->done);
readl_relaxed(info->baseaddr + PWC_STATUS);
ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
| IRQF_ONESHOT, "vexpress-spc", info);
if (ret) {
pr_err(SPCLOG "IRQ %d request failed\n", irq);
kfree(info);
return -ENODEV;
}
sema_init(&info->sem, 1);
/*
* Multi-cluster systems may need this data when non-coherent, during
* cluster power-up/power-down. Make sure driver info reaches main
@@ -178,3 +442,103 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
return 0;
}
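/*
 * The rest of this file layers a minimal clock provider over the SPC
 * performance interface, so that DVFS can be driven through the generic
 * clk API: each CPU gets a clock whose rate operations translate into
 * SPC performance requests.
 */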
struct clk_spc {
struct clk_hw hw;
int cluster;
};
#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
static unsigned long spc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_spc *spc = to_clk_spc(hw);
u32 freq;
if (ve_spc_get_performance(spc->cluster, &freq))
return -EIO;
return freq * 1000;
}
static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
unsigned long *parent_rate)
{
struct clk_spc *spc = to_clk_spc(hw);
return ve_spc_round_performance(spc->cluster, drate);
}
static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_spc *spc = to_clk_spc(hw);
return ve_spc_set_performance(spc->cluster, rate / 1000);
}
static struct clk_ops clk_spc_ops = {
.recalc_rate = spc_recalc_rate,
.round_rate = spc_round_rate,
.set_rate = spc_set_rate,
};
static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
struct clk_init_data init;
struct clk_spc *spc;
spc = kzalloc(sizeof(*spc), GFP_KERNEL);
if (!spc) {
pr_err("could not allocate spc clk\n");
return ERR_PTR(-ENOMEM);
}
spc->hw.init = &init;
spc->cluster = topology_physical_package_id(cpu_dev->id);
init.name = dev_name(cpu_dev);
init.ops = &clk_spc_ops;
init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &spc->hw);
}
static int __init ve_spc_clk_init(void)
{
int cpu;
struct clk *clk;
if (!info)
return 0; /* Continue only if SPC is initialised */
if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
pr_err("failed to build OPP table\n");
return -ENODEV;
}
for_each_possible_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_warn("failed to get cpu%d device\n", cpu);
continue;
}
clk = ve_spc_clk_register(cpu_dev);
if (IS_ERR(clk)) {
pr_warn("failed to register cpu%d clock\n", cpu);
continue;
}
if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
pr_warn("failed to register cpu%d clock lookup\n", cpu);
continue;
}
if (ve_init_opp_table(cpu_dev))
pr_warn("failed to initialise cpu%d opp table\n", cpu);
}
platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
return 0;
}
module_init(ve_spc_clk_init);

@@ -15,7 +15,7 @@
#ifndef __SPC_H_
#define __SPC_H_
int __init ve_spc_init(void __iomem *base, u32 a15_clusid);
int __init ve_spc_init(void __iomem *base, u32 a15_clusid, int irq);
void ve_spc_global_wakeup_irq(bool set);
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);

@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>
@@ -311,7 +312,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
static int __init tc2_pm_init(void)
{
int ret;
int ret, irq;
void __iomem *scc;
u32 a15_cluster_id, a7_cluster_id, sys_info;
struct device_node *np;
@@ -336,13 +337,15 @@ static int __init tc2_pm_init(void)
tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
irq = irq_of_parse_and_map(np, 0);
/*
* A subset of the SCC registers is also used to communicate
* with the SPC (power controller). We need to be able to
* drive it very early in the boot process to power up
* processors, so we initialize the SPC driver here.
*/
ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id);
ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
if (ret)
return ret;

@@ -224,3 +224,11 @@ config ARM_TEGRA_CPUFREQ
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
select ARM_BIG_LITTLE_CPUFREQ
depends on ARCH_VEXPRESS_SPC
help
This adds the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.

@@ -74,6 +74,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers

@@ -428,14 +428,10 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
struct drv_cmd cmd;
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
pr_debug("acpi_cpufreq_target %d (%d)\n",
data->freq_table[index].frequency, policy->cpu);
if (unlikely(data == NULL ||
data->acpi_data == NULL || data->freq_table == NULL)) {
return -ENODEV;
@@ -483,23 +479,17 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
else
cmd.mask = cpumask_of(policy->cpu);
freqs.old = perf->states[perf->state].core_frequency * 1000;
freqs.new = data->freq_table[index].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
drv_write(&cmd);
if (acpi_pstate_strict) {
if (!check_freqs(cmd.mask, freqs.new, data)) {
if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
freqs.new = freqs.old;
}
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (!result)
perf->state = next_perf_state;

@@ -24,91 +24,319 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <asm/bL_switcher.h>
#include "arm_big_little.h"
/* Currently we support only two clusters */
#define A15_CLUSTER 0
#define A7_CLUSTER 1
#define MAX_CLUSTERS 2
#ifdef CONFIG_BL_SWITCHER
static bool bL_switching_enabled;
#define is_bL_switching_enabled() bL_switching_enabled
#define set_switching_enabled(x) (bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled() false
#define set_switching_enabled(x) do { } while (0)
#endif
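/*
 * With the switcher enabled, both clusters are presented to cpufreq as
 * one virtual cluster: A7 rates are halved on the way out (VIRT_FREQ)
 * and doubled again when programmed (ACTUAL_FREQ), so that big and
 * LITTLE operating points can share a single merged frequency table.
 */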
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
static unsigned int bL_cpufreq_get(unsigned int cpu)
static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
static struct mutex cluster_lock[MAX_CLUSTERS];
static inline int raw_cpu_to_cluster(int cpu)
{
u32 cur_cluster = cpu_to_cluster(cpu);
return topology_physical_package_id(cpu);
}
return clk_get_rate(clk[cur_cluster]) / 1000;
static inline int cpu_to_cluster(int cpu)
{
return is_bL_switching_enabled() ?
MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
static unsigned int find_cluster_maxfreq(int cluster)
{
int j;
u32 max_freq = 0, cpu_freq;
for_each_online_cpu(j) {
cpu_freq = per_cpu(cpu_last_req_freq, j);
if ((cluster == per_cpu(physical_cluster, j)) &&
(max_freq < cpu_freq))
max_freq = cpu_freq;
}
pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
max_freq);
return max_freq;
}
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
u32 cur_cluster = per_cpu(physical_cluster, cpu);
u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
/* For switcher we use virtual A7 clock rates */
if (is_bL_switching_enabled())
rate = VIRT_FREQ(cur_cluster, rate);
pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
cur_cluster, rate);
return rate;
}
static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
if (is_bL_switching_enabled()) {
pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
cpu));
return per_cpu(cpu_last_req_freq, cpu);
} else {
return clk_get_cpu_rate(cpu);
}
}
static unsigned int
bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
u32 new_rate, prev_rate;
int ret;
bool bLs = is_bL_switching_enabled();
mutex_lock(&cluster_lock[new_cluster]);
if (bLs) {
prev_rate = per_cpu(cpu_last_req_freq, cpu);
per_cpu(cpu_last_req_freq, cpu) = rate;
per_cpu(physical_cluster, cpu) = new_cluster;
new_rate = find_cluster_maxfreq(new_cluster);
new_rate = ACTUAL_FREQ(new_cluster, new_rate);
} else {
new_rate = rate;
}
pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
__func__, cpu, old_cluster, new_cluster, new_rate);
ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
if (WARN_ON(ret)) {
pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
new_cluster);
if (bLs) {
per_cpu(cpu_last_req_freq, cpu) = prev_rate;
per_cpu(physical_cluster, cpu) = old_cluster;
}
mutex_unlock(&cluster_lock[new_cluster]);
return ret;
}
mutex_unlock(&cluster_lock[new_cluster]);
/* Recalc freq for old cluster when switching clusters */
if (old_cluster != new_cluster) {
pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
__func__, cpu, old_cluster, new_cluster);
/* Switch cluster */
bL_switch_request(cpu, new_cluster);
mutex_lock(&cluster_lock[old_cluster]);
/* Set freq of old cluster if there are cpus left on it */
new_rate = find_cluster_maxfreq(old_cluster);
new_rate = ACTUAL_FREQ(old_cluster, new_rate);
if (new_rate) {
pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
__func__, old_cluster, new_rate);
if (clk_set_rate(clk[old_cluster], new_rate * 1000))
pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
__func__, ret, old_cluster);
}
mutex_unlock(&cluster_lock[old_cluster]);
}
return 0;
}
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
u32 cpu = policy->cpu, cur_cluster;
int ret = 0;
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
cur_cluster = cpu_to_cluster(policy->cpu);
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
freqs.old = bL_cpufreq_get(policy->cpu);
freqs.new = freq_table[cur_cluster][index].frequency;
freqs_new = freq_table[cur_cluster][index].frequency;
pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
__func__, cpu, cur_cluster, freqs.old, freqs.new,
freqs.new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
if (ret) {
pr_err("clk_set_rate failed: %d\n", ret);
freqs.new = freqs.old;
if (is_bL_switching_enabled()) {
if ((actual_cluster == A15_CLUSTER) &&
(freqs_new < clk_big_min)) {
new_cluster = A7_CLUSTER;
} else if ((actual_cluster == A7_CLUSTER) &&
(freqs_new > clk_little_max)) {
new_cluster = A15_CLUSTER;
}
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
}
return ret;
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
int count;
for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
;
return count;
}
/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
int i;
uint32_t min_freq = ~0;
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
if (table[i].frequency < min_freq)
min_freq = table[i].frequency;
return min_freq;
}
/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
int i;
uint32_t max_freq = 0;
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
if (table[i].frequency > max_freq)
max_freq = table[i].frequency;
return max_freq;
}
static int merge_cluster_tables(void)
{
int i, j, k = 0, count = 1;
struct cpufreq_frequency_table *table;
for (i = 0; i < MAX_CLUSTERS; i++)
count += get_table_count(freq_table[i]);
table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
if (!table)
return -ENOMEM;
freq_table[MAX_CLUSTERS] = table;
/* Add in reverse order to get freqs in increasing order */
for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
j++) {
table[k].frequency = VIRT_FREQ(i,
freq_table[i][j].frequency);
pr_debug("%s: index: %d, freq: %d\n", __func__, k,
table[k].frequency);
k++;
}
}
table[k].driver_data = k;
table[k].frequency = CPUFREQ_TABLE_END;
pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
return 0;
}
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
if (!freq_table[cluster])
return;
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i;
if (!atomic_dec_return(&cluster_usage[cluster])) {
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
if (atomic_dec_return(&cluster_usage[cluster]))
return;
if (cluster < MAX_CLUSTERS)
return _put_cluster_clk_and_freq_table(cpu_dev);
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev) {
pr_err("%s: failed to get cpu%d device\n", __func__, i);
return;
}
_put_cluster_clk_and_freq_table(cdev);
}
/* free virtual table */
kfree(freq_table[cluster]);
}
static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
char name[14] = "cpu-cluster.";
int ret;
if (atomic_inc_return(&cluster_usage[cluster]) != 1)
if (freq_table[cluster])
return 0;
ret = arm_bL_ops->init_opp_table(cpu_dev);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
goto atomic_dec;
goto out;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (ret) {
dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
goto atomic_dec;
goto out;
}
name[12] = cluster + '0';
@@ -125,13 +353,72 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
ret = PTR_ERR(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
atomic_dec:
atomic_dec(&cluster_usage[cluster]);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i, ret;
if (atomic_inc_return(&cluster_usage[cluster]) != 1)
return 0;
if (cluster < MAX_CLUSTERS) {
ret = _get_cluster_clk_and_freq_table(cpu_dev);
if (ret)
atomic_dec(&cluster_usage[cluster]);
return ret;
}
/*
* Get data for all clusters and fill virtual cluster with a merge of
* both
*/
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev) {
pr_err("%s: failed to get cpu%d device\n", __func__, i);
return -ENODEV;
}
ret = _get_cluster_clk_and_freq_table(cdev);
if (ret)
goto put_clusters;
}
ret = merge_cluster_tables();
if (ret)
goto put_clusters;
/* Assuming 2 clusters, set clk_big_min and clk_little_max */
clk_big_min = get_table_min(freq_table[0]);
clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
__func__, cluster, clk_big_min, clk_little_max);
return 0;
put_clusters:
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
if (!cdev) {
pr_err("%s: failed to get cpu%d device\n", __func__, i);
return -ENODEV;
}
_put_cluster_clk_and_freq_table(cdev);
}
atomic_dec(&cluster_usage[cluster]);
return ret;
}
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
@@ -158,13 +445,23 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
if (cur_cluster < MAX_CLUSTERS) {
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
per_cpu(physical_cluster, policy->cpu) = cur_cluster;
} else {
/* Assumption: during init, we are always running on A15 */
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
}
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
@@ -194,15 +491,47 @@ static struct cpufreq_driver bL_cpufreq_driver = {
CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = bL_cpufreq_set_target,
.get = bL_cpufreq_get,
.get = bL_cpufreq_get_rate,
.init = bL_cpufreq_init,
.exit = bL_cpufreq_exit,
.attr = cpufreq_generic_attr,
};
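/*
 * The switcher notifier unregisters the driver before switching is
 * enabled or disabled and re-registers it afterwards, so that cpufreq
 * always rebuilds its policies against the topology (virtual or
 * physical) actually in effect.
 */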
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
unsigned long action, void *_arg)
{
pr_debug("%s: action: %ld\n", __func__, action);
switch (action) {
case BL_NOTIFY_PRE_ENABLE:
case BL_NOTIFY_PRE_DISABLE:
cpufreq_unregister_driver(&bL_cpufreq_driver);
break;
case BL_NOTIFY_POST_ENABLE:
set_switching_enabled(true);
cpufreq_register_driver(&bL_cpufreq_driver);
break;
case BL_NOTIFY_POST_DISABLE:
set_switching_enabled(false);
cpufreq_register_driver(&bL_cpufreq_driver);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static struct notifier_block bL_switcher_notifier = {
.notifier_call = bL_cpufreq_switcher_notifier,
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
int ret;
int ret, i;
if (arm_bL_ops) {
pr_debug("%s: Already registered: %s, exiting\n", __func__,
@@ -217,16 +546,29 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
arm_bL_ops = ops;
ret = bL_switcher_get_enabled();
set_switching_enabled(ret);
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
ret = cpufreq_register_driver(&bL_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",
__func__, ops->name, ret);
arm_bL_ops = NULL;
} else {
pr_info("%s: Registered platform driver: %s\n", __func__,
ops->name);
ret = bL_switcher_register_notifier(&bL_switcher_notifier);
if (ret) {
cpufreq_unregister_driver(&bL_cpufreq_driver);
arm_bL_ops = NULL;
} else {
pr_info("%s: Registered platform driver: %s\n",
__func__, ops->name);
}
}
bL_switcher_put_enabled();
return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
@@ -239,7 +581,10 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
return;
}
bL_switcher_get_enabled();
bL_switcher_unregister_notifier(&bL_switcher_notifier);
cpufreq_unregister_driver(&bL_cpufreq_driver);
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
arm_bL_ops->name);
arm_bL_ops = NULL;

@@ -34,11 +34,6 @@ struct cpufreq_arm_bL_ops {
int (*init_opp_table)(struct device *cpu_dev);
};
static inline int cpu_to_cluster(int cpu)
{
return topology_physical_package_id(cpu);
}
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);

@@ -37,27 +37,23 @@ static unsigned long loops_per_jiffy_ref;
static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_freqs freqs;
unsigned int old_freq, new_freq;
freqs.old = at32_get_speed(0);
freqs.new = freq_table[index].frequency;
old_freq = at32_get_speed(0);
new_freq = freq_table[index].frequency;
if (!ref_freq) {
ref_freq = freqs.old;
ref_freq = old_freq;
loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (freqs.old < freqs.new)
if (old_freq < new_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
loops_per_jiffy_ref, ref_freq, freqs.new);
clk_set_rate(cpuclk, freqs.new * 1000);
if (freqs.new < freqs.old)
loops_per_jiffy_ref, ref_freq, new_freq);
clk_set_rate(cpuclk, new_freq * 1000);
if (new_freq < old_freq)
boot_cpu_data.loops_per_jiffy = cpufreq_scale(
loops_per_jiffy_ref, ref_freq, freqs.new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("cpufreq: set frequency %u Hz\n", freqs.new * 1000);
loops_per_jiffy_ref, ref_freq, new_freq);
return 0;
}

@@ -132,27 +132,23 @@ static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
#ifndef CONFIG_BF60x
unsigned int plldiv;
#endif
struct cpufreq_freqs freqs;
static unsigned long lpj_ref;
static unsigned int lpj_ref_freq;
unsigned int old_freq, new_freq;
int ret = 0;
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
#endif
freqs.old = bfin_getfreq_khz(0);
freqs.new = bfin_freq_table[index].frequency;
old_freq = bfin_getfreq_khz(0);
new_freq = bfin_freq_table[index].frequency;
pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
freqs.new, freqs.new, freqs.old);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifndef CONFIG_BF60x
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
bfin_write_PLL_DIV(plldiv);
#else
ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
ret = cpu_set_cclk(policy->cpu, new_freq * 1000);
if (ret != 0) {
WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
return ret;
@@ -168,17 +164,13 @@ static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
#endif
if (!lpj_ref_freq) {
lpj_ref = loops_per_jiffy;
lpj_ref_freq = freqs.old;
lpj_ref_freq = old_freq;
}
if (freqs.new != freqs.old) {
if (new_freq != old_freq) {
loops_per_jiffy = cpufreq_scale(lpj_ref,
lpj_ref_freq, freqs.new);
lpj_ref_freq, new_freq);
}
/* TODO: just test case for cycles clock source, remove later */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("cpufreq: done\n");
return ret;
}

@@ -37,20 +37,19 @@ static unsigned int cpu0_get_speed(unsigned int cpu)
static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_freqs freqs;
struct dev_pm_opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
freq_exact = freq_Hz;
freqs.new = freq_Hz / 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
freq_exact = freq_Hz;
new_freq = freq_Hz / 1000;
old_freq = clk_get_rate(cpu_clk) / 1000;
if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
@@ -58,9 +57,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
if (IS_ERR(opp)) {
rcu_read_unlock();
pr_err("failed to find OPP for %ld\n", freq_Hz);
freqs.new = freqs.old;
ret = PTR_ERR(opp);
goto post_notify;
return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);
rcu_read_unlock();
@@ -69,16 +66,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
}
pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
freqs.new / 1000, volt ? volt / 1000 : -1);
old_freq / 1000, volt_old ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
freqs.new = freqs.old;
goto post_notify;
return ret;
}
}
@@ -87,23 +83,18 @@ static int cpu0_set_target(struct cpufreq_policy *policy, unsigned int index)
pr_err("failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
freqs.new = freqs.old;
goto post_notify;
return ret;
}
/* scaling down? scale voltage after frequency */
if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
clk_set_rate(cpu_clk, freqs.old * 1000);
freqs.new = freqs.old;
clk_set_rate(cpu_clk, old_freq * 1000);
}
}
post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}

@@ -1669,6 +1669,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
retval = cpufreq_driver->target(policy, target_freq, relation);
else if (cpufreq_driver->target_index) {
struct cpufreq_frequency_table *freq_table;
struct cpufreq_freqs freqs;
bool notify;
int index;
freq_table = cpufreq_frequency_get_table(policy->cpu);
@@ -1684,10 +1686,42 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
goto out;
}
if (freq_table[index].frequency == policy->cur)
if (freq_table[index].frequency == policy->cur) {
retval = 0;
else
retval = cpufreq_driver->target_index(policy, index);
goto out;
}
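/*
 * Drivers flagged CPUFREQ_ASYNC_NOTIFICATION send the PRECHANGE and
 * POSTCHANGE notifications themselves once the switch actually
 * happens; for all other drivers the core wraps target_index() here.
 */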
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
if (notify) {
freqs.old = policy->cur;
freqs.new = freq_table[index].frequency;
freqs.flags = 0;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
__func__, policy->cpu, freqs.old,
freqs.new);
cpufreq_notify_transition(policy, &freqs,
CPUFREQ_PRECHANGE);
}
retval = cpufreq_driver->target_index(policy, index);
if (retval)
pr_err("%s: Failed to change cpu frequency: %d\n",
__func__, retval);
if (notify) {
/*
* Notify with old freq in case we failed to change
* frequency
*/
if (retval)
freqs.new = freqs.old;
cpufreq_notify_transition(policy, &freqs,
CPUFREQ_POSTCHANGE);
}
}
out:

@@ -168,7 +168,6 @@ static void od_check_cpu(int cpu, unsigned int load)
dbs_info->rate_mult =
od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
return;
} else {
/* Calculate the next frequency proportional to load */
unsigned int freq_next;

@@ -29,15 +29,9 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
struct cpufreq_freqs freqs;
reg_clkgen_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
freqs.new = cris_freq_table[state].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -50,8 +44,6 @@ static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

@@ -29,15 +29,9 @@ static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
struct cpufreq_freqs freqs;
reg_config_rw_clk_ctrl clk_ctrl;
clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
freqs.new = cris_freq_table[state].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
local_irq_disable();
/* Even though we may be SMP they will share the same clock
@@ -50,8 +44,6 @@ static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

@@ -68,46 +68,36 @@ static unsigned int davinci_getspeed(unsigned int cpu)
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
int ret = 0;
struct cpufreq_freqs freqs;
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
struct clk *armclk = cpufreq.armclk;
unsigned int old_freq, new_freq;
int ret = 0;
freqs.old = davinci_getspeed(0);
freqs.new = pdata->freq_table[idx].frequency;
dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
old_freq = davinci_getspeed(0);
new_freq = pdata->freq_table[idx].frequency;
/* if moving to higher frequency, up the voltage beforehand */
if (pdata->set_voltage && freqs.new > freqs.old) {
if (pdata->set_voltage && new_freq > old_freq) {
ret = pdata->set_voltage(idx);
if (ret)
goto out;
return ret;
}
ret = clk_set_rate(armclk, idx);
if (ret)
goto out;
return ret;
if (cpufreq.asyncclk) {
ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
if (ret)
goto out;
return ret;
}
/* if moving to lower freq, lower the voltage after lowering freq */
if (pdata->set_voltage && freqs.new < freqs.old)
if (pdata->set_voltage && new_freq < old_freq)
pdata->set_voltage(idx);
out:
if (ret)
freqs.new = freqs.old;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
return 0;
}
static int davinci_cpu_init(struct cpufreq_policy *policy)

@@ -22,28 +22,8 @@ static struct clk *armss_clk;
static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
int ret;
freqs.old = policy->cur;
freqs.new = freq_table[index].frequency;
/* pre-change notification */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* update armss clk frequency */
ret = clk_set_rate(armss_clk, freqs.new * 1000);
if (ret) {
pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
freqs.new * 1000, ret);
freqs.new = freqs.old;
}
/* post change notification */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
}
static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)

@@ -107,15 +107,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
struct cpufreq_policy *policy,
u32 dest_state)
{
struct cpufreq_freqs freqs;
u32 lo, hi;
int err = 0;
int i;
freqs.old = eps_get(policy->cpu);
freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i = 0;
@@ -124,8 +118,7 @@
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
err = -ENODEV;
goto postchange;
return -ENODEV;
}
}
/* Set new multiplier and voltage */
@@ -137,16 +130,10 @@
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
err = -ENODEV;
goto postchange;
return -ENODEV;
}
} while (lo & ((1 << 16) | (1 << 17)));
/* Return current frequency */
postchange:
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
#ifdef DEBUG
{
u8 current_multiplier, current_voltage;
@@ -161,11 +148,7 @@ postchange:
current_multiplier);
}
#endif
if (err)
freqs.new = freqs.old;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return err;
return 0;
}
static int eps_target(struct cpufreq_policy *policy, unsigned int index)

@@ -108,17 +108,6 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
static int elanfreq_target(struct cpufreq_policy *policy,
unsigned int state)
{
struct cpufreq_freqs freqs;
freqs.old = elanfreq_get_cpu_frequency(0);
freqs.new = elan_multiplier[state].clock;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
elan_multiplier[state].clock);
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
@@ -149,8 +138,6 @@ static int elanfreq_target(struct cpufreq_policy *policy,
udelay(10000);
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}
/*

@@ -25,7 +25,6 @@
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
static struct cpufreq_freqs freqs;
static unsigned int locking_frequency;
static bool frequency_locked;
@@ -59,18 +58,18 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
unsigned int old_freq;
int index, old_index;
int ret = 0;
freqs.old = policy->cur;
freqs.new = target_freq;
old_freq = policy->cur;
/*
* The policy max may have been changed, so we cannot get a proper
* old_index with cpufreq_frequency_table_target(). Thus, ignore the
* policy and get the index from the raw frequency table.
*/
old_index = exynos_cpufreq_get_index(freqs.old);
old_index = exynos_cpufreq_get_index(old_freq);
if (old_index < 0) {
ret = old_index;
goto out;
@@ -95,17 +94,14 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
}
arm_volt = volt_table[index];
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
if ((target_freq > old_freq) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, arm_volt);
freqs.new = freqs.old;
goto post_notify;
return ret;
}
}
@@ -115,22 +111,15 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, safe_arm_volt);
freqs.new = freqs.old;
goto post_notify;
return ret;
}
}
exynos_info->set_freq(old_index, index);
post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (ret)
goto out;
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
((freqs.new > freqs.old) && safe_arm_volt)) {
if ((target_freq < old_freq) ||
((target_freq > old_freq) && safe_arm_volt)) {
/* down the voltage after frequency change */
ret = regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
@@ -142,7 +131,6 @@ post_notify:
}
out:
cpufreq_cpu_put(policy);
return ret;

@@ -312,7 +312,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = exynos_target,
.get = exynos_getspeed,

@@ -141,7 +141,7 @@
{
int ret = 0;
u32 value = 0;
struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int retval;
@@ -168,13 +167,6 @@
pr_debug("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
/* cpufreq frequency struct */
cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
cpufreq_freqs.new = data->freq_table[state].frequency;
/* notify cpufreq */
cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
/*
* First we write the target state's 'control' value to the
* control_register.
@@ -186,22 +178,11 @@
ret = processor_set_pstate(value);
if (ret) {
unsigned int tmp = cpufreq_freqs.new;
cpufreq_notify_transition(policy, &cpufreq_freqs,
CPUFREQ_POSTCHANGE);
cpufreq_freqs.new = cpufreq_freqs.old;
cpufreq_freqs.old = tmp;
cpufreq_notify_transition(policy, &cpufreq_freqs,
CPUFREQ_PRECHANGE);
cpufreq_notify_transition(policy, &cpufreq_freqs,
CPUFREQ_POSTCHANGE);
printk(KERN_WARNING "Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
data->acpi_data.state = state;
retval = 0;

@@ -42,14 +42,14 @@ static unsigned int imx6q_get_speed(unsigned int cpu)
static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_freqs freqs;
struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
unsigned int old_freq, new_freq;
int ret;
freqs.new = freq_table[index].frequency;
freq_hz = freqs.new * 1000;
freqs.old = clk_get_rate(arm_clk) / 1000;
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
old_freq = clk_get_rate(arm_clk) / 1000;
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
@@ -64,26 +64,23 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
freqs.old / 1000, volt_old / 1000,
freqs.new / 1000, volt / 1000);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
old_freq / 1000, volt_old / 1000,
new_freq / 1000, volt / 1000);
/* scaling up? scale voltage before frequency */
if (freqs.new > freqs.old) {
if (new_freq > old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
freqs.new = freqs.old;
goto post_notify;
return ret;
}
/*
* Need to increase vddpu and vddsoc for safety
* if we are about to run at 1.2 GHz.
*/
if (freqs.new == FREQ_1P2_GHZ / 1000) {
if (new_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_HIGH, 0);
regulator_set_voltage_tol(soc_reg,
@@ -103,21 +100,20 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
clk_set_parent(step_clk, pll2_pfd2_396m_clk);
clk_set_parent(pll1_sw_clk, step_clk);
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
clk_set_rate(pll1_sys_clk, freqs.new * 1000);
clk_set_rate(pll1_sys_clk, new_freq * 1000);
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
}
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(arm_clk, freqs.new * 1000);
ret = clk_set_rate(arm_clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
freqs.new = freqs.old;
goto post_notify;
return ret;
}
/* scaling down? scale voltage after frequency */
if (freqs.new < freqs.old) {
if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_warn(cpu_dev,
@@ -125,7 +121,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
ret = 0;
}
if (freqs.old == FREQ_1P2_GHZ / 1000) {
if (old_freq == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_NORMAL, 0);
regulator_set_voltage_tol(soc_reg,
@@ -133,10 +129,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
}
}
post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
return 0;
}
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)

@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>
#include <asm/div64.h>
@@ -759,7 +760,7 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
void copy_pid_params(struct pstate_adjust_policy *policy)
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.p_gain_pct = policy->p_gain_pct;
@@ -769,7 +770,7 @@ void copy_pid_params(struct pstate_adjust_policy *policy)
pid_params.setpoint = policy->setpoint;
}
void copy_cpu_funcs(struct pstate_funcs *funcs)
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
pstate_funcs.get_min = funcs->get_min;
@@ -777,6 +778,72 @@ void copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.set = funcs->set;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>
static bool intel_pstate_no_acpi_pss(void)
{
int i;
for_each_possible_cpu(i) {
acpi_status status;
union acpi_object *pss;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_processor *pr = per_cpu(processors, i);
if (!pr)
continue;
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
if (ACPI_FAILURE(status))
continue;
pss = buffer.pointer;
if (pss && pss->type == ACPI_TYPE_PACKAGE) {
kfree(pss);
return false;
}
kfree(pss);
}
return true;
}
struct hw_vendor_info {
u16 valid;
char oem_id[ACPI_OEM_ID_SIZE];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
{1, "HP ", "ProLiant"},
{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;
if (acpi_disabled
|| ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
return false;
for (v_info = vendor_info; v_info->valid; v_info++) {
if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
&& !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
&& intel_pstate_no_acpi_pss())
return true;
}
return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@@ -790,6 +857,13 @@ static int __init intel_pstate_init(void)
if (!id)
return -ENODEV;
/*
* The Intel pstate driver will be ignored if the platform
* firmware has its own power management modes.
*/
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;
cpu_info = (struct cpu_defaults *)id->driver_data;
copy_pid_params(&cpu_info->pid_policy);

@@ -58,48 +58,34 @@ static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
unsigned int state = kirkwood_freq_table[index].driver_data;
unsigned long reg;
freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
freqs.new = kirkwood_freq_table[index].frequency;
local_irq_disable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Disable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg |= CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
kirkwood_freq_table[index].frequency);
dev_dbg(priv.dev, "old frequency was %i KHz\n",
kirkwood_cpufreq_get_cpu_frequency(0));
if (freqs.old != freqs.new) {
local_irq_disable();
/* Disable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg |= CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
switch (state) {
case STATE_CPU_FREQ:
clk_disable(priv.powersave_clk);
break;
case STATE_DDR_FREQ:
clk_enable(priv.powersave_clk);
break;
}
/* Wait-for-Interrupt, while the hardware changes frequency */
cpu_do_idle();
/* Enable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg &= ~CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
local_irq_enable();
switch (state) {
case STATE_CPU_FREQ:
clk_disable(priv.powersave_clk);
break;
case STATE_DDR_FREQ:
clk_enable(priv.powersave_clk);
break;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
/* Wait-for-Interrupt, while the hardware changes frequency */
cpu_do_idle();
/* Enable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg &= ~CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
local_irq_enable();
return 0;
}

@@ -57,7 +57,6 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
{
unsigned int cpu = policy->cpu;
cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
unsigned int freq;
cpus_allowed = current->cpus_allowed;
@@ -67,26 +66,11 @@
((cpu_clock_freq / 1000) *
loongson2_clockmod_table[index].driver_data) / 8;
pr_debug("cpufreq: requested frequency %u Hz\n",
loongson2_clockmod_table[index].frequency * 1000);
freqs.old = loongson2_cpufreq_get(cpu);
freqs.new = freq;
freqs.flags = 0;
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
clk_set_rate(cpuclk, freq);
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
pr_debug("cpufreq: set frequency %u kHz\n", freq);
return 0;
}

@@ -69,8 +69,6 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
*/
static int maple_pmode_cur;
static DEFINE_MUTEX(maple_switch_mutex);
static const u32 *maple_pmode_data;
static int maple_pmode_max;
@@ -133,21 +131,7 @@ static int maple_scom_query_freq(void)
static int maple_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
int rc;
mutex_lock(&maple_switch_mutex);
freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
freqs.new = maple_cpu_freqs[index].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
rc = maple_scom_switch_freq(index);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&maple_switch_mutex);
return rc;
return maple_scom_switch_freq(index);
}
static unsigned int maple_cpufreq_get_speed(unsigned int cpu)

@@ -53,15 +53,14 @@ static unsigned int omap_getspeed(unsigned int cpu)
static int omap_target(struct cpufreq_policy *policy, unsigned int index)
{
int r, ret = 0;
struct cpufreq_freqs freqs;
struct dev_pm_opp *opp;
unsigned long freq, volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
freqs.old = omap_getspeed(policy->cpu);
freqs.new = freq_table[index].frequency;
old_freq = omap_getspeed(policy->cpu);
new_freq = freq_table[index].frequency;
freq = freqs.new * 1000;
freq = new_freq * 1000;
ret = clk_round_rate(mpu_clk, freq);
if (IS_ERR_VALUE(ret)) {
dev_warn(mpu_dev,
@@ -77,7 +76,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
__func__, freqs.new);
__func__, new_freq);
return -EINVAL;
}
volt = dev_pm_opp_get_voltage(opp);
@@ -87,43 +86,32 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
}
dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
freqs.new / 1000, volt ? volt / 1000 : -1);
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
old_freq / 1000, volt_old ? volt_old / 1000 : -1,
new_freq / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
if (mpu_reg && (freqs.new > freqs.old)) {
if (mpu_reg && (new_freq > old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
__func__);
freqs.new = freqs.old;
goto done;
return r;
}
}
ret = clk_set_rate(mpu_clk, freqs.new * 1000);
ret = clk_set_rate(mpu_clk, new_freq * 1000);
/* scaling down? scale voltage after frequency */
if (mpu_reg && (freqs.new < freqs.old)) {
if (mpu_reg && (new_freq < old_freq)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
if (r < 0) {
dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
__func__);
ret = clk_set_rate(mpu_clk, freqs.old * 1000);
freqs.new = freqs.old;
goto done;
clk_set_rate(mpu_clk, old_freq * 1000);
return r;
}
}
freqs.new = omap_getspeed(policy->cpu);
done:
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}

@@ -107,15 +107,8 @@ static struct cpufreq_frequency_table p4clockmod_table[] = {
static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_freqs freqs;
int i;
freqs.old = cpufreq_p4_get(policy->cpu);
freqs.new = stock_freq * p4clockmod_table[index].driver_data / 8;
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* run on each logical CPU,
* see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
@ -123,9 +116,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
for_each_cpu(i, policy->cpus)
cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

View File

@ -51,8 +51,6 @@
static void __iomem *sdcpwr_mapbase;
static void __iomem *sdcasr_mapbase;
static DEFINE_MUTEX(pas_switch_mutex);
/* Current astate; used when waking up from power savings on
* one core, in case the other core has switched states during
* the idle time.
@ -242,15 +240,8 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int pas_cpufreq_target(struct cpufreq_policy *policy,
unsigned int pas_astate_new)
{
struct cpufreq_freqs freqs;
int i;
freqs.old = policy->cur;
freqs.new = pas_freqs[pas_astate_new].frequency;
mutex_lock(&pas_switch_mutex);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
policy->cpu,
pas_freqs[pas_astate_new].frequency,
@ -261,10 +252,7 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
for_each_online_cpu(i)
set_astate(i, pas_astate_new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&pas_switch_mutex);
ppc_proc_freq = freqs.new * 1000ul;
ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
return 0;
}

View File

@ -331,21 +331,11 @@ static int pmu_set_cpu_speed(int low_speed)
return 0;
}
static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
int notify)
static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
{
struct cpufreq_freqs freqs;
unsigned long l3cr;
static unsigned long prev_l3cr;
freqs.old = cur_freq;
freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
if (freqs.old == freqs.new)
return 0;
if (notify)
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (speed_mode == CPUFREQ_LOW &&
cpu_has_feature(CPU_FTR_L3CR)) {
l3cr = _get_L3CR();
@ -361,8 +351,6 @@ static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
_set_L3CR(prev_l3cr);
}
if (notify)
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
return 0;
@ -378,7 +366,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
{
int rc;
rc = do_set_cpu_speed(policy, index, 1);
rc = do_set_cpu_speed(policy, index);
ppc_proc_freq = cur_freq * 1000ul;
return rc;
@ -420,7 +408,7 @@ static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
no_schedule = 1;
sleep_freq = cur_freq;
if (cur_freq == low_freq && !is_pmu_based)
do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
do_set_cpu_speed(policy, CPUFREQ_HIGH);
return 0;
}
@ -437,7 +425,7 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
* probably high speed due to our suspend() routine
*/
do_set_cpu_speed(policy, sleep_freq == low_freq ?
CPUFREQ_LOW : CPUFREQ_HIGH, 0);
CPUFREQ_LOW : CPUFREQ_HIGH);
ppc_proc_freq = cur_freq * 1000ul;

View File

@ -79,8 +79,6 @@ static void (*g5_switch_volt)(int speed_mode);
static int (*g5_switch_freq)(int speed_mode);
static int (*g5_query_freq)(void);
static DEFINE_MUTEX(g5_switch_mutex);
static unsigned long transition_latency;
#ifdef CONFIG_PMAC_SMU
@ -314,21 +312,7 @@ static int g5_pfunc_query_freq(void)
static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_freqs freqs;
int rc;
mutex_lock(&g5_switch_mutex);
freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
freqs.new = g5_cpu_freqs[index].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
rc = g5_switch_freq(index);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&g5_switch_mutex);
return rc;
return g5_switch_freq(index);
}
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)

View File

@ -1204,6 +1204,7 @@ out:
}
static struct cpufreq_driver cpufreq_amd64_driver = {
.flags = CPUFREQ_ASYNC_NOTIFICATION,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,

View File

@ -69,8 +69,6 @@ static const struct soc_data sdata[] = {
static u32 min_cpufreq;
static const u32 *fmask;
/* serialize frequency changes */
static DEFINE_MUTEX(cpufreq_lock);
static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
/* cpumask in a cluster */
@ -253,26 +251,11 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int corenet_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
struct clk *parent;
int ret;
struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
freqs.old = policy->cur;
freqs.new = data->table[index].frequency;
mutex_lock(&cpufreq_lock);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
parent = of_clk_get(data->parent, data->table[index].driver_data);
ret = clk_set_parent(data->clk, parent);
if (ret)
freqs.new = freqs.old;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&cpufreq_lock);
return ret;
return clk_set_parent(data->clk, parent);
}
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {

View File

@ -30,9 +30,6 @@
#include "ppc_cbe_cpufreq.h"
static DEFINE_MUTEX(cbe_switch_mutex);
/* the CBE supports an 8 step frequency scaling */
static struct cpufreq_frequency_table cbe_freqs[] = {
{1, 0},
@ -131,27 +128,13 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
unsigned int cbe_pmode_new)
{
int rc;
struct cpufreq_freqs freqs;
freqs.old = policy->cur;
freqs.new = cbe_freqs[cbe_pmode_new].frequency;
mutex_lock(&cbe_switch_mutex);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("setting frequency for cpu %d to %d kHz, " \
"1/%d of max frequency\n",
policy->cpu,
cbe_freqs[cbe_pmode_new].frequency,
cbe_freqs[cbe_pmode_new].driver_data);
rc = set_pmode(policy->cpu, cbe_pmode_new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
mutex_unlock(&cbe_switch_mutex);
return rc;
return set_pmode(policy->cpu, cbe_pmode_new);
}
static struct cpufreq_driver cbe_cpufreq_driver = {

View File

@ -271,7 +271,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freq_settings;
struct cpufreq_freqs freqs;
unsigned long flags;
unsigned int new_freq_cpu, new_freq_mem;
unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
@ -282,24 +281,17 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
new_freq_cpu = pxa_freq_settings[idx].khz;
new_freq_mem = pxa_freq_settings[idx].membus;
freqs.old = policy->cur;
freqs.new = new_freq_cpu;
if (freq_debug)
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));
if (vcc_core && freqs.new > freqs.old)
if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
if (ret)
return ret;
/*
* Tell everyone what we're about to do...
* you should add a notify client with any platform specific
* Vcc changing capability
*/
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (ret)
return ret;
}
/* Calculate the next MDREFR. If we're slowing down the SDRAM clock
* we need to preset the smaller DRI before the change. If we're
@ -349,13 +341,6 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
: "r4", "r5");
local_irq_restore(flags);
/*
* Tell everyone what we've just done...
* you should add a notify client with any platform specific
* SDRAM refresh timer adjustments
*/
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
/*
* Even if voltage setting fails, we don't report it, as the frequency
* change succeeded. The voltage reduction is not a critical failure,
@ -365,7 +350,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
* bug is triggered (seems a deadlock). Should anybody find out where,
* the "return 0" should become a "return ret".
*/
if (vcc_core && freqs.new < freqs.old)
if (vcc_core && new_freq_cpu < policy->cur)
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
return 0;

View File

@ -158,7 +158,6 @@ static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
{
struct pxa3xx_freq_info *next;
struct cpufreq_freqs freqs;
unsigned long flags;
if (policy->cpu != 0)
@ -166,22 +165,11 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
next = &pxa3xx_freqs[index];
freqs.old = policy->cur;
freqs.new = next->cpufreq_mhz * 1000;
pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
freqs.old / 1000, freqs.new / 1000,
(freqs.old == freqs.new) ? " (skipped)" : "");
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
local_irq_save(flags);
__update_core_freq(next);
__update_bus_freq(next);
local_irq_restore(flags);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

View File

@ -220,7 +220,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
struct cpufreq_freqs freqs;
unsigned int new_freq;
int idx, ret, to_dvs = 0;
mutex_lock(&cpufreq_lock);
@ -237,25 +237,14 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
goto out;
}
freqs.flags = 0;
freqs.old = s3c_freq->is_dvs ? FREQ_DVS
: clk_get_rate(s3c_freq->armclk) / 1000;
/* When leaving dvs mode, always switch the armdiv to the hclk rate.
* The S3C2416 has stability issues when switching directly to
* higher frequencies.
*/
freqs.new = (s3c_freq->is_dvs && !to_dvs)
new_freq = (s3c_freq->is_dvs && !to_dvs)
? clk_get_rate(s3c_freq->hclk) / 1000
: s3c_freq->freq_table[index].frequency;
pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
if (!to_dvs && freqs.old == freqs.new)
goto out;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (to_dvs) {
pr_debug("cpufreq: enter dvs\n");
ret = s3c2416_cpufreq_enter_dvs(s3c_freq, idx);
@ -263,12 +252,10 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
pr_debug("cpufreq: leave dvs\n");
ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
} else {
pr_debug("cpufreq: change armdiv to %dkHz\n", freqs.new);
ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
out:
mutex_unlock(&cpufreq_lock);

View File

@ -65,54 +65,46 @@ static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
int ret;
struct cpufreq_freqs freqs;
struct s3c64xx_dvfs *dvfs;
unsigned int old_freq, new_freq;
int ret;
freqs.old = clk_get_rate(armclk) / 1000;
freqs.new = s3c64xx_freq_table[index].frequency;
freqs.flags = 0;
old_freq = clk_get_rate(armclk) / 1000;
new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new > freqs.old) {
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
freqs.new = freqs.old;
goto post_notify;
new_freq, ret);
return ret;
}
}
#endif
ret = clk_set_rate(armclk, freqs.new * 1000);
ret = clk_set_rate(armclk, new_freq * 1000);
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
freqs.new, ret);
freqs.new = freqs.old;
new_freq, ret);
return ret;
}
post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (ret)
goto err;
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new < freqs.old) {
if (vddarm && new_freq < old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err_clk;
new_freq, ret);
if (clk_set_rate(armclk, old_freq * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
return ret;
}
}
#endif
@ -121,14 +113,6 @@ post_notify:
clk_get_rate(armclk) / 1000);
return 0;
err_clk:
if (clk_set_rate(armclk, freqs.old * 1000) < 0)
pr_err("Failed to restore original clock rate\n");
err:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}
#ifdef CONFIG_REGULATOR

View File

@ -26,7 +26,6 @@
static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);
/* APLL M,P,S values for 1G/800Mhz */
@ -179,6 +178,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
unsigned int priv_index;
unsigned int pll_changing = 0;
unsigned int bus_speed_changing = 0;
unsigned int old_freq, new_freq;
int arm_volt, int_volt;
int ret = 0;
@ -193,12 +193,12 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
goto exit;
}
freqs.old = s5pv210_getspeed(0);
freqs.new = s5pv210_freq_table[index].frequency;
old_freq = s5pv210_getspeed(0);
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
freqs.old, CPUFREQ_RELATION_H,
old_freq, CPUFREQ_RELATION_H,
&priv_index)) {
ret = -EINVAL;
goto exit;
@ -207,7 +207,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
if (freqs.new > freqs.old) {
if (new_freq > old_freq) {
ret = regulator_set_voltage(arm_regulator,
arm_volt, arm_volt_max);
if (ret)
@ -219,8 +219,6 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
goto exit;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Check if there need to change PLL */
if ((index == L0) || (priv_index == L0))
pll_changing = 1;
@ -431,9 +429,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
}
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (freqs.new < freqs.old) {
if (new_freq < old_freq) {
regulator_set_voltage(int_regulator,
int_volt, int_volt_max);

View File

@ -180,22 +180,17 @@ static void sa1100_update_dram_timings(int current_speed, int new_speed)
static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
unsigned int cur = sa11x0_getspeed(0);
struct cpufreq_freqs freqs;
unsigned int new_freq;
freqs.old = cur;
freqs.new = sa11x0_freq_table[ppcr].frequency;
new_freq = sa11x0_freq_table[ppcr].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (freqs.new > cur)
sa1100_update_dram_timings(cur, freqs.new);
if (new_freq > cur)
sa1100_update_dram_timings(cur, new_freq);
PPCR = ppcr;
if (freqs.new < cur)
sa1100_update_dram_timings(cur, freqs.new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (new_freq < cur)
sa1100_update_dram_timings(cur, new_freq);
return 0;
}

View File

@ -232,15 +232,11 @@ sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
{
struct sdram_params *sdram = &sdram_params;
struct cpufreq_freqs freqs;
struct sdram_info sd;
unsigned long flags;
unsigned int unused;
freqs.old = sa11x0_getspeed(0);
freqs.new = sa11x0_freq_table[ppcr].frequency;
sdram_calculate_timing(&sd, freqs.new, sdram);
sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
#if 0
/*
@ -259,8 +255,6 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
sd.mdcas[2] = 0xaaaaaaaa;
#endif
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/*
* The clock could be going away for some time. Set the SDRAMs
* to refresh rapidly (every 64 memory clock cycles). To get
@ -305,9 +299,7 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
/*
* Now, return the SDRAM refresh back to normal.
*/
sdram_update_refresh(freqs.new, sdram);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
return 0;
}

View File

@ -56,17 +56,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
{
struct cpufreq_freqs freqs;
u8 clockspeed_reg;
freqs.old = sc520_freq_get_cpu_frequency(0);
freqs.new = sc520_freq_table[state].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
pr_debug("attempting to set frequency to %i kHz\n",
sc520_freq_table[state].frequency);
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
@ -74,8 +65,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
local_irq_enable();
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

View File

@ -251,7 +251,6 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
unsigned long new_bits, new_freq;
unsigned long clock_tick, divisor, old_divisor, estar;
cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@ -265,16 +264,10 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
old_divisor = estar_to_divisor(estar);
freqs.old = clock_tick / old_divisor;
freqs.new = new_freq;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (old_divisor != divisor)
us2e_transition(estar, new_bits, clock_tick * 1000,
old_divisor, divisor);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
set_cpus_allowed_ptr(current, &cpus_allowed);
return 0;

View File

@ -98,7 +98,6 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
unsigned int cpu = policy->cpu;
unsigned long new_bits, new_freq, reg;
cpumask_t cpus_allowed;
struct cpufreq_freqs freqs;
cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
@ -124,16 +123,10 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
reg = read_safari_cfg();
freqs.old = get_current_freq(cpu, reg);
freqs.new = new_freq;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
reg &= ~SAFARI_CFG_DIV_MASK;
reg |= new_bits;
write_safari_cfg(reg);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
set_cpus_allowed_ptr(current, &cpus_allowed);
return 0;

View File

@ -107,12 +107,10 @@ static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
static int spear_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
long newfreq;
struct clk *srcclk;
int ret, mult = 1;
freqs.old = spear_cpufreq_get(0);
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
if (of_machine_is_compatible("st,spear1340")) {
@ -145,23 +143,14 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
return newfreq;
}
freqs.new = newfreq / 1000;
freqs.new /= mult;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
else
ret = clk_set_rate(spear_cpufreq.clk, newfreq);
/* Get current rate after clk_set_rate, in case of failure */
if (ret) {
if (ret)
pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
}

View File

@ -423,9 +423,8 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
int retval = 0;
unsigned int j, first_cpu, tmp;
unsigned int j, first_cpu;
struct cpufreq_frequency_table *op_points;
cpumask_var_t covered_cpus;
@ -473,16 +472,6 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
goto out;
}
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
op_points->frequency, freqs.old, freqs.new,
msr);
cpufreq_notify_transition(policy, &freqs,
CPUFREQ_PRECHANGE);
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
@ -497,8 +486,6 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
cpumask_set_cpu(j, covered_cpus);
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
@ -509,12 +496,6 @@ static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
for_each_cpu(j, covered_cpus)
wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
retval = 0;

View File

@ -258,21 +258,12 @@ static unsigned int speedstep_get(unsigned int cpu)
static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
unsigned int policy_cpu;
struct cpufreq_freqs freqs;
policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
freqs.old = speedstep_get(policy_cpu);
freqs.new = speedstep_freqs[index].frequency;
pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
true);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

View File

@ -241,14 +241,7 @@ static void speedstep_set_state(unsigned int state)
*/
static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
struct cpufreq_freqs freqs;
freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
freqs.new = speedstep_freqs[index].frequency;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
speedstep_set_state(index);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return 0;
}

View File

@ -102,12 +102,8 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
unsigned long rate)
{
int ret = 0;
struct cpufreq_freqs freqs;
freqs.old = tegra_getspeed(0);
freqs.new = rate;
if (freqs.old == freqs.new)
if (tegra_getspeed(0) == rate)
return ret;
/*
@ -121,21 +117,10 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
else
clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
#ifdef CONFIG_CPU_FREQ_DEBUG
printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
freqs.old, freqs.new);
#endif
ret = tegra_cpu_clk_set_rate(freqs.new * 1000);
if (ret) {
pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
freqs.new);
freqs.new = freqs.old;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
ret = tegra_cpu_clk_set_rate(rate * 1000);
if (ret)
pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
rate);
return ret;
}

View File

@ -0,0 +1,70 @@
/*
* Versatile Express SPC CPUFreq Interface driver
*
* It provides the necessary ops to the arm_big_little cpufreq driver.
*
* Copyright (C) 2013 ARM Ltd.
* Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/types.h>
#include "arm_big_little.h"
static int ve_spc_init_opp_table(struct device *cpu_dev)
{
/*
* platform-specific SPC code must initialise the OPP table,
* so just check that the OPP count is non-zero
*/
return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
}
static int ve_spc_get_transition_latency(struct device *cpu_dev)
{
return 1000000; /* 1 ms */
}
static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
.name = "vexpress-spc",
.get_transition_latency = ve_spc_get_transition_latency,
.init_opp_table = ve_spc_init_opp_table,
};
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
return bL_cpufreq_register(&ve_spc_cpufreq_ops);
}
static int ve_spc_cpufreq_remove(struct platform_device *pdev)
{
bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
return 0;
}
static struct platform_driver ve_spc_cpufreq_platdrv = {
.driver = {
.name = "vexpress-spc-cpufreq",
.owner = THIS_MODULE,
},
.probe = ve_spc_cpufreq_probe,
.remove = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
MODULE_LICENSE("GPL");

View File

@ -237,6 +237,13 @@ struct cpufreq_driver {
*/
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
/*
* Drivers that do POSTCHANGE notifications from outside of their ->target()
* routine must set this flag in cpufreq_driver->flags so that the core can
* handle them specially.
*/
#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
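A hedged sketch of a driver that uses this flag (hypothetical "demo" names, not from this merge; the visible user above is powernow-k8, whose switch completes in deferred context). Such a driver sets CPUFREQ_ASYNC_NOTIFICATION and issues both notifications itself from wherever the switch actually happens:

/*
 * Hypothetical driver sketch; would need <linux/cpufreq.h> and
 * <linux/workqueue.h>. The frequency switch completes in a work item,
 * so the core must not wrap ->target_index() with notifications.
 */
struct demo_req {
	struct work_struct work;
	struct cpufreq_policy *policy;
	unsigned int target_khz;
};

static struct cpufreq_frequency_table demo_freq_table[] = {
	{ 0, 500000 },			/* driver_data, frequency in kHz */
	{ 0, 1000000 },
	{ 0, CPUFREQ_TABLE_END },
};

/* Runs from a workqueue, after ->target_index() has already returned */
static void demo_do_switch(struct work_struct *work)
{
	struct demo_req *req = container_of(work, struct demo_req, work);
	struct cpufreq_freqs freqs = {
		.old = req->policy->cur,
		.new = req->target_khz,
	};

	cpufreq_notify_transition(req->policy, &freqs, CPUFREQ_PRECHANGE);
	/* ... program the hardware here ... */
	cpufreq_notify_transition(req->policy, &freqs, CPUFREQ_POSTCHANGE);
}

static struct demo_req demo_req = {
	.work = __WORK_INITIALIZER(demo_req.work, demo_do_switch),
};

static int demo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	demo_req.policy = policy;
	demo_req.target_khz = demo_freq_table[index].frequency;
	schedule_work(&demo_req.work);
	return 0;	/* notifications are issued later by demo_do_switch() */
}

static struct cpufreq_driver demo_driver = {
	.flags		= CPUFREQ_ASYNC_NOTIFICATION,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= demo_target_index,
};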