
Pull bugzilla-5737 into release branch

Conflicts:

	arch/x86_64/kernel/acpi/processor.c
hifive-unleashed-5.1
Len Brown 2006-06-15 21:39:25 -04:00
commit d42510a0f5
7 changed files with 648 additions and 159 deletions

View File

@@ -47,7 +47,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;

View File

@@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
struct acpi_processor_performance acpi_data;
struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -104,64 +105,43 @@ acpi_processor_set_performance (
{
u16 port = 0;
u8 bit_width = 0;
int i = 0;
int ret = 0;
u32 value = 0;
int i = 0;
struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int retval;
struct acpi_processor_performance *perf;
dprintk("acpi_processor_set_performance\n");
/*
* TBD: Use something other than set_cpus_allowed.
* As set_cpus_allowed is a bit racy,
* with any other set_cpus_allowed for this process.
*/
saved_mask = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
if (smp_processor_id() != cpu) {
return (-EAGAIN);
}
if (state == data->acpi_data.state) {
retval = 0;
perf = data->acpi_data;
if (state == perf->state) {
if (unlikely(data->resume)) {
dprintk("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
dprintk("Already at target state (P%d)\n", state);
retval = 0;
goto migrate_end;
return (retval);
}
}
dprintk("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
/* cpufreq frequency struct */
cpufreq_freqs.cpu = cpu;
cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
cpufreq_freqs.new = data->freq_table[state].frequency;
/* notify cpufreq */
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
dprintk("Transitioning from P%d to P%d\n", perf->state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
port = data->acpi_data.control_register.address;
bit_width = data->acpi_data.control_register.bit_width;
value = (u32) data->acpi_data.states[state].control;
port = perf->control_register.address;
bit_width = perf->control_register.bit_width;
value = (u32) perf->states[state].control;
dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
ret = acpi_processor_write_port(port, bit_width, value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
retval = ret;
goto migrate_end;
return (ret);
}
/*
@@ -177,49 +157,36 @@ acpi_processor_set_performance (
* before giving up.
*/
port = data->acpi_data.status_register.address;
bit_width = data->acpi_data.status_register.bit_width;
port = perf->status_register.address;
bit_width = perf->status_register.bit_width;
dprintk("Looking for 0x%08x from port 0x%04x\n",
(u32) data->acpi_data.states[state].status, port);
(u32) perf->states[state].status, port);
for (i=0; i<100; i++) {
for (i = 0; i < 100; i++) {
ret = acpi_processor_read_port(port, bit_width, &value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
retval = ret;
goto migrate_end;
return (ret);
}
if (value == (u32) data->acpi_data.states[state].status)
if (value == (u32) perf->states[state].status)
break;
udelay(10);
}
} else {
i = 0;
value = (u32) data->acpi_data.states[state].status;
value = (u32) perf->states[state].status;
}
/* notify cpufreq */
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
unsigned int tmp = cpufreq_freqs.new;
cpufreq_freqs.new = cpufreq_freqs.old;
cpufreq_freqs.old = tmp;
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
if (unlikely(value != (u32) perf->states[state].status)) {
printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
retval = -ENODEV;
goto migrate_end;
return (retval);
}
dprintk("Transition successful after %d microseconds\n", i * 10);
data->acpi_data.state = state;
retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
perf->state = state;
return (retval);
}
@@ -231,8 +198,17 @@ acpi_cpufreq_target (
unsigned int relation)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
cpumask_t saved_mask;
cpumask_t set_mask;
cpumask_t covered_cpus;
unsigned int cur_state = 0;
unsigned int next_state = 0;
unsigned int result = 0;
unsigned int j;
unsigned int tmp;
dprintk("acpi_cpufreq_setpolicy\n");
@@ -241,11 +217,95 @@ acpi_cpufreq_target (
target_freq,
relation,
&next_state);
if (result)
if (unlikely(result))
return (result);
result = acpi_processor_set_performance (data, policy->cpu, next_state);
perf = data->acpi_data;
cur_state = perf->state;
freqs.old = data->freq_table[cur_state].frequency;
freqs.new = data->freq_table[next_state].frequency;
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
online_policy_cpus = policy->cpus;
#endif
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
/*
* We need to call driver->target() on all or any CPU in
* policy->cpus, depending on policy->shared_type.
*/
saved_mask = current->cpus_allowed;
cpus_clear(covered_cpus);
for_each_cpu_mask(j, online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on the CPU that wants to change freq
*/
cpus_clear(set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
cpus_or(set_mask, set_mask, online_policy_cpus);
else
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
result = -EAGAIN;
break;
}
result = acpi_processor_set_performance (data, j, next_state);
if (result) {
result = -EAGAIN;
break;
}
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpu_set(j, covered_cpus);
}
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
if (unlikely(result)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to online_policy_cpus and
* acpi_processor_set_performance() has been called on
* covered_cpus. Best-effort undo.
*/
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
policy->cpu = j;
acpi_processor_set_performance (data,
j,
cur_state);
}
}
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
set_cpus_allowed(current, saved_mask);
return (result);
}
@@ -271,30 +331,65 @@ acpi_cpufreq_guess_freq (
struct cpufreq_acpi_io *data,
unsigned int cpu)
{
struct acpi_processor_performance *perf = data->acpi_data;
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
unsigned long freqn = perf->states[0].core_frequency * 1000;
for (i=0; i < (data->acpi_data.state_count - 1); i++) {
for (i = 0; i < (perf->state_count - 1); i++) {
freq = freqn;
freqn = data->acpi_data.states[i+1].core_frequency * 1000;
freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
data->acpi_data.state = i;
perf->state = i;
return (freq);
}
}
data->acpi_data.state = data->acpi_data.state_count - 1;
perf->state = perf->state_count - 1;
return (freqn);
} else
} else {
/* assume CPU is at P0... */
data->acpi_data.state = 0;
return data->acpi_data.states[0].core_frequency * 1000;
perf->state = 0;
return perf->states[0].core_frequency * 1000;
}
}
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
* Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
* in order to determine correct frequency and voltage pairings. We can
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
static int acpi_cpufreq_early_init_acpi(void)
{
struct acpi_processor_performance *data;
unsigned int i, j;
dprintk("acpi_cpufreq_early_init\n");
for_each_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
return (-ENOMEM);
}
acpi_perf_data[i] = data;
}
/* Do initialization in ACPI core */
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -304,41 +399,51 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
if (!acpi_perf_data[cpu])
return (-ENODEV);
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
data->acpi_data = acpi_perf_data[cpu];
acpi_io_data[cpu] = data;
result = acpi_processor_register_performance(&data->acpi_data, cpu);
result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
perf = data->acpi_data;
policy->cpus = perf->shared_cpu_map;
policy->shared_type = perf->shared_type;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
/* capability check */
if (data->acpi_data.state_count <= 1) {
if (perf->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
(perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
dprintk("Unsupported address space [%d, %d]\n",
(u32) (data->acpi_data.control_register.space_id),
(u32) (data->acpi_data.status_register.space_id));
(u32) (perf->control_register.space_id),
(u32) (perf->status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -346,9 +451,9 @@ acpi_cpufreq_cpu_init (
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i=0; i<data->acpi_data.state_count; i++) {
if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
for (i=0; i<perf->state_count; i++) {
if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@@ -356,11 +461,11 @@ acpi_cpufreq_cpu_init (
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
/* table init */
for (i=0; i<=data->acpi_data.state_count; i++)
for (i=0; i<=perf->state_count; i++)
{
data->freq_table[i].index = i;
if (i<data->acpi_data.state_count)
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
if (i<perf->state_count)
data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
else
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
@@ -375,12 +480,12 @@ acpi_cpufreq_cpu_init (
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
(i == data->acpi_data.state?'*':' '), i,
(u32) data->acpi_data.states[i].core_frequency,
(u32) data->acpi_data.states[i].power,
(u32) data->acpi_data.states[i].transition_latency);
(i == perf->state?'*':' '), i,
(u32) perf->states[i].core_frequency,
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
@@ -395,7 +500,7 @@ acpi_cpufreq_cpu_init (
err_freqfree:
kfree(data->freq_table);
err_unreg:
acpi_processor_unregister_performance(&data->acpi_data, cpu);
acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
@@ -416,7 +521,7 @@ acpi_cpufreq_cpu_exit (
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
kfree(data);
}
@@ -462,7 +567,10 @@ acpi_cpufreq_init (void)
dprintk("acpi_cpufreq_init\n");
result = cpufreq_register_driver(&acpi_cpufreq_driver);
result = acpi_cpufreq_early_init_acpi();
if (!result)
result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
}
@@ -471,10 +579,15 @@ acpi_cpufreq_init (void)
static void __exit
acpi_cpufreq_exit (void)
{
unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
for_each_cpu(i) {
kfree(acpi_perf_data[i]);
acpi_perf_data[i] = NULL;
}
return;
}
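
The rewritten acpi_cpufreq_target() above settles on a fixed sequence: PRECHANGE notifiers for every online CPU in the policy, the actual transition on each (or any one) of them, POSTCHANGE notifiers, and a best-effort rollback if a transition failed partway through. As a reading aid only, not part of the patch, here is a minimal userspace sketch of that control flow; NCPUS, notify() and do_transition() are hypothetical stand-ins for policy->cpus, cpufreq_notify_transition() and acpi_processor_set_performance().

#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

static void notify(int cpu, const char *phase, int old_khz, int new_khz)
{
	printf("cpu%d: %s %d -> %d kHz\n", cpu, phase, old_khz, new_khz);
}

static int do_transition(int cpu, int state)
{
	(void)state;
	return (cpu == 2) ? -1 : 0;	/* pretend CPU 2 fails, to exercise the undo path */
}

int main(void)
{
	int old_khz = 1600000, new_khz = 800000;
	int cur_state = 0, next_state = 1;
	bool covered[NCPUS] = { false };
	int result = 0, j;

	for (j = 0; j < NCPUS; j++)		/* PRECHANGE on every policy CPU */
		notify(j, "PRECHANGE", old_khz, new_khz);

	for (j = 0; j < NCPUS; j++) {		/* act on each CPU, remember successes */
		result = do_transition(j, next_state);
		if (result)
			break;
		covered[j] = true;
	}

	for (j = 0; j < NCPUS; j++)		/* POSTCHANGE on every policy CPU */
		notify(j, "POSTCHANGE", old_khz, new_khz);

	if (result) {				/* best-effort undo, as in the patch */
		for (j = 0; j < NCPUS; j++)
			if (covered[j])
				do_transition(j, cur_state);
		for (j = 0; j < NCPUS; j++) {
			notify(j, "PRECHANGE", new_khz, old_khz);
			notify(j, "POSTCHANGE", new_khz, old_khz);
		}
	}
	return result ? 1 : 0;
}

The real driver additionally pins itself to the target CPU (or the whole domain, for CPUFREQ_SHARED_TYPE_ANY) with set_cpus_allowed() before each transition; the sketch omits that detail.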

View File

@@ -351,7 +351,36 @@ static unsigned int get_cur_freq(unsigned int cpu)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
static struct acpi_processor_performance p;
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
/*
* centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
* library
*
* Before doing the actual init, we need to do _PSD related setup whenever
* supported by the BIOS. These are handled by this early_init routine.
*/
static int centrino_cpu_early_init_acpi(void)
{
unsigned int i, j;
struct acpi_processor_performance *data;
for_each_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
return (-ENOMEM);
}
acpi_perf_data[i] = data;
}
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
@@ -365,46 +394,51 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
struct acpi_processor_performance *p;
p = acpi_perf_data[cpu];
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
if (acpi_processor_register_performance(p, cpu)) {
dprintk(KERN_INFO PFX "obtaining ACPI data failed\n");
return -EIO;
}
policy->cpus = p->shared_cpu_map;
policy->shared_type = p->shared_type;
/* verify the acpi_data */
if (p.state_count <= 1) {
if (p->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
p.control_register.space_id, p.status_register.space_id);
p->control_register.space_id, p->status_register.space_id);
result = -EIO;
goto err_unreg;
}
for (i=0; i<p.state_count; i++) {
if (p.states[i].control != p.states[i].status) {
for (i=0; i<p->state_count; i++) {
if (p->states[i].control != p->states[i].status) {
dprintk("Different control (%llu) and status values (%llu)\n",
p.states[i].control, p.states[i].status);
p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
}
if (!p.states[i].core_frequency) {
if (!p->states[i].core_frequency) {
dprintk("Zero core frequency for state %u\n", i);
result = -EINVAL;
goto err_unreg;
}
if (p.states[i].core_frequency > p.states[0].core_frequency) {
if (p->states[i].core_frequency > p->states[0].core_frequency) {
dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
p.states[i].core_frequency, p.states[0].core_frequency);
p.states[i].core_frequency = 0;
p->states[i].core_frequency, p->states[0].core_frequency);
p->states[i].core_frequency = 0;
continue;
}
}
@@ -416,26 +450,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
centrino_model[cpu]->model_name=NULL;
centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
(p.state_count + 1), GFP_KERNEL);
(p->state_count + 1), GFP_KERNEL);
if (!centrino_model[cpu]->op_points) {
result = -ENOMEM;
goto err_kfree;
}
for (i=0; i<p.state_count; i++) {
centrino_model[cpu]->op_points[i].index = p.states[i].control;
centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
for (i=0; i<p->state_count; i++) {
centrino_model[cpu]->op_points[i].index = p->states[i].control;
centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
}
centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
cur_freq = get_cur_freq(cpu);
for (i=0; i<p.state_count; i++) {
if (!p.states[i].core_frequency) {
for (i=0; i<p->state_count; i++) {
if (!p->states[i].core_frequency) {
dprintk("skipping state %u\n", i);
centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
@@ -451,7 +485,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
p.state = i;
p->state = i;
}
/* notify BIOS that we exist */
@@ -464,12 +498,13 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
err_kfree:
kfree(centrino_model[cpu]);
err_unreg:
acpi_processor_unregister_performance(&p, cpu);
acpi_processor_unregister_performance(p, cpu);
dprintk(KERN_INFO PFX "invalid ACPI data\n");
return (result);
}
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif
static int centrino_cpu_init(struct cpufreq_policy *policy)
@@ -555,10 +590,15 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
if (!centrino_model[cpu]->model_name) {
dprintk("unregistering and freeing ACPI data\n");
acpi_processor_unregister_performance(&p, cpu);
kfree(centrino_model[cpu]->op_points);
kfree(centrino_model[cpu]);
static struct acpi_processor_performance *p;
if (acpi_perf_data[cpu]) {
p = acpi_perf_data[cpu];
dprintk("unregistering and freeing ACPI data\n");
acpi_processor_unregister_performance(p, cpu);
kfree(centrino_model[cpu]->op_points);
kfree(centrino_model[cpu]);
}
}
#endif
@@ -592,63 +632,128 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int newstate = 0;
unsigned int msr, oldmsr, h, cpu = policy->cpu;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
cpumask_t saved_mask;
int retval;
cpumask_t set_mask;
cpumask_t covered_cpus;
int retval = 0;
unsigned int j, k, first_cpu, tmp;
if (centrino_model[cpu] == NULL)
if (unlikely(centrino_model[cpu] == NULL))
return -ENODEV;
/*
* Support for SMP systems.
* Make sure we are running on the CPU that wants to change frequency
*/
if (unlikely(cpufreq_frequency_table_target(policy,
centrino_model[cpu]->op_points,
target_freq,
relation,
&newstate))) {
return -EINVAL;
}
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
online_policy_cpus = policy->cpus;
#endif
saved_mask = current->cpus_allowed;
set_cpus_allowed(current, policy->cpus);
if (!cpu_isset(smp_processor_id(), policy->cpus)) {
dprintk("couldn't limit to CPUs in this domain\n");
return(-EAGAIN);
first_cpu = 1;
cpus_clear(covered_cpus);
for_each_cpu_mask(j, online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on the CPU that wants to change freq
*/
cpus_clear(set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
cpus_or(set_mask, set_mask, online_policy_cpus);
else
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
/* We haven't started the transition yet. */
goto migrate_end;
}
break;
}
msr = centrino_model[cpu]->op_points[newstate].index;
if (first_cpu) {
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (msr == (oldmsr & 0xffff)) {
dprintk("no change needed - msr was and needs "
"to be %x\n", oldmsr);
retval = 0;
goto migrate_end;
}
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
for_each_cpu_mask(k, online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs,
CPUFREQ_PRECHANGE);
}
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
}
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpu_set(j, covered_cpus);
}
if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
relation, &newstate)) {
retval = -EINVAL;
goto migrate_end;
for_each_cpu_mask(k, online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
msr = centrino_model[cpu]->op_points[newstate].index;
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to policy->cpus and
* MSRs have already been written on covered_cpus.
* Best-effort undo.
*/
if (msr == (oldmsr & 0xffff)) {
retval = 0;
dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
goto migrate_end;
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
set_cpus_allowed(current, cpumask_of_cpu(j));
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
}
}
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
freqs.cpu = cpu;
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* all but 16 LSB are "reserved", so treat them with
care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
return (retval);
return 0;
}
static struct freq_attr* centrino_attr[] = {
@@ -690,12 +795,25 @@ static int __init centrino_init(void)
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
centrino_cpu_early_init_acpi();
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
unsigned int j;
#endif
cpufreq_unregister_driver(&centrino_driver);
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");

View File

@@ -554,6 +554,230 @@ static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
}
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
int result = 0;
acpi_status status = AE_OK;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
struct acpi_buffer state = {0, NULL};
union acpi_object *psd = NULL;
struct acpi_psd_package *pdomain;
status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
return -ENODEV;
}
psd = (union acpi_object *) buffer.pointer;
if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
result = -EFAULT;
goto end;
}
if (psd->package.count != 1) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
result = -EFAULT;
goto end;
}
pdomain = &(pr->performance->domain_info);
state.length = sizeof(struct acpi_psd_package);
state.pointer = pdomain;
status = acpi_extract_package(&(psd->package.elements[0]),
&format, &state);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
result = -EFAULT;
goto end;
}
if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
result = -EFAULT;
goto end;
}
if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
result = -EFAULT;
goto end;
}
end:
acpi_os_free(buffer.pointer);
return result;
}
int acpi_processor_preregister_performance(
struct acpi_processor_performance **performance)
{
int count, count_target;
int retval = 0;
unsigned int i, j;
cpumask_t covered_cpus;
struct acpi_processor *pr;
struct acpi_psd_package *pdomain;
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
down(&performance_sem);
retval = 0;
/* Call _PSD for all CPUs */
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr) {
/* Look only at processors in ACPI namespace */
continue;
}
if (pr->performance) {
retval = -EBUSY;
continue;
}
if (!performance || !performance[i]) {
retval = -EINVAL;
continue;
}
pr->performance = performance[i];
cpu_set(i, pr->performance->shared_cpu_map);
if (acpi_processor_get_psd(pr)) {
retval = -EINVAL;
continue;
}
}
if (retval)
goto err_ret;
/*
* Now that we have _PSD data from all CPUs, let's set up P-state
* domain info.
*/
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr)
continue;
/* Basic validity check for domain info */
pdomain = &(pr->performance->domain_info);
if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
(pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
retval = -EINVAL;
goto err_ret;
}
if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
retval = -EINVAL;
goto err_ret;
}
}
cpus_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr)
continue;
if (cpu_isset(i, covered_cpus))
continue;
pdomain = &(pr->performance->domain_info);
cpu_set(i, pr->performance->shared_cpu_map);
cpu_set(i, covered_cpus);
if (pdomain->num_processors <= 1)
continue;
/* Validate the Domain info */
count_target = pdomain->num_processors;
count = 1;
if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
} else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
}
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = processors[j];
if (!match_pr)
continue;
match_pdomain = &(match_pr->performance->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
/* Here i and j are in the same domain */
if (match_pdomain->num_processors != count_target) {
retval = -EINVAL;
goto err_ret;
}
if (pdomain->coord_type != match_pdomain->coord_type) {
retval = -EINVAL;
goto err_ret;
}
cpu_set(j, covered_cpus);
cpu_set(j, pr->performance->shared_cpu_map);
count++;
}
for_each_possible_cpu(j) {
if (i == j)
continue;
match_pr = processors[j];
if (!match_pr)
continue;
match_pdomain = &(match_pr->performance->domain_info);
if (match_pdomain->domain != pdomain->domain)
continue;
match_pr->performance->shared_type =
pr->performance->shared_type;
match_pr->performance->shared_cpu_map =
pr->performance->shared_cpu_map;
}
}
err_ret:
if (retval) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
}
for_each_possible_cpu(i) {
pr = processors[i];
if (!pr || !pr->performance)
continue;
/* Assume no coordination on any error parsing domain info */
if (retval) {
cpus_clear(pr->performance->shared_cpu_map);
cpu_set(i, pr->performance->shared_cpu_map);
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
}
pr->performance = NULL; /* Will be set for real in register */
}
up(&performance_sem);
return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
int
acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu)
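
The new acpi_processor_preregister_performance() above evaluates _PSD for every processor and then groups CPUs whose packages report the same domain number, insisting that all members agree on num_processors and coord_type before shared_cpu_map and shared_type are propagated. A self-contained sketch of that grouping pass, using made-up domain data in place of real _PSD output, might look like this:

#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

struct psd {
	int domain;		/* domain number from _PSD */
	int coord_type;		/* 0xfc SW_ALL, 0xfd SW_ANY, 0xfe HW_ALL */
	int num_processors;	/* expected members in this domain */
};

int main(void)
{
	/* Made-up _PSD data: CPUs 0+1 share domain 0, CPUs 2+3 share domain 1 */
	struct psd info[NCPUS] = {
		{ 0, 0xfc, 2 }, { 0, 0xfc, 2 }, { 1, 0xfd, 2 }, { 1, 0xfd, 2 },
	};
	bool covered[NCPUS] = { false };
	int i, j;

	for (i = 0; i < NCPUS; i++) {
		int count = 1;

		if (covered[i])
			continue;
		covered[i] = true;

		for (j = 0; j < NCPUS; j++) {
			if (j == i || info[j].domain != info[i].domain)
				continue;
			/* every member must agree with the first CPU seen */
			if (info[j].num_processors != info[i].num_processors ||
			    info[j].coord_type != info[i].coord_type) {
				printf("inconsistent _PSD, assume no coordination\n");
				return 1;
			}
			covered[j] = true;
			count++;
		}
		printf("domain %d: %d CPU(s), coord_type 0x%x\n",
		       info[i].domain, count, info[i].coord_type);
	}
	return 0;
}

On any mismatch the real code falls back to no coordination: the err_ret path above resets each shared_cpu_map to the CPU itself and marks the domain CPUFREQ_SHARED_TYPE_ALL.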

View File

@ -18,6 +18,11 @@
ACPI_PDC_C_C1_HALT | \
ACPI_PDC_P_FFH)
#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
ACPI_PDC_C_C1_HALT | \
ACPI_PDC_SMP_P_SWCOORD | \
ACPI_PDC_P_FFH)
#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
ACPI_PDC_SMP_C1PT | \
ACPI_PDC_C_C1_HALT)

View File

@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <asm/acpi.h>
@@ -18,6 +19,17 @@
#define ACPI_PDC_REVISION_ID 0x1
#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
#define ACPI_PSD_REV0_ENTRIES 5
/*
* Types of coordination defined in ACPI 3.0. Same macros can be used across
* P, C and T states
*/
#define DOMAIN_COORD_TYPE_SW_ALL 0xfc
#define DOMAIN_COORD_TYPE_SW_ANY 0xfd
#define DOMAIN_COORD_TYPE_HW_ALL 0xfe
/* Power Management */
struct acpi_processor_cx;
@@ -66,6 +78,14 @@ struct acpi_processor_power {
/* Performance Management */
struct acpi_psd_package {
acpi_integer num_entries;
acpi_integer revision;
acpi_integer domain;
acpi_integer coord_type;
acpi_integer num_processors;
} __attribute__ ((packed));
struct acpi_pct_register {
u8 descriptor;
u16 length;
@@ -92,7 +112,9 @@ struct acpi_processor_performance {
struct acpi_pct_register status_register;
unsigned int state_count;
struct acpi_processor_px *states;
struct acpi_psd_package domain_info;
cpumask_t shared_cpu_map;
unsigned int shared_type;
};
/* Throttling Control */
@@ -161,6 +183,9 @@ struct acpi_processor_errata {
} piix4;
};
extern int acpi_processor_preregister_performance(
struct acpi_processor_performance **performance);
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
extern void acpi_processor_unregister_performance(struct

View File

@@ -73,6 +73,8 @@ struct cpufreq_real_policy {
struct cpufreq_policy {
cpumask_t cpus; /* affected CPUs */
unsigned int shared_type; /* ANY or ALL affected CPUs
should set cpufreq */
unsigned int cpu; /* cpu nr of registered CPU */
struct cpufreq_cpuinfo cpuinfo;/* see above */
@ -99,6 +101,8 @@ struct cpufreq_policy {
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_SHARED_TYPE_ALL (0) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (1) /* Freq can be set from any dependent CPU */
/******************** cpufreq transition notifiers *******************/
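
The two CPUFREQ_SHARED_TYPE_* values introduced here are what the perflib changes above derive from the _PSD coord_type: SW_ALL and HW_ALL domains become SHARED_TYPE_ALL (every dependent CPU must issue the request), while SW_ANY becomes SHARED_TYPE_ANY (a single write covers the whole domain). A small illustrative helper, assuming the macros added by this patch are in scope, could express that mapping:

/* Illustrative sketch only; mirrors the selection made in
 * acpi_processor_preregister_performance() above. */
static unsigned int psd_coord_to_shared_type(unsigned int coord_type)
{
	switch (coord_type) {
	case DOMAIN_COORD_TYPE_SW_ALL:	/* 0xfc: software, all CPUs write */
	case DOMAIN_COORD_TYPE_HW_ALL:	/* 0xfe: hardware coordination */
		return CPUFREQ_SHARED_TYPE_ALL;
	case DOMAIN_COORD_TYPE_SW_ANY:	/* 0xfd: any one CPU may write */
		return CPUFREQ_SHARED_TYPE_ANY;
	default:
		return CPUFREQ_SHARED_TYPE_ALL;	/* conservative fallback, not taken from the patch */
	}
}

The drivers above then branch on policy->shared_type in their target() routines: SHARED_TYPE_ANY lets them write once and break out of the per-CPU loop, anything else makes them repeat the write on every covered CPU.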