
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq

* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] kzalloc conversion for gx-suspmod
  [CPUFREQ] Whitespace cleanup
  [CPUFREQ] Mark longhaul driver as broken.
  [PATCH] cpufreq: fix section mismatch warnings
  [CPUFREQ] Fix the p4-clockmod N60 errata workaround.
  [CPUFREQ] Fix handling for CPU hotplug
  [CPUFREQ] powernow-k8: Let cpufreq driver handle affected CPUs
  [CPUFREQ] Lots of whitespace & CodingStyle cleanup.
  [CPUFREQ] Remove duplicate cpuinfo struct
  [CPUFREQ] Silence powernow-k8 warning on k7's.
Linus Torvalds 2006-03-25 08:52:23 -08:00
commit be9bf30c73
21 changed files with 414 additions and 404 deletions

View File

@ -96,7 +96,6 @@ config X86_POWERNOW_K8_ACPI
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
depends on PCI
help
This add the CPUFreq driver for NatSemi Geode processors which
support suspend modulation.
@ -115,9 +114,9 @@ config X86_SPEEDSTEP_CENTRINO
you also need to say Y to "Use ACPI tables to decode..." below
[which might imply enabling ACPI] if you want to use this driver
on non-Banias CPUs.
For details, take a look at <file:Documentation/cpu-freq/>.
If in doubt, say N.
config X86_SPEEDSTEP_CENTRINO_ACPI
@ -148,7 +147,7 @@ config X86_SPEEDSTEP_ICH
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
(Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
ICH3 or ICH4 southbridge.
For details, take a look at <file:Documentation/cpu-freq/>.
@ -161,7 +160,7 @@ config X86_SPEEDSTEP_SMI
depends on EXPERIMENTAL
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
(Coppermine), all mobile Intel Pentium III-M (Tualatin)
(Coppermine), all mobile Intel Pentium III-M (Tualatin)
on systems which have an Intel 440BX/ZX/MX southbridge.
For details, take a look at <file:Documentation/cpu-freq/>.
@ -203,9 +202,10 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
select CPU_FREQ_TABLE
depends on BROKEN
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
processors.
For details, take a look at <file:Documentation/cpu-freq/>.
@ -215,11 +215,11 @@ config X86_LONGHAUL
comment "shared options"
config X86_ACPI_CPUFREQ_PROC_INTF
bool "/proc/acpi/processor/../performance interface (deprecated)"
bool "/proc/acpi/processor/../performance interface (deprecated)"
depends on PROC_FS
depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
help
This enables the deprecated /proc/acpi/processor/../performance
This enables the deprecated /proc/acpi/processor/../performance
interface. While it is helpful for debugging, the generic,
cross-architecture cpufreq interfaces should be used.
@ -233,9 +233,9 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
bool "Relaxed speedstep capability checks"
depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
help
Don't perform all checks for a speedstep capable system which would
normally be done. Some ancient or strange systems, though speedstep
capable, don't always indicate that they are speedstep capable. This
Don't perform all checks for a speedstep capable system which would
normally be done. Some ancient or strange systems, though speedstep
capable, don't always indicate that they are speedstep capable. This
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.

View File

@ -39,7 +39,7 @@ static struct pci_dev *nforce2_chipset_dev;
static int fid = 0;
/* min_fsb, max_fsb:
* minimum and maximum FSB (= FSB at boot time)
* minimum and maximum FSB (= FSB at boot time)
*/
static int min_fsb = 0;
static int max_fsb = 0;
@ -57,10 +57,10 @@ MODULE_PARM_DESC(min_fsb,
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg)
/*
/**
* nforce2_calc_fsb - calculate FSB
* @pll: PLL value
*
*
* Calculates FSB from PLL value
*/
static int nforce2_calc_fsb(int pll)
@ -76,10 +76,10 @@ static int nforce2_calc_fsb(int pll)
return 0;
}
/*
/**
* nforce2_calc_pll - calculate PLL value
* @fsb: FSB
*
*
* Calculate PLL value for given FSB
*/
static int nforce2_calc_pll(unsigned int fsb)
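The two helpers documented above convert between the nForce2 PLL register word and the FSB clock. As a rough, standalone sketch of the FSB direction, assuming the PLL word packs an 8-bit multiplier (bits 15:8) and an 8-bit divisor (bits 7:0) over a 25 MHz reference clock — neither constant is shown in this hunk, so both are assumptions:

#include <stdio.h>

/* Assumed layout (not shown in this hunk): bits 15:8 of the PLL word hold
 * the multiplier, bits 7:0 the divisor, applied to a 25 MHz reference. */
#define XTAL_MHZ 25

static int calc_fsb_from_pll(unsigned int pll)
{
	unsigned int mul = (pll >> 8) & 0xff;
	unsigned int div = pll & 0xff;

	if (div == 0)
		return 0;
	return XTAL_MHZ * mul / div;	/* FSB in MHz */
}

int main(void)
{
	/* e.g. mul = 32, div = 4 -> 25 * 32 / 4 = 200 MHz FSB */
	printf("FSB: %d MHz\n", calc_fsb_from_pll((32 << 8) | 4));
	return 0;
}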
@ -106,10 +106,10 @@ static int nforce2_calc_pll(unsigned int fsb)
return NFORCE2_PLL(mul, div);
}
/*
/**
* nforce2_write_pll - write PLL value to chipset
* @pll: PLL value
*
*
* Writes new FSB PLL value to chipset
*/
static void nforce2_write_pll(int pll)
@ -121,15 +121,13 @@ static void nforce2_write_pll(int pll)
pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, temp);
/* Now write the value in all 64 registers */
for (temp = 0; temp <= 0x3f; temp++) {
pci_write_config_dword(nforce2_chipset_dev,
NFORCE2_PLLREG, pll);
}
for (temp = 0; temp <= 0x3f; temp++)
pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll);
return;
}
/*
/**
* nforce2_fsb_read - Read FSB
*
* Read FSB from chipset
@ -140,39 +138,32 @@ static unsigned int nforce2_fsb_read(int bootfsb)
struct pci_dev *nforce2_sub5;
u32 fsb, temp = 0;
/* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
0x01EF,
PCI_ANY_ID,
PCI_ANY_ID,
NULL);
0x01EF,PCI_ANY_ID,PCI_ANY_ID,NULL);
if (!nforce2_sub5)
return 0;
pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
fsb /= 1000000;
/* Check if PLL register is already set */
pci_read_config_byte(nforce2_chipset_dev,
NFORCE2_PLLENABLE, (u8 *)&temp);
pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp);
if(bootfsb || !temp)
return fsb;
/* Use PLL register FSB value */
pci_read_config_dword(nforce2_chipset_dev,
NFORCE2_PLLREG, &temp);
pci_read_config_dword(nforce2_chipset_dev,NFORCE2_PLLREG, &temp);
fsb = nforce2_calc_fsb(temp);
return fsb;
}
/*
/**
* nforce2_set_fsb - set new FSB
* @fsb: New FSB
*
*
* Sets new FSB
*/
static int nforce2_set_fsb(unsigned int fsb)
@ -186,7 +177,7 @@ static int nforce2_set_fsb(unsigned int fsb)
printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb);
return -EINVAL;
}
tfsb = nforce2_fsb_read(0);
if (!tfsb) {
printk(KERN_ERR "cpufreq: Error while reading the FSB\n");
@ -194,8 +185,7 @@ static int nforce2_set_fsb(unsigned int fsb)
}
/* First write? Then set actual value */
pci_read_config_byte(nforce2_chipset_dev,
NFORCE2_PLLENABLE, (u8 *)&temp);
pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp);
if (!temp) {
pll = nforce2_calc_pll(tfsb);
@ -223,7 +213,7 @@ static int nforce2_set_fsb(unsigned int fsb)
/* Calculate the PLL reg. value */
if ((pll = nforce2_calc_pll(tfsb)) == -1)
return -EINVAL;
nforce2_write_pll(pll);
#ifdef NFORCE2_DELAY
mdelay(NFORCE2_DELAY);
@ -239,7 +229,7 @@ static int nforce2_set_fsb(unsigned int fsb)
/**
* nforce2_get - get the CPU frequency
* @cpu: CPU number
*
*
* Returns the CPU frequency
*/
static unsigned int nforce2_get(unsigned int cpu)
@ -354,10 +344,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb,
fid / 10, fid % 10);
/* Set maximum FSB to FSB at boot time */
max_fsb = nforce2_fsb_read(1);
if(!max_fsb)
return -EIO;
@ -398,17 +388,15 @@ static struct cpufreq_driver nforce2_driver = {
* nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
*
* Detects nForce2 A2 and C1 stepping
*
*
*/
static unsigned int nforce2_detect_chipset(void)
{
u8 revision;
nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NFORCE2,
PCI_ANY_ID,
PCI_ANY_ID,
NULL);
PCI_DEVICE_ID_NVIDIA_NFORCE2,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (nforce2_chipset_dev == NULL)
return -ENODEV;

View File

@ -1,16 +1,16 @@
/*
* elanfreq: cpufreq driver for the AMD ELAN family
* elanfreq: cpufreq driver for the AMD ELAN family
*
* (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
*
* Parts of this code are (c) Sven Geggus <sven@geggus.net>
* Parts of this code are (c) Sven Geggus <sven@geggus.net>
*
* All Rights Reserved.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*
* 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
*
@ -28,7 +28,7 @@
#include <asm/timex.h>
#include <asm/io.h>
#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
/* Module parameter */
@ -41,7 +41,7 @@ struct s_elan_multiplier {
};
/*
* It is important that the frequencies
* It is important that the frequencies
* are listed in ascending order here!
*/
struct s_elan_multiplier elan_multiplier[] = {
@ -72,78 +72,79 @@ static struct cpufreq_frequency_table elanfreq_table[] = {
* elanfreq_get_cpu_frequency: determine current cpu speed
*
* Finds out at which frequency the CPU of the Elan SOC runs
* at the moment. Frequencies from 1 to 33 MHz are generated
* at the moment. Frequencies from 1 to 33 MHz are generated
* the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
* and have the rest of the chip running with 33 MHz.
* and have the rest of the chip running with 33 MHz.
*/
static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
{
u8 clockspeed_reg; /* Clock Speed Register */
u8 clockspeed_reg; /* Clock Speed Register */
local_irq_disable();
outb_p(0x80,REG_CSCIR);
clockspeed_reg = inb_p(REG_CSCDR);
outb_p(0x80,REG_CSCIR);
clockspeed_reg = inb_p(REG_CSCDR);
local_irq_enable();
if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; }
if ((clockspeed_reg & 0xE0) == 0xE0)
return 0;
/* Are we in CPU clock multiplied mode (66/99 MHz)? */
if ((clockspeed_reg & 0xE0) == 0xC0) {
if ((clockspeed_reg & 0x01) == 0) {
/* Are we in CPU clock multiplied mode (66/99 MHz)? */
if ((clockspeed_reg & 0xE0) == 0xC0) {
if ((clockspeed_reg & 0x01) == 0)
return 66000;
} else {
return 99000;
}
}
else
return 99000;
}
/* 33 MHz is not 32 MHz... */
if ((clockspeed_reg & 0xE0)==0xA0)
return 33000;
return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
}
/**
* elanfreq_set_cpu_frequency: Change the CPU core frequency
* @cpu: cpu number
* elanfreq_set_cpu_frequency: Change the CPU core frequency
* @cpu: cpu number
* @freq: frequency in kHz
*
* This function takes a frequency value and changes the CPU frequency
* This function takes a frequency value and changes the CPU frequency
* according to this. Note that the frequency has to be checked by
* elanfreq_validatespeed() for correctness!
*
* There is no return value.
*
* There is no return value.
*/
static void elanfreq_set_cpu_state (unsigned int state) {
static void elanfreq_set_cpu_state (unsigned int state)
{
struct cpufreq_freqs freqs;
freqs.old = elanfreq_get_cpu_frequency(0);
freqs.new = elan_multiplier[state].clock;
freqs.cpu = 0; /* elanfreq.c is UP only driver */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock);
printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
elan_multiplier[state].clock);
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
* 0x23: Chip Setup & Control Register Data Register (CSCD)
/*
* Access to the Elan's internal registers is indexed via
* 0x22: Chip Setup & Control Register Index Register (CSCI)
* 0x23: Chip Setup & Control Register Data Register (CSCD)
*
*/
/*
* 0x40 is the Power Management Unit's Force Mode Register.
/*
* 0x40 is the Power Management Unit's Force Mode Register.
* Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
*/
local_irq_disable();
outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */
outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */
outb_p(0x00,REG_CSCDR);
local_irq_enable(); /* wait till internal pipelines and */
udelay(1000); /* buffers have cleaned up */
@ -166,10 +167,10 @@ static void elanfreq_set_cpu_state (unsigned int state) {
/**
* elanfreq_validatespeed: test if frequency range is valid
* @policy: the policy to validate
* @policy: the policy to validate
*
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
*/
static int elanfreq_verify (struct cpufreq_policy *policy)
@ -177,11 +178,11 @@ static int elanfreq_verify (struct cpufreq_policy *policy)
return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
}
static int elanfreq_target (struct cpufreq_policy *policy,
unsigned int target_freq,
static int elanfreq_target (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate))
return -EINVAL;
@ -212,7 +213,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
max_freq = elanfreq_get_cpu_frequency(0);
/* table init */
for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (elanfreq_table[i].frequency > max_freq)
elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
@ -226,8 +227,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
if (result)
return (result);
cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
return 0;
}
@ -268,9 +268,9 @@ static struct freq_attr* elanfreq_attr[] = {
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
.verify = elanfreq_verify,
.target = elanfreq_target,
.get = elanfreq_get_cpu_frequency,
.verify = elanfreq_verify,
.target = elanfreq_target,
.init = elanfreq_cpu_init,
.exit = elanfreq_cpu_exit,
.name = "elanfreq",
@ -279,23 +279,21 @@ static struct cpufreq_driver elanfreq_driver = {
};
static int __init elanfreq_init(void)
{
static int __init elanfreq_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
/* Test if we have the right hardware */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
(c->x86 != 4) || (c->x86_model!=10))
{
(c->x86 != 4) || (c->x86_model!=10)) {
printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
return -ENODEV;
}
return cpufreq_register_driver(&elanfreq_driver);
}
static void __exit elanfreq_exit(void)
static void __exit elanfreq_exit(void)
{
cpufreq_unregister_driver(&elanfreq_driver);
}
@ -309,4 +307,3 @@ MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
module_init(elanfreq_init);
module_exit(elanfreq_exit);

View File

@ -6,12 +6,12 @@
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation
* version 2 as published by the Free Software Foundation
*
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
*
*
* Theoritical note:
*
* (see Geode(tm) CS5530 manual (rev.4.1) page.56)
@ -21,18 +21,18 @@
*
* Suspend Modulation works by asserting and de-asserting the SUSP# pin
* to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
* the CPU enters an idle state. GX1 stops its core clock when SUSP# is
* the CPU enters an idle state. GX1 stops its core clock when SUSP# is
* asserted then power consumption is reduced.
*
* Suspend Modulation's OFF/ON duration are configurable
* Suspend Modulation's OFF/ON duration are configurable
* with 'Suspend Modulation OFF Count Register'
* and 'Suspend Modulation ON Count Register'.
* These registers are 8bit counters that represent the number of
* These registers are 8bit counters that represent the number of
* 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
* to the processor.
*
* These counters define a ratio which is the effective frequency
* of operation of the system.
* These counters define a ratio which is the effective frequency
* of operation of the system.
*
* OFF Count
* F_eff = Fgx * ----------------------
@ -40,24 +40,24 @@
*
* 0 <= On Count, Off Count <= 255
*
* From these limits, we can get register values
* From these limits, we can get register values
*
* off_duration + on_duration <= MAX_DURATION
* on_duration = off_duration * (stock_freq - freq) / freq
*
* off_duration = (freq * DURATION) / stock_freq
* on_duration = DURATION - off_duration
* off_duration = (freq * DURATION) / stock_freq
* on_duration = DURATION - off_duration
*
*
*---------------------------------------------------------------------------
*
* ChangeLog:
* Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
* - fix on/off register mistake
* - fix cpu_khz calc when it stops cpu modulation.
* Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
* - fix on/off register mistake
* - fix cpu_khz calc when it stops cpu modulation.
*
* Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
* - rewrite for Cyrix MediaGX Cx5510/5520 and
* Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
* - rewrite for Cyrix MediaGX Cx5510/5520 and
* NatSemi Geode Cs5530(A).
*
* Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
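To make the duty-cycle arithmetic in the header comment above concrete, here is a minimal userspace sketch (not driver code) that derives the ON/OFF counts and the resulting effective frequency from the stated relations, using an illustrative 200 MHz core and the default 255-interval window:

#include <stdio.h>

/* Illustrative values only: a 200 MHz GX core and the default
 * 255-interval modulation window mentioned in the comment above. */
#define STOCK_FREQ_KHZ	200000
#define MAX_DURATION	255

static void suspmod_counts(unsigned int khz)
{
	unsigned int off = (khz * MAX_DURATION) / STOCK_FREQ_KHZ;
	unsigned int on  = MAX_DURATION - off;
	unsigned int eff = (STOCK_FREQ_KHZ * off) / (on + off);

	printf("req %u kHz -> off=%u on=%u (32us units), F_eff=%u kHz\n",
	       khz, off, on, eff);
}

int main(void)
{
	suspmod_counts(100000);	/* roughly half speed */
	suspmod_counts(50000);	/* roughly quarter speed */
	return 0;
}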
@ -74,40 +74,40 @@
************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/processor.h>
#include <asm/errno.h>
/* PCI config registers, all at F0 */
#define PCI_PMER1 0x80 /* power management enable register 1 */
#define PCI_PMER2 0x81 /* power management enable register 2 */
#define PCI_PMER3 0x82 /* power management enable register 3 */
#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
#define PCI_MODON 0x95 /* suspend modulation ON counter register */
#define PCI_SUSCFG 0x96 /* suspend configuration register */
#define PCI_PMER1 0x80 /* power management enable register 1 */
#define PCI_PMER2 0x81 /* power management enable register 2 */
#define PCI_PMER3 0x82 /* power management enable register 3 */
#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
#define PCI_MODON 0x95 /* suspend modulation ON counter register */
#define PCI_SUSCFG 0x96 /* suspend configuration register */
/* PMER1 bits */
#define GPM (1<<0) /* global power management */
#define GIT (1<<1) /* globally enable PM device idle timers */
#define GTR (1<<2) /* globally enable IO traps */
#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
#define GPM (1<<0) /* global power management */
#define GIT (1<<1) /* globally enable PM device idle timers */
#define GTR (1<<2) /* globally enable IO traps */
#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
/* SUSCFG bits */
#define SUSMOD (1<<0) /* enable/disable suspend modulation */
/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
/* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
/* the belows support only with cs5530A */
#define PWRSVE_ISA (1<<3) /* stop ISA clock */
#define PWRSVE (1<<4) /* active idle */
#define SUSMOD (1<<0) /* enable/disable suspend modulation */
/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
/* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
/* the belows support only with cs5530A */
#define PWRSVE_ISA (1<<3) /* stop ISA clock */
#define PWRSVE (1<<4) /* active idle */
struct gxfreq_params {
u8 on_duration;
@ -128,7 +128,7 @@ module_param (pci_busclk, int, 0444);
/* maximum duration for which the cpu may be suspended
* (32us * MAX_DURATION). If no parameter is given, this defaults
* to 255.
* to 255.
* Note that this leads to a maximum of 8 ms(!) where the CPU clock
* is suspended -- processing power is just 0.39% of what it used to be,
* though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
@ -144,17 +144,17 @@ module_param (max_duration, int, 0444);
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg)
/**
* we can detect a core multipiler from dir0_lsb
* from GX1 datasheet p.56,
* MULT[3:0]:
* 0000 = SYSCLK multiplied by 4 (test only)
* 0001 = SYSCLK multiplied by 10
* 0010 = SYSCLK multiplied by 4
* 0011 = SYSCLK multiplied by 6
* 0100 = SYSCLK multiplied by 9
* 0101 = SYSCLK multiplied by 5
* 0110 = SYSCLK multiplied by 7
* 0111 = SYSCLK multiplied by 8
* we can detect a core multipiler from dir0_lsb
* from GX1 datasheet p.56,
* MULT[3:0]:
* 0000 = SYSCLK multiplied by 4 (test only)
* 0001 = SYSCLK multiplied by 10
* 0010 = SYSCLK multiplied by 4
* 0011 = SYSCLK multiplied by 6
* 0100 = SYSCLK multiplied by 9
* 0101 = SYSCLK multiplied by 5
* 0110 = SYSCLK multiplied by 7
* 0111 = SYSCLK multiplied by 8
* of 33.3MHz
**/
static int gx_freq_mult[16] = {
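The table opened above (its values are cut off by this hunk) encodes exactly the MULT[3:0] mapping from the datasheet excerpt in the comment. A small standalone sketch of how a DIR0 value would be decoded into a core clock, filling in only the eight documented encodings; the helper itself is hypothetical, not part of the driver:

#include <stdio.h>

/* MULT[3:0] -> core multiplier, taken from the datasheet excerpt above;
 * only the eight documented encodings are filled in. */
static const int gx_mult[16] = { 4, 10, 4, 6, 9, 5, 7, 8 };

static unsigned int core_khz_from_dir0(unsigned char dir0_lsb)
{
	unsigned int sysclk_khz = 33300;	/* 33.3 MHz SYSCLK */

	return sysclk_khz * gx_mult[dir0_lsb & 0x0f];
}

int main(void)
{
	/* 0111 -> SYSCLK multiplied by 8 -> roughly 266 MHz */
	printf("%u kHz\n", core_khz_from_dir0(0x07));
	return 0;
}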
@ -164,17 +164,17 @@ static int gx_freq_mult[16] = {
/****************************************************************
* Low Level chipset interface *
* Low Level chipset interface *
****************************************************************/
static struct pci_device_id gx_chipset_tbl[] __initdata = {
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
{ 0, },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
{ 0, },
};
/**
* gx_detect_chipset:
* gx_detect_chipset:
*
**/
static __init struct pci_dev *gx_detect_chipset(void)
@ -182,17 +182,16 @@ static __init struct pci_dev *gx_detect_chipset(void)
struct pci_dev *gx_pci = NULL;
/* check if CPU is a MediaGX or a Geode. */
if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
(current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
dprintk("error: no MediaGX/Geode processor found!\n");
return NULL;
return NULL;
}
/* detect which companion chip is used */
while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL) {
if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
return gx_pci;
}
}
dprintk("error: no supported chipset found!\n");
@ -200,24 +199,24 @@ static __init struct pci_dev *gx_detect_chipset(void)
}
/**
* gx_get_cpuspeed:
* gx_get_cpuspeed:
*
* Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs.
*/
static unsigned int gx_get_cpuspeed(unsigned int cpu)
{
if ((gx_params->pci_suscfg & SUSMOD) == 0)
if ((gx_params->pci_suscfg & SUSMOD) == 0)
return stock_freq;
return (stock_freq * gx_params->off_duration)
return (stock_freq * gx_params->off_duration)
/ (gx_params->on_duration + gx_params->off_duration);
}
/**
* gx_validate_speed:
* determine current cpu speed
*
**/
*
**/
static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration)
{
@ -230,7 +229,7 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
*on_duration=0;
for (i=max_duration; i>0; i--) {
tmp_off = ((khz * i) / stock_freq) & 0xff;
tmp_off = ((khz * i) / stock_freq) & 0xff;
tmp_on = i - tmp_off;
tmp_freq = (stock_freq * tmp_off) / i;
/* if this relation is closer to khz, use this. If it's equal,
@ -247,18 +246,17 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
/**
* gx_set_cpuspeed:
* set cpu speed in khz.
* gx_set_cpuspeed:
* set cpu speed in khz.
**/
static void gx_set_cpuspeed(unsigned int khz)
{
u8 suscfg, pmer1;
u8 suscfg, pmer1;
unsigned int new_khz;
unsigned long flags;
struct cpufreq_freqs freqs;
freqs.cpu = 0;
freqs.old = gx_get_cpuspeed(0);
@ -303,18 +301,18 @@ static void gx_set_cpuspeed(unsigned int khz)
pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration);
pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
local_irq_restore(flags);
local_irq_restore(flags);
gx_params->pci_suscfg = suscfg;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);
dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
gx_params->on_duration * 32, gx_params->off_duration * 32);
dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
}
/****************************************************************
@ -322,10 +320,10 @@ static void gx_set_cpuspeed(unsigned int khz)
****************************************************************/
/*
* cpufreq_gx_verify: test if frequency range is valid
* cpufreq_gx_verify: test if frequency range is valid
*
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
*/
static int cpufreq_gx_verify(struct cpufreq_policy *policy)
@ -333,8 +331,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;
if (!stock_freq || !policy)
return -EINVAL;
if (!stock_freq || !policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
@ -342,14 +340,14 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
/* it needs to be assured that at least one supported frequency is
* within policy->min and policy->max. If it is not, policy->max
* needs to be increased until one freuqency is supported.
* policy->min may not be decreased, though. This way we guarantee a
* policy->min may not be decreased, though. This way we guarantee a
* specific processing capacity.
*/
tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
if (tmp_freq < policy->min)
if (tmp_freq < policy->min)
tmp_freq += stock_freq / max_duration;
policy->min = tmp_freq;
if (policy->min > policy->max)
if (policy->min > policy->max)
policy->max = tmp_freq;
tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
if (tmp_freq > policy->max)
@ -358,12 +356,12 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
if (policy->max < policy->min)
policy->max = policy->min;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
return 0;
}
/*
* cpufreq_gx_target:
* cpufreq_gx_target:
*
*/
static int cpufreq_gx_target(struct cpufreq_policy *policy,
@ -373,8 +371,8 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
u8 tmp1, tmp2;
unsigned int tmp_freq;
if (!stock_freq || !policy)
return -EINVAL;
if (!stock_freq || !policy)
return -EINVAL;
policy->cpu = 0;
@ -431,7 +429,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
return 0;
}
/*
/*
* cpufreq_gx_init:
* MediaGX/Geode GX initialize cpufreq driver
*/
@ -452,7 +450,7 @@ static int __init cpufreq_gx_init(void)
u32 class_rev;
/* Test if we have the right hardware */
if ((gx_pci = gx_detect_chipset()) == NULL)
if ((gx_pci = gx_detect_chipset()) == NULL)
return -ENODEV;
/* check whether module parameters are sane */
@ -461,10 +459,9 @@ static int __init cpufreq_gx_init(void)
dprintk("geode suspend modulation available.\n");
params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
if (params == NULL)
return -ENOMEM;
memset(params, 0, sizeof(struct gxfreq_params));
params->cs55x0 = gx_pci;
gx_params = params;
@ -478,7 +475,7 @@ static int __init cpufreq_gx_init(void)
pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);
params->pci_rev = class_rev && 0xff;
if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
kfree(params);
return ret; /* register error! */
}

View File

@ -234,7 +234,7 @@ static int __initdata ezrat_eblcr[32] = {
/*
* VIA C3 Nehemiah */
static int __initdata nehemiah_a_clock_ratio[32] = {
100, /* 0000 -> 10.0x */
160, /* 0001 -> 16.0x */
@ -446,7 +446,7 @@ static int __initdata nehemiah_c_eblcr[32] = {
/* end of table */
};
/*
/*
* Voltage scales. Div/Mod by 1000 to get actual voltage.
* Which scale to use depends on the VRM type in use.
*/

View File

@ -14,7 +14,7 @@
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
*
*
* Date Errata Description
* 20020525 N44, O17 12.5% or 25% DC causes lockup
*
@ -22,7 +22,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
@ -30,7 +30,7 @@
#include <linux/cpumask.h>
#include <linux/sched.h> /* current / set_cpus_allowed() */
#include <asm/processor.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timex.h>
@ -79,7 +79,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
} else {
dprintk("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
/* bits 63 - 5 : reserved
/* bits 63 - 5 : reserved
* bit 4 : enable/disable
* bits 3-1 : duty cycle
* bit 0 : reserved
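A small userspace sketch (illustration only, not the driver's wrmsr path) of how the clock-modulation field described in the comment above is packed, and what duty cycle each step corresponds to per the dprintk earlier in this hunk:

#include <stdio.h>

/* Illustrative only: pack the clock-modulation field described above --
 * bit 4 enables throttling, bits 3-1 hold the duty-cycle step. */
static unsigned int clockmod_bits(unsigned int state)	/* state 1..7 */
{
	return (1u << 4) | ((state & 0x7) << 1);
}

int main(void)
{
	unsigned int state;

	for (state = 1; state <= 7; state++)
		printf("state %u -> low MSR bits 0x%02x, duty cycle %u.%u%%\n",
		       state, clockmod_bits(state),
		       (125 * state) / 10, (125 * state) % 10);
	return 0;
}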
@ -132,7 +132,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
}
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
* Developer's Manual, Volume 3
*/
cpus_allowed = current->cpus_allowed;
@ -206,7 +206,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D);
}
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
@ -234,7 +234,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
dprintk("has errata -- disabling frequencies lower than 2ghz\n");
break;
}
/* get max frequency */
stock_freq = cpufreq_p4_get_frequency(c);
if (!stock_freq)
@ -244,13 +244,13 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i<2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
else if (has_N60_errata[policy->cpu] && ((stock_freq * i)/8) < 2000000)
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = 1000000; /* assumed */
@ -262,7 +262,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
@ -298,7 +298,7 @@ static struct freq_attr* p4clockmod_attr[] = {
};
static struct cpufreq_driver p4clockmod_driver = {
.verify = cpufreq_p4_verify,
.verify = cpufreq_p4_verify,
.target = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
.exit = cpufreq_p4_cpu_exit,
@ -310,12 +310,12 @@ static struct cpufreq_driver p4clockmod_driver = {
static int __init cpufreq_p4_init(void)
{
{
struct cpuinfo_x86 *c = cpu_data;
int ret;
/*
* THERM_CONTROL is architectural for IA32 now, so
* THERM_CONTROL is architectural for IA32 now, so
* we can rely on the capability checks
*/
if (c->x86_vendor != X86_VENDOR_INTEL)

View File

@ -8,7 +8,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
@ -50,7 +50,7 @@ static int powernow_k6_get_cpu_multiplier(void)
{
u64 invalue = 0;
u32 msrval;
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue=inl(POWERNOW_IOPORT + 0x8);
@ -81,7 +81,7 @@ static void powernow_k6_set_state (unsigned int best_i)
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
freqs.new = busfreq * clock_ratio[best_i].index;
freqs.cpu = 0; /* powernow-k6.c is UP only driver */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* we now need to transform best_i to the BVC format, see AMD#23446 */
@ -152,7 +152,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
busfreq = cpu_khz / max_multiplier;
/* table init */
for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
if (clock_ratio[i].index > max_multiplier)
clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
else
@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
powernow_k6_set_state(i);
}
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return 0;
}
static unsigned int powernow_k6_get(unsigned int cpu)
@ -196,8 +196,8 @@ static struct freq_attr* powernow_k6_attr[] = {
};
static struct cpufreq_driver powernow_k6_driver = {
.verify = powernow_k6_verify,
.target = powernow_k6_target,
.verify = powernow_k6_verify,
.target = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
* on success.
*/
static int __init powernow_k6_init(void)
{
{
struct cpuinfo_x86 *c = cpu_data;
if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||

View File

@ -199,8 +199,8 @@ static int get_ranges (unsigned char *pst)
powernow_table[j].index |= (vid << 8); /* upper 8 bits */
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed/1000, vid,
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed/1000, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
}
@ -368,8 +368,8 @@ static int powernow_acpi_init(void)
}
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed/1000, vid,
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
fid_codes[fid] % 10, speed/1000, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
@ -460,7 +460,7 @@ static int powernow_decode_bios (int maxfid, int startvid)
(maxfid==pst->maxfid) && (startvid==pst->startvid))
{
dprintk ("PST:%d (@%p)\n", i, pst);
dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
ret = get_ranges ((char *) pst + sizeof (struct pst_s));

View File

@ -45,7 +45,7 @@
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
#define VERSION "version 1.60.0"
#define VERSION "version 1.60.1"
#include "powernow-k8.h"
/* serialize freq changes */
@ -83,11 +83,10 @@ static u32 find_millivolts_from_vid(struct powernow_k8_data *data, u32 vid)
*/
static u32 convert_fid_to_vco_fid(u32 fid)
{
if (fid < HI_FID_TABLE_BOTTOM) {
if (fid < HI_FID_TABLE_BOTTOM)
return 8 + (2 * fid);
} else {
else
return fid;
}
}
/*
@ -177,7 +176,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
if (i++ > 100) {
printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
return 1;
}
}
} while (query_current_values_with_pending_wait(data));
count_off_irt(data);
@ -474,8 +473,10 @@ static int check_supported_cpu(unsigned int cpu)
goto out;
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
if ((eax & CPUID_XFAM) != CPUID_XFAM_K8)
goto out;
if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
goto out;
@ -780,9 +781,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* verify only 1 entry from the lo frequency table */
if (fid < HI_FID_TABLE_BOTTOM) {
if (cntlofreq) {
/* if both entries are the same, ignore this
* one...
*/
/* if both entries are the same, ignore this one ... */
if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
(powernow_table[i].index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX "Too many lo freq table entries\n");
@ -854,7 +853,7 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
/* fid are the lower 8 bits of the index we stored into
* the cpufreq frequency table in find_psb_table, vid are
* the cpufreq frequency table in find_psb_table, vid are
* the upper 8 bits.
*/
@ -909,7 +908,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
u32 checkvid = data->currvid;
unsigned int newstate;
int ret = -EIO;
int i;
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
@ -955,12 +953,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
up(&fidvid_sem);
goto err_out;
}
/* Update all the fid/vids of our siblings */
for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
powernow_data[i]->currvid = data->currvid;
powernow_data[i]->currfid = data->currfid;
}
up(&fidvid_sem);
pol->cur = find_khz_freq_from_fid(data->currfid);
@ -984,7 +976,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
cpumask_t oldmask = CPU_MASK_ALL;
int rc, i;
int rc;
if (!cpu_online(pol->cpu))
return -ENODEV;
@ -1048,7 +1040,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
pol->cpus = cpu_core_map[pol->cpu];
/* Take a crude guess here.
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
+ (3 * (1 << data->irt) * 10)) * 1000;
@ -1070,9 +1062,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
powernow_data[i] = data;
}
powernow_data[pol->cpu] = data;
return 0;

View File

@ -63,7 +63,7 @@ struct powernow_k8_data {
#define MSR_C_LO_VID_SHIFT 8
/* Field definitions within the FID VID High Control MSR : */
#define MSR_C_HI_STP_GNT_TO 0x000fffff
#define MSR_C_HI_STP_GNT_TO 0x000fffff
/* Field definitions within the FID VID Low Status MSR : */
#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
@ -123,7 +123,7 @@ struct powernow_k8_data {
* Most values of interest are enocoded in a single field of the _PSS
* entries: the "control" value.
*/
#define IRT_SHIFT 30
#define RVO_SHIFT 28
#define EXT_TYPE_SHIFT 27
@ -185,7 +185,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
#ifndef for_each_cpu_mask
#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
#endif
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
{

View File

@ -479,15 +479,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
unsigned l, h;
int ret;
int i;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
/* Only Intel makes Enhanced Speedstep-capable CPUs */
if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
}
if (centrino_cpu_init_acpi(policy)) {
if (policy->cpu != 0)

View File

@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
@ -36,8 +36,8 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
struct {
unsigned int ratio; /* Frequency Multiplier (x10) */
u8 bitmap; /* power on configuration bits
[27, 25:22] (in MSR 0x2a) */
u8 bitmap; /* power on configuration bits
[27, 25:22] (in MSR 0x2a) */
} msr_decode_mult [] = {
{ 30, 0x01 },
{ 35, 0x05 },
@ -58,9 +58,9 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
struct {
unsigned int value; /* Front Side Bus speed in MHz */
u8 bitmap; /* power on configuration bits [18: 19]
(in MSR 0x2a) */
unsigned int value; /* Front Side Bus speed in MHz */
u8 bitmap; /* power on configuration bits [18: 19]
(in MSR 0x2a) */
} msr_decode_fsb [] = {
{ 66, 0x0 },
{ 100, 0x2 },
@ -68,8 +68,8 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
{ 0, 0xff}
};
u32 msr_lo, msr_tmp;
int i = 0, j = 0;
u32 msr_lo, msr_tmp;
int i = 0, j = 0;
/* read MSR 0x2a - we only need the low 32 bits */
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
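Once the multiplier and FSB have been looked up in the two tables above, the PIII core clock follows from a single multiplication. A hedged sketch of that final step (the scaling is inferred from the "x10" multiplier encoding and MHz FSB values in the tables; the helper name is made up):

#include <stdio.h>

/* Illustrative only: once the multiplier (x10) and FSB (in MHz) have been
 * looked up in the tables above, the core clock in kHz is their product
 * scaled back down by the x10 encoding. */
static unsigned int piii_khz(unsigned int ratio_x10, unsigned int fsb_mhz)
{
	return ratio_x10 * fsb_mhz * 100;	/* (ratio/10) * MHz * 1000 */
}

int main(void)
{
	/* e.g. an 8.0x multiplier on a 133 MHz FSB -> 1064000 kHz */
	printf("%u kHz\n", piii_khz(80, 133));
	return 0;
}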
@ -106,7 +106,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
static unsigned int pentiumM_get_frequency(void)
{
u32 msr_lo, msr_tmp;
u32 msr_lo, msr_tmp;
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
@ -134,7 +134,7 @@ static unsigned int pentium4_get_frequency(void)
dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
/* decode the FSB: see IA-32 Intel (C) Architecture Software
/* decode the FSB: see IA-32 Intel (C) Architecture Software
* Developer's Manual, Volume 3: System Prgramming Guide,
* revision #12 in Table B-1: MSRs in the Pentium 4 and
* Intel Xeon Processors, on page B-4 and B-5.
@ -170,7 +170,7 @@ static unsigned int pentium4_get_frequency(void)
return (fsb * mult);
}
unsigned int speedstep_get_processor_frequency(unsigned int processor)
{
switch (processor) {
@ -198,11 +198,11 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
unsigned int speedstep_detect_processor (void)
{
struct cpuinfo_x86 *c = cpu_data;
u32 ebx, msr_lo, msr_hi;
u32 ebx, msr_lo, msr_hi;
dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
if ((c->x86_vendor != X86_VENDOR_INTEL) ||
if ((c->x86_vendor != X86_VENDOR_INTEL) ||
((c->x86 != 6) && (c->x86 != 0xF)))
return 0;
@ -218,15 +218,15 @@ unsigned int speedstep_detect_processor (void)
dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
switch (c->x86_mask) {
case 4:
case 4:
/*
* B-stepping [M-P4-M]
* B-stepping [M-P4-M]
* sample has ebx = 0x0f, production has 0x0e.
*/
if ((ebx == 0x0e) || (ebx == 0x0f))
return SPEEDSTEP_PROCESSOR_P4M;
break;
case 7:
case 7:
/*
* C-stepping [M-P4-M]
* needs to have ebx=0x0e, else it's a celeron:
@ -253,7 +253,7 @@ unsigned int speedstep_detect_processor (void)
* also, M-P4M HTs have ebx=0x8, too
* For now, they are distinguished by the model_id string
*/
if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
return SPEEDSTEP_PROCESSOR_P4M;
break;
default:
@ -264,8 +264,7 @@ unsigned int speedstep_detect_processor (void)
switch (c->x86_model) {
case 0x0B: /* Intel PIII [Tualatin] */
/* cpuid_ebx(1) is 0x04 for desktop PIII,
0x06 for mobile PIII-M */
/* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */
ebx = cpuid_ebx(0x00000001);
dprintk("ebx is %x\n", ebx);
@ -275,9 +274,8 @@ unsigned int speedstep_detect_processor (void)
return 0;
/* So far all PIII-M processors support SpeedStep. See
* Intel's 24540640.pdf of June 2003
* Intel's 24540640.pdf of June 2003
*/
return SPEEDSTEP_PROCESSOR_PIII_T;
case 0x08: /* Intel PIII [Coppermine] */
@ -399,7 +397,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
}
}
out:
out:
local_irq_restore(flags);
return (ret);
}

View File

@ -14,7 +14,7 @@
#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */
#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */
#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */
/* the following processors are not speedstep-capable and are not auto-detected
@ -25,8 +25,8 @@
/* speedstep states -- only two of them */
#define SPEEDSTEP_HIGH 0x00000000
#define SPEEDSTEP_LOW 0x00000001
#define SPEEDSTEP_HIGH 0x00000000
#define SPEEDSTEP_LOW 0x00000001
/* detect a speedstep-capable processor */
@ -36,13 +36,13 @@ extern unsigned int speedstep_detect_processor (void);
extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
/* detect the low and high speeds of the processor. The callback
* set_state"'s first argument is either SPEEDSTEP_HIGH or
* SPEEDSTEP_LOW; the second argument is zero so that no
/* detect the low and high speeds of the processor. The callback
* set_state"'s first argument is either SPEEDSTEP_HIGH or
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
extern unsigned int speedstep_get_freqs(unsigned int processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
void (*set_state) (unsigned int state));
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
void (*set_state) (unsigned int state));

View File

@ -13,8 +13,8 @@
*********************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
@ -28,21 +28,21 @@
*
* These parameters are got from IST-SMI BIOS call.
* If user gives it, these are used.
*
*
*/
static int smi_port = 0;
static int smi_cmd = 0;
static unsigned int smi_sig = 0;
static int smi_port = 0;
static int smi_cmd = 0;
static unsigned int smi_sig = 0;
/* info about the processor */
static unsigned int speedstep_processor = 0;
static unsigned int speedstep_processor = 0;
/*
* There are only two frequency states for each processor. Values
/*
* There are only two frequency states for each processor. Values
* are in kHz for the time being.
*/
static struct cpufreq_frequency_table speedstep_freqs[] = {
{SPEEDSTEP_HIGH, 0},
{SPEEDSTEP_HIGH, 0},
{SPEEDSTEP_LOW, 0},
{0, CPUFREQ_TABLE_END},
};
@ -125,7 +125,7 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
*low = low_mhz * 1000;
return result;
}
}
/**
* speedstep_get_state - set the SpeedStep state
@ -206,7 +206,7 @@ static void speedstep_set_state (unsigned int state)
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
* @target_freq: new freq
* @relation:
* @relation:
*
* Sets a new CPUFreq policy/freq.
*/
@ -285,7 +285,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
state = speedstep_get_state();
speed = speedstep_freqs[state].frequency;
dprintk("currently at %s speed setting - %i MHz\n",
dprintk("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
(speed / 1000));
@ -298,7 +298,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
if (result)
return (result);
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
return 0;
}
@ -334,8 +334,8 @@ static struct freq_attr* speedstep_attr[] = {
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
.verify = speedstep_verify,
.target = speedstep_target,
.verify = speedstep_verify,
.target = speedstep_target,
.init = speedstep_cpu_init,
.exit = speedstep_cpu_exit,
.get = speedstep_get,
@ -372,13 +372,12 @@ static int __init speedstep_init(void)
return -ENODEV;
}
dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level);
/* Error if no IST-SMI BIOS or no PARM
/* Error if no IST-SMI BIOS or no PARM
sig= 'ISGE' aka 'Intel Speedstep Gate E' */
if ((ist_info.signature != 0x47534943) && (
if ((ist_info.signature != 0x47534943) && (
(smi_port == 0) || (smi_cmd == 0)))
return -ENODEV;
@ -388,17 +387,15 @@ static int __init speedstep_init(void)
smi_sig = ist_info.signature;
/* setup smi_port from MODLULE_PARM or BIOS */
if ((smi_port > 0xff) || (smi_port < 0)) {
if ((smi_port > 0xff) || (smi_port < 0))
return -EINVAL;
} else if (smi_port == 0) {
else if (smi_port == 0)
smi_port = ist_info.command & 0xff;
}
if ((smi_cmd > 0xff) || (smi_cmd < 0)) {
if ((smi_cmd > 0xff) || (smi_cmd < 0))
return -EINVAL;
} else if (smi_cmd == 0) {
else if (smi_cmd == 0)
smi_cmd = (ist_info.command >> 16) & 0xff;
}
return cpufreq_register_driver(&speedstep_driver);
}

View File

@ -5,7 +5,9 @@
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
*
* Oct 2005 - Ashok Raj <ashok.raj@intel.com>
* Added handling for CPU hotplug
* Added handling for CPU hotplug
* Feb 2006 - Jacob Shin <jacob.shin@amd.com>
* Fix handling for CPU hotplug -- affected CPUs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -44,8 +46,8 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
static void handle_update(void *data);
/**
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
* "transition" list for kernel code that needs to handle
* changes to devices when the CPU clock speed changes.
* The mutex locks both lists.
@ -151,7 +153,7 @@ void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt
va_list args;
unsigned int len;
unsigned long flags;
WARN_ON(!prefix);
if (type & debug) {
spin_lock_irqsave(&disable_ratelimit_lock, flags);
@ -198,7 +200,7 @@ static inline void cpufreq_debug_disable_ratelimit(void) { return; }
*
* This function alters the system "loops_per_jiffy" for the clock
* speed change. Note that loops_per_jiffy cannot be updated on SMP
* systems as each CPU might be scaled differently. So, use the arch
* systems as each CPU might be scaled differently. So, use the arch
* per-CPU loops_per_jiffy value wherever possible.
*/
#ifndef CONFIG_SMP
@ -233,7 +235,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) {
*
* This function calls the transition notifiers and the "adjust_jiffies"
* function. It is called twice on all CPU frequency changes that have
* external effects.
* external effects.
*/
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
@ -251,7 +253,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
switch (state) {
case CPUFREQ_PRECHANGE:
/* detect if the driver reported a value as "old frequency"
/* detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
*/
@ -335,11 +337,11 @@ extern struct sysdev_class cpu_sysdev_class;
* "unsigned int".
*/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy * policy, char *buf) \
{ \
return sprintf (buf, "%u\n", policy->object); \
#define show_one(file_name, object) \
static ssize_t show_##file_name \
(struct cpufreq_policy * policy, char *buf) \
{ \
return sprintf (buf, "%u\n", policy->object); \
}
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
@ -404,8 +406,8 @@ static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
/**
* store_scaling_governor - store policy for the specified CPU
*/
static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
const char *buf, size_t count)
static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
const char *buf, size_t count)
{
unsigned int ret = -EINVAL;
char str_governor[16];
@ -528,7 +530,7 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
return ret;
}
static ssize_t store(struct kobject * kobj, struct attribute * attr,
static ssize_t store(struct kobject * kobj, struct attribute * attr,
const char * buf, size_t count)
{
struct cpufreq_policy * policy = to_policy(kobj);
@ -564,7 +566,7 @@ static struct kobj_type ktype_cpufreq = {
/**
* cpufreq_add_dev - add a CPU device
*
* Adds the cpufreq interface for a CPU device.
* Adds the cpufreq interface for a CPU device.
*/
static int cpufreq_add_dev (struct sys_device * sys_dev)
{
@ -573,8 +575,12 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
struct cpufreq_policy new_policy;
struct cpufreq_policy *policy;
struct freq_attr **drv_attr;
struct sys_device *cpu_sys_dev;
unsigned long flags;
unsigned int j;
#ifdef CONFIG_SMP
struct cpufreq_policy *managed_policy;
#endif
if (cpu_is_offline(cpu))
return 0;
@ -587,8 +593,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
* CPU because it is in the same boat. */
policy = cpufreq_cpu_get(cpu);
if (unlikely(policy)) {
dprintk("CPU already managed, adding link\n");
sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
cpufreq_cpu_put(policy);
cpufreq_debug_enable_ratelimit();
return 0;
}
@ -623,6 +628,32 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
goto err_out;
}
#ifdef CONFIG_SMP
for_each_cpu_mask(j, policy->cpus) {
if (cpu == j)
continue;
/* check for existing affected CPUs. They may not be aware
* of it due to CPU Hotplug.
*/
managed_policy = cpufreq_cpu_get(j);
if (unlikely(managed_policy)) {
spin_lock_irqsave(&cpufreq_driver_lock, flags);
managed_policy->cpus = policy->cpus;
cpufreq_cpu_data[cpu] = managed_policy;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
dprintk("CPU already managed, adding link\n");
sysfs_create_link(&sys_dev->kobj,
&managed_policy->kobj, "cpufreq");
cpufreq_debug_enable_ratelimit();
mutex_unlock(&policy->lock);
ret = 0;
goto err_out_driver_exit; /* call driver->exit() */
}
}
#endif
memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
/* prepare interface data */
@ -650,6 +681,21 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
for_each_cpu_mask(j, policy->cpus)
cpufreq_cpu_data[j] = policy;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* symlink affected CPUs */
for_each_cpu_mask(j, policy->cpus) {
if (j == cpu)
continue;
if (!cpu_online(j))
continue;
dprintk("CPU already managed, adding link\n");
cpufreq_cpu_get(cpu);
cpu_sys_dev = get_cpu_sysdev(j);
sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
"cpufreq");
}
policy->governor = NULL; /* to assure that the starting sequence is
* run in cpufreq_set_policy */
mutex_unlock(&policy->lock);
@ -724,10 +770,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
#ifdef CONFIG_SMP
/* if this isn't the CPU which is the parent of the kobj, we
* only need to unlink, put and exit
*/
if (unlikely(cpu != data->cpu)) {
dprintk("removing link\n");
cpu_clear(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
sysfs_remove_link(&sys_dev->kobj, "cpufreq");
cpufreq_cpu_put(data);
@@ -740,7 +787,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
if (!kobject_get(&data->kobj)) {
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpufreq_debug_enable_ratelimit();
return -EFAULT;
}
#ifdef CONFIG_SMP
@@ -783,7 +830,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
kobject_put(&data->kobj);
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
*/
dprintk("waiting for dropping of refcount\n");
@@ -831,7 +878,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
}
/**
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
* @cpu: CPU number
*
@@ -855,7 +902,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
EXPORT_SYMBOL(cpufreq_quick_get);
/**
* cpufreq_get - get the current CPU frequency (in kHz)
* @cpu: CPU number
*
@@ -1072,7 +1119,7 @@ static struct sysdev_driver cpufreq_sysdev_driver = {
* @nb: notifier function to register
* @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
*
* Add a driver to one of two lists: either a list of drivers that
* are notified about clock rate changes (once before and once after
* the transition), or a list of drivers that are notified about
* changes in cpufreq policy.
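To make the notifier interface concrete, here is a minimal sketch of a transition-notifier client against the 2.6.16-era API; everything prefixed my_ is hypothetical and not part of this patch:

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int my_transition(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* log completed frequency transitions */
	if (val == CPUFREQ_POSTCHANGE)
		printk(KERN_DEBUG "cpu%u: %u -> %u kHz\n",
		       freqs->cpu, freqs->old, freqs->new);
	return 0;
}

static struct notifier_block my_nb = {
	.notifier_call = my_transition,
};

/* somewhere in module init:
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */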
@@ -1225,7 +1272,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
return -EINVAL;
mutex_lock(&cpufreq_governor_mutex);
list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
mutex_unlock(&cpufreq_governor_mutex);
@@ -1234,7 +1281,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
}
list_add(&governor->governor_list, &cpufreq_governor_list);
mutex_unlock(&cpufreq_governor_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
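A sketch of what a caller of cpufreq_register_governor() supplies, using the callback signature visible in the governors later in this diff; the my_ names are made up:

#include <linux/cpufreq.h>
#include <linux/module.h>

static int my_governor_fn(struct cpufreq_policy *policy, unsigned int event)
{
	/* hypothetical policy: always run at the upper limit */
	if (event == CPUFREQ_GOV_LIMITS)
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
	return 0;
}

static struct cpufreq_governor my_governor = {
	.name		= "mygov",
	.governor	= my_governor_fn,
	.owner		= THIS_MODULE,
};

/* registration would then be:
 *	cpufreq_register_governor(&my_governor);
 */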
@@ -1497,9 +1544,9 @@ static struct notifier_block cpufreq_cpu_notifier =
* @driver_data: A struct cpufreq_driver containing the values
* submitted by the CPU Frequency driver.
*
* Registers a CPU Frequency driver to this core code. This code
* returns zero on success, -EBUSY when another driver got here first
* (and isn't unregistered in the meantime).
*
*/
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
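For context, a minimal sketch of the registration described above, assuming the cpufreq_driver layout of this kernel; a real driver would also fill in .init, .target and .exit, and the mydrv name is hypothetical:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

/* hypothetical verify callback: clamp the policy to the hardware limits */
static int my_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static struct cpufreq_driver my_driver = {
	.name	= "mydrv",
	.verify	= my_verify,
	.owner	= THIS_MODULE,
};

static int __init my_driver_init(void)
{
	/* returns -EBUSY if another scaling driver registered first */
	return cpufreq_register_driver(&my_driver);
}
module_init(my_driver_init);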
@@ -1560,7 +1607,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/**
* cpufreq_unregister_driver - unregister the current CPUFreq driver
*
* Unregister the current CPUFreq driver. Only call this if you have
* the right to do so, i.e. if you have succeeded in initialising before!
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.


@@ -38,17 +38,17 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
* latency of the processor. The governor will work on any processor with
* transition latency <= 10mS, using appropriate sampling
* rate.
* For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
* this governor will not work.
* All times here are in uS.
*/
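A worked example of the numbers in the comment above (the latency value is made up): a CPU with a 100 uS transition latency gets a default sampling rate of 1000 * 100 uS = 100,000 uS, i.e. the governor re-evaluates the load about ten times per second.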
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
@@ -62,28 +62,28 @@ static unsigned int def_sampling_rate;
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
unsigned int prev_cpu_idle_up;
unsigned int prev_cpu_idle_down;
unsigned int enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
static DEFINE_MUTEX (dbs_mutex);
static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int ignore_nice;
};
static struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -106,8 +106,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
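For reference, define_one_ro(sampling_rate_max) above expands (reconstruction, not part of the patch) to a read-only sysfs attribute bound to its show routine:

static struct freq_attr sampling_rate_max =
__ATTR(sampling_rate_max, 0444, show_sampling_rate_max, NULL);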
@@ -125,7 +125,7 @@ show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -144,7 +144,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -163,7 +163,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -171,7 +171,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
ret = sscanf (buf, "%u", &input);
mutex_lock(&dbs_mutex);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
input < MIN_FREQUENCY_UP_THRESHOLD) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
@@ -190,14 +190,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
int ret;
unsigned int j;
ret = sscanf (buf, "%u", &input);
if ( ret != 1 )
return -EINVAL;
if ( input > 1 )
input = 1;
mutex_lock(&dbs_mutex);
if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
mutex_unlock(&dbs_mutex);
@@ -259,16 +259,16 @@ static void dbs_check_cpu(int cpu)
return;
policy = this_dbs_info->cur_policy;
/*
* Every sampling_rate, we check, if current idle time is less
* than 20% (default), then we try to increase frequency
* Every sampling_rate*sampling_down_factor, we look for the lowest
* frequency which can sustain the load while keeping idle time over
* 30%. If such a frequency exists, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
* 5% (default) of current frequency
*/
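A worked example of the scale-down estimate described above, with made-up numbers (70% idle at 2 GHz, up_threshold of 80):

#include <stdio.h>

int main(void)
{
	unsigned int cur_khz = 2000000, total_ticks = 1000, idle_ticks = 700;
	unsigned int up_threshold = 80;
	unsigned int load = (total_ticks - idle_ticks) * 100 / total_ticks;
	unsigned int freq_next = load * cur_khz / (up_threshold - 10);

	/* prints "target: 857142 kHz"; the governor then requests the
	 * lowest supported frequency at or above this value */
	printf("target: %u kHz\n", freq_next);
	return 0;
}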
/* Check for frequency increase */
@@ -298,14 +298,14 @@ static void dbs_check_cpu(int cpu)
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->prev_cpu_idle_down =
j_dbs_info->prev_cpu_idle_up;
}
/* if we are already at full speed then break out early */
if (policy->cur == policy->max)
return;
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
return;
}
@@ -347,7 +347,7 @@ static void dbs_check_cpu(int cpu)
* policy. To be safe, we focus 10 points under the threshold.
*/
freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
freq_next = (freq_next * policy->cur) /
(dbs_tuners_ins.up_threshold - 10);
if (freq_next <= ((policy->cur * 95) / 100))
@@ -355,15 +355,15 @@ static void dbs_check_cpu(int cpu)
}
static void do_dbs_timer(void *data)
{
int i;
mutex_lock(&dbs_mutex);
for_each_online_cpu(i)
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
mutex_unlock(&dbs_mutex);
}
static inline void dbs_timer_init(void)
{
@@ -390,7 +390,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
if ((!cpu_online(cpu)) ||
(!policy->cur))
return -EINVAL;
@@ -399,13 +399,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return -EINVAL;
if (this_dbs_info->enable) /* Already enabled */
break;
mutex_lock(&dbs_mutex);
for_each_cpu_mask(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_idle_down
= j_dbs_info->prev_cpu_idle_up;
@@ -435,7 +435,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_init();
}
mutex_unlock(&dbs_mutex);
break;
@@ -448,9 +448,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
* Stop the timerschedule work, when this governor
* is used for first time
*/
if (dbs_enable == 0)
dbs_timer_exit();
mutex_unlock(&dbs_mutex);
break;
@@ -460,11 +460,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
break;
}


@@ -32,7 +32,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
}
return 0;
}
struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
.governor = cpufreq_governor_performance,


@@ -31,7 +31,7 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
}
return 0;
}
static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.governor = cpufreq_governor_powersave,


@@ -2,7 +2,7 @@
* drivers/cpufreq/cpufreq_stats.c
*
* Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
* (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -90,7 +90,7 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
return 0;
cpufreq_stats_update(stat->cpu);
for (i = 0; i < stat->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
(unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
}
return len;
@@ -171,7 +171,7 @@ cpufreq_stats_free_table (unsigned int cpu)
{
struct cpufreq_stats *stat = cpufreq_stats_table[cpu];
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (policy && policy->cpu == cpu)
sysfs_remove_group(&policy->kobj, &stats_attr_group);
if (stat) {
kfree(stat->time_in_state);
@@ -303,7 +303,7 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
return 0;
}
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;


@@ -41,7 +41,7 @@ static DEFINE_MUTEX (userspace_mutex);
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
/* keep track of frequency transitions */
static int
userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
@@ -58,7 +58,7 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
};
/**
* cpufreq_set - set the CPU frequency
* @freq: target frequency in kHz
* @cpu: CPU for which the frequency is to be set
@@ -103,8 +103,8 @@ static ssize_t show_speed (struct cpufreq_policy *policy, char *buf)
return sprintf (buf, "%u\n", cpu_cur_freq[policy->cpu]);
}
static ssize_t
store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
{
unsigned int freq = 0;
unsigned int ret;
@@ -118,7 +118,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
return count;
}
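From userspace, the store_speed() hook above is reached by writing the sysfs file it backs; a small sketch (path from the standard sysfs layout, frequency made up), which only takes effect while the userspace governor is selected:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed", "w");

	if (!f)
		return 1;
	fprintf(f, "%u\n", 800000);	/* request 800 MHz (value is in kHz) */
	fclose(f);
	return 0;
}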
static struct freq_attr freq_attr_scaling_setspeed =
{
.attr = { .name = "scaling_setspeed", .mode = 0644, .owner = THIS_MODULE },
.show = show_speed,
@@ -135,7 +135,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
cpu_is_managed[cpu] = 1;
cpu_min_freq[cpu] = policy->min;
cpu_max_freq[cpu] = policy->max;
cpu_cur_freq[cpu] = policy->cur;


@@ -59,9 +59,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
if (!cpu_online(policy->cpu))
return -EINVAL;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
@@ -76,9 +75,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
if (!count)
policy->max = next_larger;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
dprintk("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu);
@@ -199,7 +197,7 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
* if you use these, you must assure that the frequency table is valid
* all the time between get_attr and put_attr!
*/
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu)
{
dprintk("setting show_table for cpu %u to %p\n", cpu, table);