Merge branches 'acpi-pm', 'acpi-apei', 'acpi-osl' and 'acpi-pci'

* acpi-pm:
  ACPI / PM: Add missing pm_generic_complete() invocation
  ACPI / PM: Turn power resources on and off in the right order during resume
  ACPI / PM: Rework device power management to follow ACPI 6
  ACPI / PM: Drop stale comment from acpi_power_transition()

* acpi-apei:
  GHES: Make NMI handler have a single reader
  GHES: Elliminate double-loop in the NMI handler
  GHES: Panic right after detection
  GHES: Carve out the panic functionality
  GHES: Carve out error queueing in a separate function

* acpi-osl:
  ACPI / osl: use same type for acpi_predefined_names values as in definition

* acpi-pci:
  ACPI / PCI: remove stale list_head in struct acpi_prt_entry

Rafael J. Wysocki 2015-06-19 01:16:21 +02:00
10 changed files with 167 additions and 129 deletions

drivers/acpi/apei/ghes.c

@@ -729,10 +729,10 @@ static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;
/*
* NMI may be triggered on any CPU, so ghes_nmi_lock is used for
* mutual exclusion.
* NMI may be triggered on any CPU, so ghes_in_nmi is used for
* having only one concurrent reader.
*/
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
static LIST_HEAD(ghes_nmi);
@@ -797,73 +797,75 @@ static void ghes_print_queued_estatus(void)
}
}
/* Save estatus for further processing in IRQ context */
static void __process_error(struct ghes *ghes)
{
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
u32 len, node_len;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic_status *estatus;
if (ghes_estatus_cached(ghes->estatus))
return;
len = cper_estatus_len(ghes->estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
if (!estatus_node)
return;
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
memcpy(estatus, ghes->estatus, len);
llist_add(&estatus_node->llnode, &ghes_estatus_llist);
#endif
}
static void __ghes_panic(struct ghes *ghes)
{
oops_begin();
ghes_print_queued_estatus();
__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
panic("Fatal hardware error!");
}
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
int sev, sev_global = -1;
int ret = NMI_DONE;
struct ghes *ghes;
int sev, ret = NMI_DONE;
if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
return ret;
raw_spin_lock(&ghes_nmi_lock);
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
}
sev = ghes_severity(ghes->estatus->error_severity);
if (sev > sev_global) {
sev_global = sev;
ghes_global = ghes;
}
if (sev >= GHES_SEV_PANIC)
__ghes_panic(ghes);
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
__process_error(ghes);
ghes_clear_estatus(ghes);
ret = NMI_HANDLED;
}
if (ret == NMI_DONE)
goto out;
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
ghes_print_queued_estatus();
__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
panic("Fatal hardware error!");
}
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
u32 len, node_len;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic_status *estatus;
#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
if (ghes_estatus_cached(ghes->estatus))
goto next;
/* Save estatus for further processing in IRQ context */
len = cper_estatus_len(ghes->estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
node_len);
if (estatus_node) {
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
memcpy(estatus, ghes->estatus, len);
llist_add(&estatus_node->llnode, &ghes_estatus_llist);
}
next:
#endif
ghes_clear_estatus(ghes);
}
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
irq_work_queue(&ghes_proc_irq_work);
#endif
out:
raw_spin_unlock(&ghes_nmi_lock);
atomic_dec(&ghes_in_nmi);
return ret;
}
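
The locking change above boils down to the following gate (a minimal sketch with a made-up function name, not literal patch code): the first CPU that enters the NMI handler claims ghes_in_nmi, and every other CPU returns NMI_DONE immediately instead of serializing on the old ghes_nmi_lock spinlock.

#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/ptrace.h>

static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

static int ghes_nmi_gate_sketch(unsigned int cmd, struct pt_regs *regs)
{
	int ret = NMI_DONE;

	/* atomic_add_unless() refuses to increment once the value is 1,
	 * so at most one reader is ever inside the handler. */
	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
		return ret;

	/* ... walk ghes_nmi, queue estatus records, set ret = NMI_HANDLED ... */

	atomic_dec(&ghes_in_nmi);	/* open the gate for the next NMI */
	return ret;
}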

drivers/acpi/device_pm.c

@@ -98,17 +98,16 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
/*
* The power resources settings may indicate a power state
* shallower than the actual power state of the device.
* shallower than the actual power state of the device, because
* the same power resources may be referenced by other devices.
*
* Moreover, on systems predating ACPI 4.0, if the device
* doesn't depend on any power resources and _PSC returns 3,
* that means "power off". We need to maintain compatibility
* with those systems.
* For systems predating ACPI 4.0 we assume that D3hot is the
* deepest state that can be supported.
*/
if (psc > result && psc < ACPI_STATE_D3_COLD)
result = psc;
else if (result == ACPI_STATE_UNKNOWN)
result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc;
result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_HOT : psc;
}
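
The inference above reads more easily as a standalone helper (hypothetical name, sketch only; "result" is the state derived from the power resources and "psc" is what _PSC returned):

#include <linux/acpi.h>

static int acpi_inferred_power_state(int result, int psc)
{
	/* Shared power resources can make the state look shallower than it
	 * is, so trust a deeper _PSC value (but never jump to D3cold). */
	if (psc > result && psc < ACPI_STATE_D3_COLD)
		result = psc;
	else if (result == ACPI_STATE_UNKNOWN)
		/* No power resources and _PSC returned "3": treat it as D3hot. */
		result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_HOT : psc;

	return result;
}
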
/*
@@ -153,8 +152,8 @@ static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
*/
int acpi_device_set_power(struct acpi_device *device, int state)
{
int target_state = state;
int result = 0;
bool cut_power = false;
if (!device || !device->flags.power_manageable
|| (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
@@ -169,11 +168,21 @@ int acpi_device_set_power(struct acpi_device *device, int state)
return 0;
}
if (!device->power.states[state].flags.valid) {
if (state == ACPI_STATE_D3_COLD) {
/*
* For transitions to D3cold we need to execute _PS3 and then
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
/* If _PR3 is not available, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {
dev_warn(&device->dev, "Power state %s not supported\n",
acpi_power_state_string(state));
return -ENODEV;
}
if (!device->power.flags.ignore_parent &&
device->parent && (state < device->parent->power.state)) {
dev_warn(&device->dev,
@@ -183,39 +192,38 @@ int acpi_device_set_power(struct acpi_device *device, int state)
return -ENODEV;
}
/* For D3cold we should first transition into D3hot. */
if (state == ACPI_STATE_D3_COLD
&& device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) {
state = ACPI_STATE_D3_HOT;
cut_power = true;
}
if (state < device->power.state && state != ACPI_STATE_D0
&& device->power.state >= ACPI_STATE_D3_HOT) {
dev_warn(&device->dev,
"Cannot transition to non-D0 state from D3\n");
return -ENODEV;
}
/*
* Transition Power
* ----------------
* In accordance with the ACPI specification first apply power (via
* power resources) and then evaluate _PSx.
* In accordance with ACPI 6, _PSx is executed before manipulating power
* resources, unless the target state is D0, in which case _PS0 is
* supposed to be executed after turning the power resources on.
*/
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, state);
if (state > ACPI_STATE_D0) {
/*
* According to ACPI 6, devices cannot go from lower-power
* (deeper) states to higher-power (shallower) states.
*/
if (state < device->power.state) {
dev_warn(&device->dev, "Cannot transition from %s to %s\n",
acpi_power_state_string(device->power.state),
acpi_power_state_string(state));
return -ENODEV;
}
result = acpi_dev_pm_explicit_set(device, state);
if (result)
goto end;
}
result = acpi_dev_pm_explicit_set(device, state);
if (result)
goto end;
if (cut_power) {
device->power.state = state;
state = ACPI_STATE_D3_COLD;
result = acpi_power_transition(device, state);
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
} else {
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, ACPI_STATE_D0);
if (result)
goto end;
}
result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
}
end:
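
Condensed into one hypothetical helper that reuses the internal routines from the hunk, the reordering makes acpi_device_set_power() behave roughly as follows (error paths, parent checks and state bookkeeping omitted): for targets deeper than D0 the _PSx method runs before the power-resource transition, while a return to D0 turns the power resources on first and runs _PS0 last.

static int acpi_set_power_order_sketch(struct acpi_device *device,
				       int state, int target_state)
{
	int result;

	if (state > ACPI_STATE_D0) {
		/* ACPI 6 order: evaluate _PSx first ... */
		result = acpi_dev_pm_explicit_set(device, state);
		if (result)
			return result;
		/* ... then reference/drop the power resources. */
		if (device->power.flags.power_resources)
			result = acpi_power_transition(device, target_state);
	} else {
		/* Back to D0: power resources on first, _PS0 last. */
		if (device->power.flags.power_resources) {
			result = acpi_power_transition(device, ACPI_STATE_D0);
			if (result)
				return result;
		}
		result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
	}

	return result;
}
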
@@ -264,13 +272,24 @@ int acpi_bus_init_power(struct acpi_device *device)
return result;
if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) {
/* Reference count the power resources. */
result = acpi_power_on_resources(device, state);
if (result)
return result;
result = acpi_dev_pm_explicit_set(device, state);
if (result)
return result;
if (state == ACPI_STATE_D0) {
/*
* If _PSC is not present and the state inferred from
* power resources appears to be D0, it still may be
* necessary to execute _PS0 at this point, because
* another device using the same power resources may
* have been put into D0 previously and that's why we
* see D0 here.
*/
result = acpi_dev_pm_explicit_set(device, state);
if (result)
return result;
}
} else if (state == ACPI_STATE_UNKNOWN) {
/*
* No power resources and missing _PSC? Cross fingers and make
@@ -603,12 +622,12 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD)
return -EINVAL;
if (d_max_in > ACPI_STATE_D3_HOT) {
if (d_max_in > ACPI_STATE_D2) {
enum pm_qos_flags_status stat;
stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
if (stat == PM_QOS_FLAGS_ALL)
d_max_in = ACPI_STATE_D3_HOT;
d_max_in = ACPI_STATE_D2;
}
adev = ACPI_COMPANION(dev);
@@ -953,6 +972,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
*/
void acpi_subsys_complete(struct device *dev)
{
pm_generic_complete(dev);
/*
* If the device had been runtime-suspended before the system went into
* the sleep state it is going out of and it has never been resumed till

drivers/acpi/fan.c

@@ -158,8 +158,9 @@ static int fan_get_state(struct acpi_device *device, unsigned long *state)
if (result)
return result;
*state = (acpi_state == ACPI_STATE_D3_COLD ? 0 :
(acpi_state == ACPI_STATE_D0 ? 1 : -1));
*state = acpi_state == ACPI_STATE_D3_COLD
|| acpi_state == ACPI_STATE_D3_HOT ?
0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1);
return 0;
}
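
Pulled out into a helper (hypothetical, not part of the patch), the reworked mapping above is: D3hot and D3cold both report the fan as off (cooling state 0), D0 reports it as on (1), and any other state is flagged as -1.

#include <linux/acpi.h>

static long fan_speed_from_power_state(int acpi_state)
{
	if (acpi_state == ACPI_STATE_D3_COLD || acpi_state == ACPI_STATE_D3_HOT)
		return 0;				/* fan is off */

	return acpi_state == ACPI_STATE_D0 ? 1 : -1;	/* on, or unknown */
}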

drivers/acpi/osl.c

@@ -536,7 +536,7 @@ static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
acpi_string * new_val)
char **new_val)
{
if (!init_val || !new_val)
return AE_BAD_PARAMETER;

drivers/acpi/pci_irq.c

@@ -44,7 +44,6 @@
ACPI_MODULE_NAME("pci_irq");
struct acpi_prt_entry {
struct list_head list;
struct acpi_pci_id id;
u8 pin;
acpi_handle link;

drivers/acpi/power.c

@@ -684,7 +684,8 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
}
}
*state = ACPI_STATE_D3_COLD;
*state = device->power.states[ACPI_STATE_D3_COLD].flags.valid ?
ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
return 0;
}
@@ -710,8 +711,6 @@ int acpi_power_transition(struct acpi_device *device, int state)
|| (device->power.state > ACPI_STATE_D3_COLD))
return -ENODEV;
/* TBD: Resources must be ordered. */
/*
* First we reference all power resources required in the target list
* (e.g. so the device doesn't lose power while transitioning). Then,
@@ -761,6 +760,25 @@ static void acpi_power_sysfs_remove(struct acpi_device *device)
device_remove_file(&device->dev, &dev_attr_resource_in_use);
}
static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource)
{
mutex_lock(&power_resource_list_lock);
if (!list_empty(&acpi_power_resource_list)) {
struct acpi_power_resource *r;
list_for_each_entry(r, &acpi_power_resource_list, list_node)
if (r->order > resource->order) {
list_add_tail(&resource->list_node, &r->list_node);
goto out;
}
}
list_add_tail(&resource->list_node, &acpi_power_resource_list);
out:
mutex_unlock(&power_resource_list_lock);
}
int acpi_add_power_resource(acpi_handle handle)
{
struct acpi_power_resource *resource;
@@ -811,9 +829,7 @@ int acpi_add_power_resource(acpi_handle handle)
if (!device_create_file(&device->dev, &dev_attr_resource_in_use))
device->remove = acpi_power_sysfs_remove;
mutex_lock(&power_resource_list_lock);
list_add(&resource->list_node, &acpi_power_resource_list);
mutex_unlock(&power_resource_list_lock);
acpi_power_add_resource_to_list(resource);
acpi_device_add_finalize(device);
return 0;
@@ -844,7 +860,22 @@ void acpi_resume_power_resources(void)
&& resource->ref_count) {
dev_info(&resource->device.dev, "Turning ON\n");
__acpi_power_on(resource);
} else if (state == ACPI_POWER_RESOURCE_STATE_ON
}
mutex_unlock(&resource->resource_lock);
}
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
int result, state;
mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(resource->device.handle, &state);
if (result) {
mutex_unlock(&resource->resource_lock);
continue;
}
if (state == ACPI_POWER_RESOURCE_STATE_ON
&& !resource->ref_count) {
dev_info(&resource->device.dev, "Turning OFF\n");
__acpi_power_off(resource);
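
With acpi_power_resource_list now kept sorted by each resource's "order" value, the resume path shown above reduces to two passes over the list; this sketch (a hypothetical helper reusing the file-local symbols, with locking and the power-state reads omitted) captures the intent: referenced resources are switched on lowest-order first, unused ones are switched off highest-order first.

static void acpi_resume_power_resources_sketch(void)
{
	struct acpi_power_resource *resource;

	/* Forward pass: resources that are still referenced must be ON. */
	list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
		if (resource->ref_count)
			__acpi_power_on(resource);
	}

	/* Reverse pass: resources nobody references any more go OFF. */
	list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
		if (!resource->ref_count)
			__acpi_power_off(resource);
	}
}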

drivers/acpi/scan.c

@@ -1768,15 +1768,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
if (acpi_has_method(device->handle, pathname))
ps->flags.explicit_set = 1;
/*
* State is valid if there are means to put the device into it.
* D3hot is only valid if _PR3 present.
*/
if (!list_empty(&ps->resources)
|| (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) {
/* State is valid if there are means to put the device into it. */
if (!list_empty(&ps->resources) || ps->flags.explicit_set)
ps->flags.valid = 1;
ps->flags.os_accessible = 1;
}
ps->power = -1; /* Unknown - driver assigned */
ps->latency = -1; /* Unknown - driver assigned */
@@ -1812,21 +1806,13 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
/* Set defaults for D0 and D3 states (always valid) */
/* Set defaults for D0 and D3hot states (always valid) */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
device->power.states[ACPI_STATE_D3_COLD].power = 0;
/* Set D3cold's explicit_set flag if _PS3 exists. */
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
/* Presence of _PS3 or _PRx means we can put the device into D3 cold */
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set ||
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
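
Taken together with the power.c and acpi_bus.h hunks, the validity rules after this change can be summarized in a short sketch (hypothetical helper, not literal patch code): a state is valid if it has power resources or an explicit _PSx method, D0 and D3hot are always valid, and D3cold is valid only when _PR3 power resources exist, which is what acpi_device_can_poweroff() now checks.

#include <linux/acpi.h>

static void acpi_power_state_validity_sketch(struct acpi_device *device)
{
	int i;

	for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
		struct acpi_device_power_state *ps = &device->power.states[i];

		/* Reachable via _PRx resources or an explicit _PSx method. */
		if (!list_empty(&ps->resources) || ps->flags.explicit_set)
			ps->flags.valid = 1;
	}

	/* D0 and D3hot are always treated as valid. */
	device->power.states[ACPI_STATE_D0].flags.valid = 1;
	device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;

	/* D3cold requires _PR3 power resources. */
	if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
		device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
}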

drivers/pci/pci-acpi.c

@@ -420,7 +420,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
[PCI_D2] = ACPI_STATE_D2,
[PCI_D3hot] = ACPI_STATE_D3_COLD,
[PCI_D3hot] = ACPI_STATE_D3_HOT,
[PCI_D3cold] = ACPI_STATE_D3_COLD,
};
int error = -EINVAL;

include/acpi/acpi_bus.h

@@ -272,7 +272,6 @@ struct acpi_device_power_flags {
struct acpi_device_power_state {
struct {
u8 valid:1;
u8 os_accessible:1;
u8 explicit_set:1; /* _PSx present? */
u8 reserved:6;
} flags;
@@ -602,7 +601,7 @@ static inline bool acpi_device_can_wakeup(struct acpi_device *adev)
static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible;
return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
}
#else /* CONFIG_ACPI */

include/acpi/acpiosxf.h

@@ -95,7 +95,7 @@ acpi_physical_address acpi_os_get_root_pointer(void);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_predefined_override
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
acpi_string * new_val);
char **new_val);
#endif
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_table_override