alistair23-linux/drivers/acpi/acpica/hwgpe.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: hwgpe - Low level GPE enable/disable/clear functions
 *
 * Copyright (C) 2000 - 2021, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwgpe")

#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */

/* Local prototypes */

static acpi_status
acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
				struct acpi_gpe_block_info *gpe_block,
				void *context);

static acpi_status
acpi_hw_gpe_enable_write(u8 enable_mask,
			 struct acpi_gpe_register_info *gpe_register_info);
/******************************************************************************
*
* FUNCTION: acpi_hw_gpe_read
*
* PARAMETERS: value - Where the value is returned
* reg - GPE register structure
*
* RETURN: Status
*
* DESCRIPTION: Read from a GPE register in either memory or IO space.
*
* LIMITATIONS: <These limitations also apply to acpi_hw_gpe_write>
* space_ID must be system_memory or system_IO.
*
******************************************************************************/
acpi_status acpi_hw_gpe_read(u64 *value, struct acpi_gpe_address *reg)
{
	acpi_status status;
	u32 value32;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
		*value = (u64)ACPI_GET8((unsigned long)reg->address);

		return_ACPI_STATUS(AE_OK);
#else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   value, ACPI_GPE_REGISTER_WIDTH);
#endif
	}

	status = acpi_os_read_port((acpi_io_address)reg->address,
				   &value32, ACPI_GPE_REGISTER_WIDTH);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);

	*value = (u64)value32;

	return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_gpe_write
*
* PARAMETERS: value - Value to be written
* reg - GPE register structure
*
* RETURN: Status
*
* DESCRIPTION: Write to a GPE register in either memory or IO space.
*
******************************************************************************/
acpi_status acpi_hw_gpe_write(u64 value, struct acpi_gpe_address *reg)
{
	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
		ACPI_SET8((unsigned long)reg->address, value);

		return_ACPI_STATUS(AE_OK);
#else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    value, ACPI_GPE_REGISTER_WIDTH);
#endif
	}

	return acpi_os_write_port((acpi_io_address)reg->address, (u32)value,
				  ACPI_GPE_REGISTER_WIDTH);
}
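
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * both accessors take a struct acpi_gpe_address, so a caller holding a
 * struct acpi_gpe_register_info can test a GPE's raw status bit roughly as
 * follows (this is the pattern used by acpi_hw_get_gpe_status() below):
 *
 *	u64 in_byte;
 *	acpi_status status;
 *
 *	status = acpi_hw_gpe_read(&in_byte,
 *				  &gpe_register_info->status_address);
 *	if (ACPI_SUCCESS(status) && (register_bit & in_byte)) {
 *		... this GPE's status bit is currently set ...
 *	}
 */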
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_register_bit
*
* PARAMETERS: gpe_event_info - Info block for the GPE
*
* RETURN: Register mask with a one in the GPE bit position
*
* DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
* correct position for the input GPE.
*
******************************************************************************/
u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
{

	return ((u32)1 <<
		(gpe_event_info->gpe_number -
		 gpe_event_info->register_info->base_gpe_number));
}
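
/*
 * Worked example (editor's addition): for a hypothetical GPE number 0x4A in a
 * register whose base_gpe_number is 0x48, the function returns
 * 1 << (0x4A - 0x48) == 0x04, i.e. a mask selecting bit 2 of that register.
 */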
/******************************************************************************
*
* FUNCTION: acpi_hw_low_set_gpe
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be disabled
* action - Enable or disable
*
* RETURN: Status
*
* DESCRIPTION: Enable or disable a single GPE in the parent enable register.
* The enable_mask field of the involved GPE register must be
* updated by the caller if necessary.
*
******************************************************************************/
acpi_status
acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
{
	struct acpi_gpe_register_info *gpe_register_info;
	acpi_status status = AE_OK;
	u64 enable_mask;
	u32 register_bit;

	ACPI_FUNCTION_ENTRY();

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return (AE_NOT_EXIST);
	}

	/* Get current value of the enable register that contains this GPE */

	status = acpi_hw_gpe_read(&enable_mask,
				  &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Set or clear just the bit that corresponds to this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
	switch (action) {
	case ACPI_GPE_CONDITIONAL_ENABLE:

		/* Only enable if the corresponding enable_mask bit is set */

		if (!(register_bit & gpe_register_info->enable_mask)) {
			return (AE_BAD_PARAMETER);
		}

		ACPI_FALLTHROUGH;

	case ACPI_GPE_ENABLE:

		ACPI_SET_BIT(enable_mask, register_bit);
		break;

	case ACPI_GPE_DISABLE:

		ACPI_CLEAR_BIT(enable_mask, register_bit);
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u", action));
		return (AE_BAD_PARAMETER);
	}

	if (!(register_bit & gpe_register_info->mask_for_run)) {

		/* Write the updated enable mask */

		status = acpi_hw_gpe_write(enable_mask,
					   &gpe_register_info->enable_address);
	}
	return (status);
}
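
/*
 * Illustrative sketch (editor's addition): a typical caller passes one of the
 * three action values, for example
 *
 *	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
 *
 * ACPI_GPE_CONDITIONAL_ENABLE differs from ACPI_GPE_ENABLE in that the bit is
 * only set again if it is still present in the cached enable_mask most
 * recently written by acpi_hw_gpe_enable_write() below.
 */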
/******************************************************************************
*
* FUNCTION: acpi_hw_clear_gpe
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be cleared
*
* RETURN: Status
*
* DESCRIPTION: Clear the status bit for a single GPE.
*
******************************************************************************/
acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	acpi_status status;
	u32 register_bit;

	ACPI_FUNCTION_ENTRY();

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return (AE_NOT_EXIST);
	}

	/*
	 * Write a one to the appropriate bit in the status register to
	 * clear this GPE.
	 */
	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	status = acpi_hw_gpe_write(register_bit,
				   &gpe_register_info->status_address);
	return (status);
}
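
/*
 * Note (editor's addition): GPE status bits are write-one-to-clear, so
 * writing only register_bit clears this GPE's status without disturbing the
 * other GPEs that share the same status register.
 */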
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_status
*
* PARAMETERS: gpe_event_info - Info block for the GPE to be queried
* event_status - Where the GPE status is returned
*
* RETURN: Status
*
* DESCRIPTION: Return the status of a single GPE.
*
******************************************************************************/
acpi_status
acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
		       acpi_event_status *event_status)
{
	u64 in_byte;
	u32 register_bit;
	struct acpi_gpe_register_info *gpe_register_info;
	acpi_event_status local_event_status = 0;
	acpi_status status;

	ACPI_FUNCTION_ENTRY();

	if (!event_status) {
		return (AE_BAD_PARAMETER);
	}

	/* GPE currently handled? */

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
	    ACPI_GPE_DISPATCH_NONE) {
		local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
	}

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;

	/* Get the register bitmask for this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* GPE currently enabled? (enabled for runtime?) */

	if (register_bit & gpe_register_info->enable_for_run) {
		local_event_status |= ACPI_EVENT_FLAG_ENABLED;
	}

	/* GPE currently masked? (masked for runtime?) */

	if (register_bit & gpe_register_info->mask_for_run) {
		local_event_status |= ACPI_EVENT_FLAG_MASKED;
	}

	/* GPE enabled for wake? */

	if (register_bit & gpe_register_info->enable_for_wake) {
		local_event_status |= ACPI_EVENT_FLAG_WAKE_ENABLED;
	}

	/* GPE currently enabled (enable bit == 1)? */

	status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (register_bit & in_byte) {
		local_event_status |= ACPI_EVENT_FLAG_ENABLE_SET;
	}

	/* GPE currently active (status bit == 1)? */

	status = acpi_hw_gpe_read(&in_byte, &gpe_register_info->status_address);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (register_bit & in_byte) {
		local_event_status |= ACPI_EVENT_FLAG_STATUS_SET;
	}

	/* Set return value */

	(*event_status) = local_event_status;
	return (AE_OK);
}
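
/*
 * Illustrative sketch (editor's addition): a caller can test the combined
 * flags returned here, for example
 *
 *	acpi_event_status evt;
 *
 *	if (ACPI_SUCCESS(acpi_hw_get_gpe_status(gpe_event_info, &evt)) &&
 *	    (evt & ACPI_EVENT_FLAG_STATUS_SET)) {
 *		... this GPE's hardware status bit is currently set ...
 *	}
 */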
/******************************************************************************
*
* FUNCTION: acpi_hw_gpe_enable_write
*
* PARAMETERS: enable_mask - Bit mask to write to the GPE register
* gpe_register_info - Gpe Register info
*
* RETURN: Status
*
* DESCRIPTION: Write the enable mask byte to the given GPE register.
*
******************************************************************************/
static acpi_status
acpi_hw_gpe_enable_write(u8 enable_mask,
			 struct acpi_gpe_register_info *gpe_register_info)
{
	acpi_status status;

	gpe_register_info->enable_mask = enable_mask;

	status = acpi_hw_gpe_write(enable_mask,
				   &gpe_register_info->enable_address);
	return (status);
}
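
/*
 * Note (editor's addition): enable_mask is cached in the register info before
 * the hardware write so that acpi_hw_low_set_gpe() sees the most recently
 * written mask when it handles ACPI_GPE_CONDITIONAL_ENABLE.
 */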
/******************************************************************************
*
* FUNCTION: acpi_hw_disable_gpe_block
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Disable all GPEs within a single GPE block
*
******************************************************************************/
acpi_status
acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			  struct acpi_gpe_block_info *gpe_block, void *context)
{
	u32 i;
	acpi_status status;

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Disable all GPEs in this register */

		status =
		    acpi_hw_gpe_enable_write(0x00,
					     &gpe_block->register_info[i]);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	return (AE_OK);
}
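
/*
 * Note (editor's addition): writing 0x00 through acpi_hw_gpe_enable_write()
 * also zeroes the cached enable_mask, so subsequent
 * ACPI_GPE_CONDITIONAL_ENABLE requests fail with AE_BAD_PARAMETER until the
 * GPE is explicitly enabled again.
 */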
/******************************************************************************
*
* FUNCTION: acpi_hw_clear_gpe_block
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Clear status bits for all GPEs within a single GPE block
*
******************************************************************************/
acpi_status
acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			struct acpi_gpe_block_info *gpe_block, void *context)
{
	u32 i;
	acpi_status status;

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Clear status on all GPEs in this register */

		status = acpi_hw_gpe_write(0xFF,
					   &gpe_block->register_info[i].status_address);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	return (AE_OK);
}
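
/*
 * Note (editor's addition): because status bits are write-one-to-clear, a
 * single 0xFF write clears every pending status bit in the register at once.
 */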
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_runtime_gpe_block
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Enable all "runtime" GPEs within a single GPE block. Includes
* combination wake/run GPEs.
*
******************************************************************************/
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
				 struct acpi_gpe_block_info *gpe_block,
				 void *context)
{
	u32 i;
	acpi_status status;
	struct acpi_gpe_register_info *gpe_register_info;
	u8 enable_mask;

	/* NOTE: assumes that all GPEs are currently disabled */

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {
		gpe_register_info = &gpe_block->register_info[i];
		if (!gpe_register_info->enable_for_run) {
			continue;
		}

		/* Enable all "runtime" GPEs in this register */

		enable_mask = gpe_register_info->enable_for_run &
		    ~gpe_register_info->mask_for_run;
		status =
		    acpi_hw_gpe_enable_write(enable_mask, gpe_register_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	return (AE_OK);
}
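
/*
 * Worked example (editor's addition): with a hypothetical enable_for_run of
 * 0x0F and mask_for_run of 0x02, the value written to the enable register is
 * 0x0F & ~0x02 == 0x0D, i.e. masked runtime GPEs stay disabled.
 */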
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_wakeup_gpe_block
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Enable all "wake" GPEs within a single GPE block. Includes
* combination wake/run GPEs.
*
******************************************************************************/
static acpi_status
acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context)
{
u32 i;
acpi_status status;
ACPICA: Save current masks of enabled GPEs after enable register writes There is a race condition between acpi_hw_disable_all_gpes() or acpi_enable_all_wakeup_gpes() and acpi_ev_asynch_enable_gpe() such that if the latter wins the race, it may mistakenly enable a GPE disabled by the former. This may lead to premature system wakeups during system suspend and potentially to more serious consequences. The source of the problem is how acpi_hw_low_set_gpe() works when passed ACPI_GPE_CONDITIONAL_ENABLE as the second argument. In that case, the GPE will be enabled if the corresponding bit is set in the enable_for_run mask of the GPE enable register containing that bit. However, acpi_hw_disable_all_gpes() and acpi_enable_all_wakeup_gpes() don't modify the enable_for_run masks of GPE registers when writing to them. In consequence, if acpi_ev_asynch_enable_gpe(), which eventually calls acpi_hw_low_set_gpe() with the second argument equal to ACPI_GPE_CONDITIONAL_ENABLE, is executed in parallel with one of these functions, it may reverse changes made by them. To fix the problem, introduce a new enable_mask field in struct acpi_gpe_register_info in which to store the current mask of enabled GPEs and modify acpi_hw_low_set_gpe() to take this mask into account instead of enable_for_run when its second argument is equal to ACPI_GPE_CONDITIONAL_ENABLE. Also modify the low-level routines called by acpi_hw_disable_all_gpes(), acpi_enable_all_wakeup_gpes() and acpi_enable_all_runtime_gpes() to update the enable_mask masks of GPE registers after all (successful) writes to those registers. Acked-by: Lv Zheng <lv.zheng@intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2014-12-01 15:50:16 -07:00
struct acpi_gpe_register_info *gpe_register_info;
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
gpe_register_info = &gpe_block->register_info[i];
/*
* Enable all "wake" GPEs in this register and disable the
* remaining ones.
*/
status =
acpi_hw_gpe_enable_write(gpe_register_info->enable_for_wake,
gpe_register_info);
if (ACPI_FAILURE(status)) {
return (status);
}
}
return (AE_OK);
}
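
/*
 * Worked example (illustrative values only, not taken from the ACPICA
 * sources): the write above replaces the entire enable register with the
 * wake mask, so for an 8-bit GPE enable register:
 *
 *	enable_for_run  = 0x81;		// GPEs 0 and 7 enabled at runtime
 *	enable_for_wake = 0x22;		// GPEs 1 and 5 are wake-capable
 *
 *	(void)acpi_hw_gpe_enable_write(0x22, gpe_register_info);
 *	// The enable register now reads 0x22: only the wake GPEs stay
 *	// enabled and the runtime-only GPEs 0 and 7 are disabled.
 */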
struct acpi_gpe_block_status_context {
struct acpi_gpe_register_info *gpe_skip_register_info;
u8 gpe_skip_mask;
u8 retval;
};
/******************************************************************************
*
* FUNCTION: acpi_hw_get_gpe_block_status
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
* context - GPE list walk context data
*
* RETURN: Success
*
* DESCRIPTION: Produce a combined GPE status bit mask for the given block.
*
******************************************************************************/
static acpi_status
acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context)
{
struct acpi_gpe_block_status_context *c = context;
struct acpi_gpe_register_info *gpe_register_info;
u64 in_enable, in_status;
acpi_status status;
u8 ret_mask;
u32 i;
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
gpe_register_info = &gpe_block->register_info[i];
status = acpi_hw_gpe_read(&in_enable,
&gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
continue;
}
status = acpi_hw_gpe_read(&in_status,
&gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
continue;
}
ret_mask = in_enable & in_status;
if (ret_mask && c->gpe_skip_register_info == gpe_register_info) {
ret_mask &= ~c->gpe_skip_mask;
}
c->retval |= ret_mask;
}
return (AE_OK);
}
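
/*
 * Worked example (illustrative values only): with in_enable = 0x30 and
 * in_status = 0x38, only GPEs that are both enabled and asserted count,
 * so ret_mask = 0x30 & 0x38 = 0x30. If this happens to be the "skip"
 * register and gpe_skip_mask = 0x10, the skipped GPE is masked off and
 * only 0x20 is OR-ed into c->retval.
 */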
/******************************************************************************
*
* FUNCTION: acpi_hw_disable_all_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Disable and clear all GPEs in all GPE blocks
*
******************************************************************************/
acpi_status acpi_hw_disable_all_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
return_ACPI_STATUS(status);
}
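
/*
 * Illustrative sketch (hypothetical handler and context names): the
 * acpi_hw_*_all_* helpers in this file all follow the same pattern of
 * handing a per-block routine to acpi_ev_walk_gpe_list(), which invokes
 * it once for every GPE block on every GPE interrupt level:
 *
 *	status = acpi_ev_walk_gpe_list(my_per_block_handler, my_context);
 *
 * where my_per_block_handler has the same (gpe_xrupt_info, gpe_block,
 * context) signature as the handlers in this file and my_context is
 * optional private data such as the acpi_gpe_block_status_context
 * defined above.
 */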
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_all_runtime_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
*
******************************************************************************/
acpi_status acpi_hw_enable_all_runtime_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block, NULL);
return_ACPI_STATUS(status);
}
/******************************************************************************
*
* FUNCTION: acpi_hw_enable_all_wakeup_gpes
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Enable all "wakeup" GPEs, in all GPE blocks
*
******************************************************************************/
acpi_status acpi_hw_enable_all_wakeup_gpes(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
return_ACPI_STATUS(status);
}
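
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * host suspend path would typically mask everything first and then arm
 * only the wake-capable GPEs:
 *
 *	(void)acpi_hw_disable_all_gpes();	// disable and clear all GPEs
 *	(void)acpi_hw_enable_all_wakeup_gpes();	// enable only wake GPEs
 *
 * Code outside ACPICA would normally go through the public interfaces
 * (for example acpi_enable_all_wakeup_gpes()) rather than calling these
 * hardware-level routines directly.
 */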
/******************************************************************************
*
* FUNCTION: acpi_hw_check_all_gpes
*
* PARAMETERS: gpe_skip_device - GPE device of the GPE to skip
* gpe_skip_number - Number of the GPE to skip
*
* RETURN: Combined status of all GPEs
*
* DESCRIPTION: Check all enabled GPEs in all GPE blocks, except for the one
* represented by the "skip" arguments, and return TRUE if the
* status bit is set for at least one of them or FALSE otherwise.
*
******************************************************************************/
u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number)
{
struct acpi_gpe_block_status_context context = {
.gpe_skip_register_info = NULL,
.retval = 0,
};
struct acpi_gpe_event_info *gpe_event_info;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_skip_device,
gpe_skip_number);
if (gpe_event_info) {
context.gpe_skip_register_info = gpe_event_info->register_info;
context.gpe_skip_mask = acpi_hw_get_gpe_register_bit(gpe_event_info);
}
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
(void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &context);
return (context.retval != 0);
}
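
/*
 * Example (illustrative, with hypothetical ec_gpe_device/ec_gpe_number
 * identifying the one GPE to ignore): a caller that wants to know
 * whether any GPE other than, say, the EC GPE is currently enabled and
 * asserted could do:
 *
 *	if (acpi_hw_check_all_gpes(ec_gpe_device, ec_gpe_number)) {
 *		// At least one non-skipped GPE has both its enable
 *		// and status bits set
 *	}
 */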
#endif /* !ACPI_REDUCED_HARDWARE */