diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c index 36c1ca0b9adb..58ad00b31ee9 100644 --- a/drivers/acpi/dispatcher/dsmethod.c +++ b/drivers/acpi/dispatcher/dsmethod.c @@ -51,133 +51,6 @@ #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsmethod") -/******************************************************************************* - * - * FUNCTION: acpi_ds_parse_method - * - * PARAMETERS: Node - Method node - * - * RETURN: Status - * - * DESCRIPTION: Parse the AML that is associated with the method. - * - * MUTEX: Assumes parser is locked - * - ******************************************************************************/ -acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) -{ - acpi_status status; - union acpi_operand_object *obj_desc; - union acpi_parse_object *op; - struct acpi_walk_state *walk_state; - - ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node); - - /* Parameter Validation */ - - if (!node) { - return_ACPI_STATUS(AE_NULL_ENTRY); - } - - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "**** Parsing [%4.4s] **** named_obj=%p\n", - acpi_ut_get_node_name(node), node)); - - /* Extract the method object from the method Node */ - - obj_desc = acpi_ns_get_attached_object(node); - if (!obj_desc) { - return_ACPI_STATUS(AE_NULL_OBJECT); - } - - /* Create a mutex for the method if there is a concurrency limit */ - - if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) && - (!obj_desc->method.semaphore)) { - status = acpi_os_create_semaphore(obj_desc->method.concurrency, - obj_desc->method.concurrency, - &obj_desc->method.semaphore); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } - - /* - * Allocate a new parser op to be the root of the parsed - * method tree - */ - op = acpi_ps_alloc_op(AML_METHOD_OP); - if (!op) { - return_ACPI_STATUS(AE_NO_MEMORY); - } - - /* Init new op with the method name and pointer back to the Node */ - - acpi_ps_set_name(op, node->name.integer); - op->common.node = node; - - /* - * Get a new owner_id for objects created by this method. Namespace - * objects (such as Operation Regions) can be created during the - * first pass parse. - */ - status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); - if (ACPI_FAILURE(status)) { - goto cleanup; - } - - /* Create and initialize a new walk state */ - - walk_state = - acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL, - NULL); - if (!walk_state) { - status = AE_NO_MEMORY; - goto cleanup2; - } - - status = acpi_ds_init_aml_walk(walk_state, op, node, - obj_desc->method.aml_start, - obj_desc->method.aml_length, NULL, 1); - if (ACPI_FAILURE(status)) { - acpi_ds_delete_walk_state(walk_state); - goto cleanup2; - } - - /* - * Parse the method, first pass - * - * The first pass load is where newly declared named objects are added into - * the namespace. Actual evaluation of the named objects (what would be - * called a "second pass") happens during the actual execution of the - * method so that operands to the named objects can take on dynamic - * run-time values. - */ - status = acpi_ps_parse_aml(walk_state); - if (ACPI_FAILURE(status)) { - goto cleanup2; - } - - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n", - acpi_ut_get_node_name(node), node, op)); - - /* - * Delete the parse tree. 
We simply re-parse the method for every - * execution since there isn't much overhead (compared to keeping lots - * of parse trees around) - */ - acpi_ns_delete_namespace_subtree(node); - acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id); - - cleanup2: - acpi_ut_release_owner_id(&obj_desc->method.owner_id); - - cleanup: - acpi_ps_delete_parse_tree(op); - return_ACPI_STATUS(status); -} - /******************************************************************************* * * FUNCTION: acpi_ds_begin_method_execution @@ -193,7 +66,6 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) * for clearance to execute. * ******************************************************************************/ - acpi_status acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, union acpi_operand_object *obj_desc, @@ -545,16 +417,54 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state) } } + /* + * There are no more threads executing this method. Perform + * additional cleanup. + * + * The method Node is stored in the walk state + */ + method_node = walk_state->method_node; + + /* Lock namespace for possible update */ + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + goto exit; + } + + /* + * Delete any namespace entries created immediately underneath + * the method + */ + if (method_node->child) { + acpi_ns_delete_namespace_subtree(method_node); + } + + /* + * Delete any namespace entries created anywhere else within + * the namespace by the execution of this method + */ + acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method. + owner_id); + status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + + /* Are there any other threads currently executing this method? */ + if (walk_state->method_desc->method.thread_count) { + /* + * Additional threads. Do not release the owner_id in this case, + * we immediately reuse it for the next thread executing this method + */ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, - "*** Not deleting method namespace, there are still %d threads\n", + "*** Completed execution of one thread, %d threads remaining\n", walk_state->method_desc->method. thread_count)); - } else { /* This is the last executing thread */ + } else { + /* This is the only executing thread for this method */ /* * Support to dynamically change a method from not_serialized to - * Serialized if it appears that the method is written foolishly and + * Serialized if it appears that the method is incorrectly written and * does not support multiple thread execution. The best example of this * is if such a method creates namespace objects and blocks. A second * thread will fail with an AE_ALREADY_EXISTS exception @@ -570,34 +480,8 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state) semaphore); } - /* - * There are no more threads executing this method. Perform - * additional cleanup. 
- * - * The method Node is stored in the walk state - */ - method_node = walk_state->method_node; + /* No more threads, we can free the owner_id */ - /* - * Delete any namespace entries created immediately underneath - * the method - */ - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); - if (ACPI_FAILURE(status)) { - goto exit; - } - - if (method_node->child) { - acpi_ns_delete_namespace_subtree(method_node); - } - - /* - * Delete any namespace entries created anywhere else within - * the namespace - */ - acpi_ns_delete_namespace_by_owner(walk_state->method_desc-> - method.owner_id); - status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); acpi_ut_release_owner_id(&walk_state->method_desc->method. owner_id); } @@ -606,3 +490,140 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state) (void)acpi_ut_release_mutex(ACPI_MTX_PARSER); return_VOID; } + +#ifdef ACPI_INIT_PARSE_METHODS + /* + * Note 11/2005: Removed this code to parse all methods during table + * load because it causes problems if there are any errors during the + * parse. Also, it seems like overkill and we probably don't want to + * abort a table load because of an issue with a single method. + */ + +/******************************************************************************* + * + * FUNCTION: acpi_ds_parse_method + * + * PARAMETERS: Node - Method node + * + * RETURN: Status + * + * DESCRIPTION: Parse the AML that is associated with the method. + * + * MUTEX: Assumes parser is locked + * + ******************************************************************************/ + +acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) +{ + acpi_status status; + union acpi_operand_object *obj_desc; + union acpi_parse_object *op; + struct acpi_walk_state *walk_state; + + ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node); + + /* Parameter Validation */ + + if (!node) { + return_ACPI_STATUS(AE_NULL_ENTRY); + } + + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "**** Parsing [%4.4s] **** named_obj=%p\n", + acpi_ut_get_node_name(node), node)); + + /* Extract the method object from the method Node */ + + obj_desc = acpi_ns_get_attached_object(node); + if (!obj_desc) { + return_ACPI_STATUS(AE_NULL_OBJECT); + } + + /* Create a mutex for the method if there is a concurrency limit */ + + if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) && + (!obj_desc->method.semaphore)) { + status = acpi_os_create_semaphore(obj_desc->method.concurrency, + obj_desc->method.concurrency, + &obj_desc->method.semaphore); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } + + /* + * Allocate a new parser op to be the root of the parsed + * method tree + */ + op = acpi_ps_alloc_op(AML_METHOD_OP); + if (!op) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + + /* Init new op with the method name and pointer back to the Node */ + + acpi_ps_set_name(op, node->name.integer); + op->common.node = node; + + /* + * Get a new owner_id for objects created by this method. Namespace + * objects (such as Operation Regions) can be created during the + * first pass parse. 
+ */ + status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); + if (ACPI_FAILURE(status)) { + goto cleanup; + } + + /* Create and initialize a new walk state */ + + walk_state = + acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL, + NULL); + if (!walk_state) { + status = AE_NO_MEMORY; + goto cleanup2; + } + + status = acpi_ds_init_aml_walk(walk_state, op, node, + obj_desc->method.aml_start, + obj_desc->method.aml_length, NULL, 1); + if (ACPI_FAILURE(status)) { + acpi_ds_delete_walk_state(walk_state); + goto cleanup2; + } + + /* + * Parse the method, first pass + * + * The first pass load is where newly declared named objects are added into + * the namespace. Actual evaluation of the named objects (what would be + * called a "second pass") happens during the actual execution of the + * method so that operands to the named objects can take on dynamic + * run-time values. + */ + status = acpi_ps_parse_aml(walk_state); + if (ACPI_FAILURE(status)) { + goto cleanup2; + } + + ACPI_DEBUG_PRINT((ACPI_DB_PARSE, + "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n", + acpi_ut_get_node_name(node), node, op)); + + /* + * Delete the parse tree. We simply re-parse the method for every + * execution since there isn't much overhead (compared to keeping lots + * of parse trees around) + */ + acpi_ns_delete_namespace_subtree(node); + acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id); + + cleanup2: + acpi_ut_release_owner_id(&obj_desc->method.owner_id); + + cleanup: + acpi_ps_delete_parse_tree(op); + return_ACPI_STATUS(status); +} +#endif diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c index 89d318cbc8a3..44d4f4bb2f92 100644 --- a/drivers/acpi/dispatcher/dswload.c +++ b/drivers/acpi/dispatcher/dswload.c @@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, char *path; u32 flags; - ACPI_FUNCTION_NAME("ds_load1_begin_op"); + ACPI_FUNCTION_TRACE("ds_load1_begin_op"); op = walk_state->op; ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, @@ -138,14 +138,14 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, if (op) { if (!(walk_state->op_info->flags & AML_NAMED)) { *out_op = op; - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* Check if this object has already been installed in the namespace */ if (op->common.node) { *out_op = op; - return (AE_OK); + return_ACPI_STATUS(AE_OK); } } @@ -188,7 +188,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, #endif if (ACPI_FAILURE(status)) { ACPI_REPORT_NSERROR(path, status); - return (status); + return_ACPI_STATUS(status); } /* @@ -235,7 +235,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, ACPI_REPORT_ERROR(("Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)\n", acpi_ut_get_type_name(node->type), path)); - return (AE_AML_OPERAND_TYPE); + return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } break; @@ -257,6 +257,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, * buffer_field, or Package), the name of the object is already * in the namespace. */ + if (walk_state->deferred_node) { /* This name is already in the namespace, get the node */ @@ -265,6 +266,16 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, break; } + /* + * If we are executing a method, do not create any namespace objects + * during the load phase, only during execution. 
+ */ + if (walk_state->method_node) { + node = NULL; + status = AE_OK; + break; + } + flags = ACPI_NS_NO_UPSEARCH; if ((walk_state->opcode != AML_SCOPE_OP) && (!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) { @@ -290,7 +301,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, &(node)); if (ACPI_FAILURE(status)) { ACPI_REPORT_NSERROR(path, status); - return (status); + return_ACPI_STATUS(status); } break; } @@ -302,28 +313,29 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, op = acpi_ps_alloc_op(walk_state->opcode); if (!op) { - return (AE_NO_MEMORY); + return_ACPI_STATUS(AE_NO_MEMORY); } } - /* Initialize */ - - op->named.name = node->name.integer; + /* Initialize the op */ #if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)) op->named.path = ACPI_CAST_PTR(u8, path); #endif - /* - * Put the Node in the "op" object that the parser uses, so we - * can get it again quickly when this scope is closed - */ - op->common.node = node; + if (node) { + /* + * Put the Node in the "op" object that the parser uses, so we + * can get it again quickly when this scope is closed + */ + op->common.node = node; + op->named.name = node->name.integer; + } + acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state), op); - *out_op = op; - return (status); + return_ACPI_STATUS(status); } /******************************************************************************* @@ -339,13 +351,13 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, * ******************************************************************************/ -acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) +acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) { union acpi_parse_object *op; acpi_object_type object_type; acpi_status status = AE_OK; - ACPI_FUNCTION_NAME("ds_load1_end_op"); + ACPI_FUNCTION_TRACE("ds_load1_end_op"); op = walk_state->op; ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op, @@ -354,7 +366,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) /* We are only interested in opcodes that have an associated name */ if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) { - return (AE_OK); + return_ACPI_STATUS(AE_OK); } /* Get the object type to determine if we should pop the scope */ @@ -363,21 +375,37 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) #ifndef ACPI_NO_METHOD_EXECUTION if (walk_state->op_info->flags & AML_FIELD) { - if (walk_state->opcode == AML_FIELD_OP || - walk_state->opcode == AML_BANK_FIELD_OP || - walk_state->opcode == AML_INDEX_FIELD_OP) { - status = acpi_ds_init_field_objects(op, walk_state); + /* + * If we are executing a method, do not create any namespace objects + * during the load phase, only during execution. + */ + if (!walk_state->method_node) { + if (walk_state->opcode == AML_FIELD_OP || + walk_state->opcode == AML_BANK_FIELD_OP || + walk_state->opcode == AML_INDEX_FIELD_OP) { + status = + acpi_ds_init_field_objects(op, walk_state); + } } - return (status); + return_ACPI_STATUS(status); } - if (op->common.aml_opcode == AML_REGION_OP) { - status = acpi_ex_create_region(op->named.data, op->named.length, - (acpi_adr_space_type) - ((op->common.value.arg)->common. - value.integer), walk_state); - if (ACPI_FAILURE(status)) { - return (status); + /* + * If we are executing a method, do not create any namespace objects + * during the load phase, only during execution. 
+ */ + if (!walk_state->method_node) { + if (op->common.aml_opcode == AML_REGION_OP) { + status = + acpi_ex_create_region(op->named.data, + op->named.length, + (acpi_adr_space_type) + ((op->common.value.arg)-> + common.value.integer), + walk_state); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } } } #endif @@ -391,7 +419,12 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) common. aml_opcode))-> object_type; - op->common.node->type = (u8) object_type; + + /* Set node type if we have a namespace node */ + + if (op->common.node) { + op->common.node->type = (u8) object_type; + } } } @@ -424,7 +457,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) walk_state->num_operands = 0; if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } } } @@ -439,7 +472,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) status = acpi_ds_scope_stack_pop(walk_state); } - return (status); + return_ACPI_STATUS(status); } /******************************************************************************* @@ -456,8 +489,8 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state) ******************************************************************************/ acpi_status -acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state, - union acpi_parse_object ** out_op) +acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, + union acpi_parse_object **out_op) { union acpi_parse_object *op; struct acpi_namespace_node *node; @@ -840,6 +873,13 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) case AML_TYPE_NAMED_FIELD: + /* + * If we are executing a method, initialize the field + */ + if (walk_state->method_node) { + status = acpi_ds_init_field_objects(op, walk_state); + } + switch (op->common.aml_opcode) { case AML_INDEX_FIELD_OP: @@ -929,6 +969,24 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) switch (op->common.aml_opcode) { #ifndef ACPI_NO_METHOD_EXECUTION case AML_REGION_OP: + + /* + * If we are executing a method, initialize the region + */ + if (walk_state->method_node) { + status = + acpi_ex_create_region(op->named.data, + op->named.length, + (acpi_adr_space_type) + ((op->common.value. + arg)->common.value. + integer), + walk_state); + if (ACPI_FAILURE(status)) { + return (status); + } + } + /* * The op_region is not fully parsed at this time. Only valid * argument is the space_id. 
(We must save the address of the diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c index 9faf1d5c86ed..864c642759fa 100644 --- a/drivers/acpi/namespace/nsdump.c +++ b/drivers/acpi/namespace/nsdump.c @@ -212,7 +212,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, /* * Now we can print out the pertinent information */ - acpi_os_printf(" %-12s %p ", acpi_ut_get_type_name(type), this_node); + acpi_os_printf(" %-12s %p %2.2X ", + acpi_ut_get_type_name(type), this_node, + this_node->owner_id); dbg_level = acpi_dbg_level; acpi_dbg_level = 0; diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c index d6813d88a104..6828c7aefa8a 100644 --- a/drivers/acpi/utilities/utglobal.c +++ b/drivers/acpi/utilities/utglobal.c @@ -793,6 +793,11 @@ void acpi_ut_init_globals(void) acpi_gbl_mutex_info[i].use_count = 0; } + for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) { + acpi_gbl_owner_id_mask[i] = 0; + } + acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; /* Last ID is never valid */ + /* GPE support */ acpi_gbl_gpe_xrupt_list_head = NULL; @@ -830,8 +835,8 @@ void acpi_ut_init_globals(void) acpi_gbl_ns_lookup_count = 0; acpi_gbl_ps_find_count = 0; acpi_gbl_acpi_hardware_present = TRUE; - acpi_gbl_owner_id_mask = 0; - acpi_gbl_last_owner_id = 0; + acpi_gbl_last_owner_id_index = 0; + acpi_gbl_next_owner_id_offset = 0; acpi_gbl_trace_method_name = 0; acpi_gbl_trace_dbg_level = 0; acpi_gbl_trace_dbg_layer = 0; diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c index 89efba7bf449..64dd64b1aa18 100644 --- a/drivers/acpi/utilities/utmisc.c +++ b/drivers/acpi/utilities/utmisc.c @@ -64,6 +64,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) { acpi_native_uint i; acpi_native_uint j; + acpi_native_uint k; acpi_status status; ACPI_FUNCTION_TRACE("ut_allocate_owner_id"); @@ -85,32 +86,50 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) /* * Find a free owner ID, cycle through all possible IDs on repeated - * allocations. Note: Index for next possible ID is equal to the value - * of the last allocated ID. + * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have + * to be scanned twice. */ - for (i = 0, j = acpi_gbl_last_owner_id; i < 32; i++, j++) { - if (j >= 32) { - j = 0; /* Wraparound to ID start */ + for (i = 0, j = acpi_gbl_last_owner_id_index; + i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) { + if (j >= ACPI_NUM_OWNERID_MASKS) { + j = 0; /* Wraparound to start of mask array */ } - if (!(acpi_gbl_owner_id_mask & (1 << j))) { - /* - * Found a free ID. The actual ID is the bit index plus one, - * making zero an invalid Owner ID. Save this as the last ID - * allocated and update the global ID mask. - */ - acpi_gbl_last_owner_id = (acpi_owner_id) (j + 1); - *owner_id = acpi_gbl_last_owner_id; + for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) { + if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) { + /* There are no free IDs in this mask */ - ACPI_DEBUG_PRINT((ACPI_DB_VALUES, - "Current owner_id mask: %8.8X New ID: %2.2X\n", - acpi_gbl_owner_id_mask, - (unsigned int) - acpi_gbl_last_owner_id)); + break; + } - acpi_gbl_owner_id_mask |= (1 << j); - goto exit; + if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) { + /* + * Found a free ID. The actual ID is the bit index plus one, + * making zero an invalid Owner ID. Save this as the last ID + * allocated and update the global ID mask. 
+ */ + acpi_gbl_owner_id_mask[j] |= (1 << k); + + acpi_gbl_last_owner_id_index = (u8) j; + acpi_gbl_next_owner_id_offset = (u8) (k + 1); + + /* + * Construct encoded ID from the index and bit position + * + * Note: Last [j].k (bit 255) is never used and is marked + * permanently allocated (prevents +1 overflow) + */ + *owner_id = + (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j)); + + ACPI_DEBUG_PRINT((ACPI_DB_VALUES, + "Allocated owner_id: %2.2X\n", + (unsigned int)*owner_id)); + goto exit; + } } + + acpi_gbl_next_owner_id_offset = 0; } /* @@ -124,7 +143,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) * methods, or there may be a bug where the IDs are not released. */ status = AE_OWNER_ID_LIMIT; - ACPI_REPORT_ERROR(("Could not allocate new owner_id (32 max), AE_OWNER_ID_LIMIT\n")); + ACPI_REPORT_ERROR(("Could not allocate new owner_id (255 max), AE_OWNER_ID_LIMIT\n")); exit: (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); @@ -141,7 +160,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) * control method or unloading a table. Either way, we would * ignore any error anyway. * - * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 32 + * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 * ******************************************************************************/ @@ -149,6 +168,8 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) { acpi_owner_id owner_id = *owner_id_ptr; acpi_status status; + acpi_native_uint index; + u32 bit; ACPI_FUNCTION_TRACE_U32("ut_release_owner_id", owner_id); @@ -158,7 +179,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) /* Zero is not a valid owner_iD */ - if ((owner_id == 0) || (owner_id > 32)) { + if ((owner_id == 0) || (owner_id > 255)) { ACPI_REPORT_ERROR(("Invalid owner_id: %2.2X\n", owner_id)); return_VOID; } @@ -174,10 +195,18 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr) owner_id--; + /* Decode ID to index/offset pair */ + + index = ACPI_DIV_32(owner_id); + bit = 1 << ACPI_MOD_32(owner_id); + /* Free the owner ID only if it is valid */ - if (acpi_gbl_owner_id_mask & (1 << owner_id)) { - acpi_gbl_owner_id_mask ^= (1 << owner_id); + if (acpi_gbl_owner_id_mask[index] & bit) { + acpi_gbl_owner_id_mask[index] ^= bit; + } else { + ACPI_REPORT_ERROR(("Release of non-allocated owner_id: %2.2X\n", + owner_id + 1)); } (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 08eafece3eed..f48b9ee9a876 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -63,7 +63,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20051117 +#define ACPI_CA_VERSION 0x20051202 /* * OS name, used for the _OS object. The _OS object is essentially obsolete, @@ -110,6 +110,10 @@ #define ACPI_SYSMEM_REGION_WINDOW_SIZE 4096 +/* owner_id tracking. 
8 entries allows for 255 owner_ids */ + +#define ACPI_NUM_OWNERID_MASKS 8 + /****************************************************************************** * * ACPI Specification constants (Do not change unless the specification changes) diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h index bd344e51313b..3f37560c26ab 100644 --- a/include/acpi/acglobal.h +++ b/include/acpi/acglobal.h @@ -220,10 +220,11 @@ ACPI_EXTERN u32 acpi_gbl_original_mode; ACPI_EXTERN u32 acpi_gbl_rsdp_original_location; ACPI_EXTERN u32 acpi_gbl_ns_lookup_count; ACPI_EXTERN u32 acpi_gbl_ps_find_count; -ACPI_EXTERN u32 acpi_gbl_owner_id_mask; +ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS]; ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save; ACPI_EXTERN u16 acpi_gbl_global_lock_handle; -ACPI_EXTERN u8 acpi_gbl_last_owner_id; +ACPI_EXTERN u8 acpi_gbl_last_owner_id_index; +ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset; ACPI_EXTERN u8 acpi_gbl_debugger_configuration; ACPI_EXTERN u8 acpi_gbl_global_lock_acquired; ACPI_EXTERN u8 acpi_gbl_step_to_next_call; diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h index 5b78ff4091b9..65a1a5c1a689 100644 --- a/include/acpi/acmacros.h +++ b/include/acpi/acmacros.h @@ -332,6 +332,10 @@ #define ACPI_MUL_16(a) _ACPI_MUL(a,4) #define ACPI_MOD_16(a) _ACPI_MOD(a,16) +#define ACPI_DIV_32(a) _ACPI_DIV(a,5) +#define ACPI_MUL_32(a) _ACPI_MUL(a,5) +#define ACPI_MOD_32(a) _ACPI_MOD(a,32) + /* * Rounding macros (Power of two boundaries only) */
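
A note on the owner-ID rework in utmisc.c/utglobal.c above: the single 32-bit acpi_gbl_owner_id_mask is replaced by an array of ACPI_NUM_OWNERID_MASKS (8) 32-bit masks, and each ID is encoded as (bit_position + 1) + 32 * mask_index, so valid IDs run 1-255 with zero still invalid; bit 255 is pre-set at init so the encoding can never overflow a u8. Below is a minimal standalone sketch of that scheme for reference only — the names owner_mask, alloc_owner_id and release_owner_id are invented for illustration, not ACPICA symbols:

```c
#include <stdio.h>
#include <stdint.h>

#define NUM_OWNERID_MASKS 8                 /* 8 x 32 bits => IDs 1..255 */

static uint32_t owner_mask[NUM_OWNERID_MASKS];
static uint8_t last_index;                  /* mask index to resume the scan at */
static uint8_t next_offset;                 /* bit offset to resume the scan at */

static void init_owner_ids(void)
{
	for (int i = 0; i < NUM_OWNERID_MASKS; i++)
		owner_mask[i] = 0;
	/* Bit 255 is permanently reserved so (k + 1) + 32 * j never exceeds 255 */
	owner_mask[NUM_OWNERID_MASKS - 1] = 0x80000000;
}

/* Returns an encoded ID 1..255, or 0 when all IDs are in use */
static uint8_t alloc_owner_id(void)
{
	/* NUM_OWNERID_MASKS + 1 passes: the first mask may need to be
	 * scanned twice because the scan resumes mid-mask at next_offset */
	uint32_t j = last_index;
	for (int i = 0; i < NUM_OWNERID_MASKS + 1; i++, j++) {
		if (j >= NUM_OWNERID_MASKS)
			j = 0;                  /* wrap to start of mask array */

		for (uint32_t k = next_offset; k < 32; k++) {
			if (owner_mask[j] == 0xFFFFFFFF)
				break;          /* no free IDs in this mask */

			if (!(owner_mask[j] & (1u << k))) {
				owner_mask[j] |= (1u << k);
				last_index = (uint8_t)j;
				next_offset = (uint8_t)(k + 1);
				/* Encoded ID: bit position + 1, plus 32 per mask index */
				return (uint8_t)((k + 1) + 32 * j);
			}
		}
		next_offset = 0;
	}
	return 0;                               /* equivalent of AE_OWNER_ID_LIMIT */
}

static void release_owner_id(uint8_t id)
{
	if (id == 0)
		return;                         /* zero is never a valid owner ID */
	id--;                                   /* back to a 0-based bit number */
	uint32_t index = id / 32;               /* ACPI_DIV_32: shift right by 5 */
	uint32_t bit = 1u << (id % 32);         /* ACPI_MOD_32: mask with 31 */
	if (owner_mask[index] & bit)
		owner_mask[index] ^= bit;
	else
		fprintf(stderr, "release of non-allocated owner id %u\n", id + 1);
}

int main(void)
{
	init_owner_ids();
	uint8_t a = alloc_owner_id();           /* 1 */
	uint8_t b = alloc_owner_id();           /* 2 */
	release_owner_id(a);
	/* The allocator cycles forward, so the next ID is 3, not the just-freed 1 */
	printf("allocated %u and %u, re-allocated %u\n",
	       (unsigned)a, (unsigned)b, (unsigned)alloc_owner_id());
	return 0;
}
```

As in the patched acpi_ut_allocate_owner_id(), allocation resumes where the last scan left off rather than always reusing the lowest free bit, which is why the released ID is not handed out again until the scan wraps around.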
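
The dsmethod.c/dswload.c changes above retire the table-load-time acpi_ds_parse_method() pass: while a control method is executing (walk_state->method_node is non-NULL), the load-phase callbacks skip creating regions, fields, and other named objects, and the execution-phase (load2) callbacks create them instead with run-time operand values; acpi_ds_terminate_control_method() then deletes whatever the method created as each thread finishes, releasing the owner ID only when the last thread exits. A toy sketch of that load-versus-execute decision follows — the struct, function names, and "_INI" example here are invented for illustration and are not ACPICA API:

```c
#include <stdio.h>

/* Hypothetical, simplified walk state: method_node != NULL means "we are
 * executing a control method", NULL means "we are loading a table". */
struct walk_state {
	const char *method_node;
};

/* Load phase (pass 1): create named objects only for a table load,
 * never while a method is executing. */
static void load1_named_object(const struct walk_state *ws, const char *name)
{
	if (ws->method_node) {
		printf("load1: deferring %s (inside method %s)\n",
		       name, ws->method_node);
		return;
	}
	printf("load1: creating %s at table-load time\n", name);
}

/* Execution phase (load2): the mirror image - objects declared inside the
 * running method are created here, when operand values are available. */
static void load2_named_object(const struct walk_state *ws, const char *name)
{
	if (ws->method_node)
		printf("load2: creating %s during execution of %s\n",
		       name, ws->method_node);
}

int main(void)
{
	struct walk_state table_load = { .method_node = NULL };
	struct walk_state method_exec = { .method_node = "_INI" };

	load1_named_object(&table_load, "OpRegion GPIO");   /* created at load */
	load1_named_object(&method_exec, "OpRegion GPIO");  /* deferred */
	load2_named_object(&method_exec, "OpRegion GPIO");  /* created at exec */
	return 0;
}
```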