python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: pswalk - Parser routines to walk parsed op tree(s) * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("pswalk") /******************************************************************************* * * FUNCTION: acpi_ps_delete_parse_tree * * PARAMETERS: subtree_root - Root of tree (or subtree) to delete * * RETURN: None * * DESCRIPTION: Delete a portion of or an entire parse tree. * ******************************************************************************/ #include "amlcode.h" void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root) { union acpi_parse_object *op = subtree_root; union acpi_parse_object *next = NULL; union acpi_parse_object *parent = NULL; u32 level = 0; ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root); ACPI_DEBUG_PRINT((ACPI_DB_PARSE_TREES, " root %p\n", subtree_root)); /* Visit all nodes in the subtree */ while (op) { if (op != parent) { /* This is the descending case */ if (ACPI_IS_DEBUG_ENABLED (ACPI_LV_PARSE_TREES, _COMPONENT)) { /* This debug option will print the entire parse tree */ acpi_os_printf(" %*.s%s %p", (level * 4), " ", acpi_ps_get_opcode_name(op-> common. aml_opcode), op); if (op->named.aml_opcode == AML_INT_NAMEPATH_OP) { acpi_os_printf(" %4.4s", op->common.value.string); } if (op->named.aml_opcode == AML_STRING_OP) { acpi_os_printf(" %s", op->common.value.string); } acpi_os_printf("\n"); } /* Look for an argument or child of the current op */ next = acpi_ps_get_arg(op, 0); if (next) { /* Still going downward in tree (Op is not completed yet) */ op = next; level++; continue; } } /* No more children, this Op is complete. 
*/ next = op->common.next; parent = op->common.parent; acpi_ps_free_op(op); /* If we are back to the starting point, the walk is complete. */ if (op == subtree_root) { return_VOID; } if (next) { op = next; } else { level--; op = parent; } } return_VOID; }
linux-master
drivers/acpi/acpica/pswalk.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: uteval - Object evaluation * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("uteval") /******************************************************************************* * * FUNCTION: acpi_ut_evaluate_object * * PARAMETERS: prefix_node - Starting node * path - Path to object from starting node * expected_return_types - Bitmap of allowed return types * return_desc - Where a return value is stored * * RETURN: Status * * DESCRIPTION: Evaluates a namespace object and verifies the type of the * return object. Common code that simplifies accessing objects * that have required return objects of fixed types. * * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node, const char *path, u32 expected_return_btypes, union acpi_operand_object **return_desc) { struct acpi_evaluate_info *info; acpi_status status; u32 return_btype; ACPI_FUNCTION_TRACE(ut_evaluate_object); /* Allocate the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->prefix_node = prefix_node; info->relative_pathname = path; /* Evaluate the object/method */ status = acpi_ns_evaluate(info); if (ACPI_FAILURE(status)) { if (status == AE_NOT_FOUND) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s.%s] was not found\n", acpi_ut_get_node_name(prefix_node), path)); } else { ACPI_ERROR_METHOD("Method execution failed", prefix_node, path, status); } goto cleanup; } /* Did we get a return object? 
*/ if (!info->return_object) { if (expected_return_btypes) { ACPI_ERROR_METHOD("No object was returned from", prefix_node, path, AE_NOT_EXIST); status = AE_NOT_EXIST; } goto cleanup; } /* Map the return object type to the bitmapped type */ switch ((info->return_object)->common.type) { case ACPI_TYPE_INTEGER: return_btype = ACPI_BTYPE_INTEGER; break; case ACPI_TYPE_BUFFER: return_btype = ACPI_BTYPE_BUFFER; break; case ACPI_TYPE_STRING: return_btype = ACPI_BTYPE_STRING; break; case ACPI_TYPE_PACKAGE: return_btype = ACPI_BTYPE_PACKAGE; break; default: return_btype = 0; break; } if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) { /* * We received a return object, but one was not expected. This can * happen frequently if the "implicit return" feature is enabled. * Just delete the return object and return AE_OK. */ acpi_ut_remove_reference(info->return_object); goto cleanup; } /* Is the return object one of the expected types? */ if (!(expected_return_btypes & return_btype)) { ACPI_ERROR_METHOD("Return object type is incorrect", prefix_node, path, AE_TYPE); ACPI_ERROR((AE_INFO, "Type returned from %s was incorrect: %s, expected Btypes: 0x%X", path, acpi_ut_get_object_type_name(info->return_object), expected_return_btypes)); /* On error exit, we must delete the return object */ acpi_ut_remove_reference(info->return_object); status = AE_TYPE; goto cleanup; } /* Object type is OK, return it */ *return_desc = info->return_object; cleanup: ACPI_FREE(info); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_evaluate_numeric_object * * PARAMETERS: object_name - Object name to be evaluated * device_node - Node for the device * value - Where the value is returned * * RETURN: Status * * DESCRIPTION: Evaluates a numeric namespace object for a selected device * and stores result in *Value. 
* * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status acpi_ut_evaluate_numeric_object(const char *object_name, struct acpi_namespace_node *device_node, u64 *value) { union acpi_operand_object *obj_desc; acpi_status status; ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object); status = acpi_ut_evaluate_object(device_node, object_name, ACPI_BTYPE_INTEGER, &obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Get the returned Integer */ *value = obj_desc->integer.value; /* On exit, we must delete the return object */ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_execute_STA * * PARAMETERS: device_node - Node for the device * flags - Where the status flags are returned * * RETURN: Status * * DESCRIPTION: Executes _STA for selected device and stores results in * *Flags. If _STA does not exist, then the device is assumed * to be present/functional/enabled (as per the ACPI spec). * * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags) { union acpi_operand_object *obj_desc; acpi_status status; ACPI_FUNCTION_TRACE(ut_execute_STA); status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA, ACPI_BTYPE_INTEGER, &obj_desc); if (ACPI_FAILURE(status)) { if (AE_NOT_FOUND == status) { /* * if _STA does not exist, then (as per the ACPI specification), * the returned flags will indicate that the device is present, * functional, and enabled. 
*/ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "_STA on %4.4s was not found, assuming device is present\n", acpi_ut_get_node_name(device_node))); *flags = ACPI_UINT32_MAX; status = AE_OK; } return_ACPI_STATUS(status); } /* Extract the status flags */ *flags = (u32) obj_desc->integer.value; /* On exit, we must delete the return object */ acpi_ut_remove_reference(obj_desc); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_execute_power_methods * * PARAMETERS: device_node - Node for the device * method_names - Array of power method names * method_count - Number of methods to execute * out_values - Where the power method values are returned * * RETURN: Status, out_values * * DESCRIPTION: Executes the specified power methods for the device and returns * the result(s). * * NOTE: Internal function, no parameter validation * ******************************************************************************/ acpi_status acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, const char **method_names, u8 method_count, u8 *out_values) { union acpi_operand_object *obj_desc; acpi_status status; acpi_status final_status = AE_NOT_FOUND; u32 i; ACPI_FUNCTION_TRACE(ut_execute_power_methods); for (i = 0; i < method_count; i++) { /* * Execute the power method (_sx_d or _sx_w). The only allowable * return type is an Integer. 
*/ status = acpi_ut_evaluate_object(device_node, ACPI_CAST_PTR(char, method_names[i]), ACPI_BTYPE_INTEGER, &obj_desc); if (ACPI_SUCCESS(status)) { out_values[i] = (u8)obj_desc->integer.value; /* Delete the return object */ acpi_ut_remove_reference(obj_desc); final_status = AE_OK; /* At least one value is valid */ continue; } out_values[i] = ACPI_UINT8_MAX; if (status == AE_NOT_FOUND) { continue; /* Ignore if not found */ } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Failed %s on Device %4.4s, %s\n", ACPI_CAST_PTR(char, method_names[i]), acpi_ut_get_node_name(device_node), acpi_format_exception(status))); } return_ACPI_STATUS(final_status); }
linux-master
drivers/acpi/acpica/uteval.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: exdump - Interpreter debug output routines * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exdump") /* * The following routines are used for debug output only */ #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) /* Local prototypes */ static void acpi_ex_out_string(const char *title, const char *value); static void acpi_ex_out_pointer(const char *title, const void *value); static void acpi_ex_dump_object(union acpi_operand_object *obj_desc, struct acpi_exdump_info *info); static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc); static void acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, u32 level, u32 index); /******************************************************************************* * * Object Descriptor info tables * * Note: The first table entry must be an INIT opcode and must contain * the table length (number of table entries) * ******************************************************************************/ static struct acpi_exdump_info acpi_ex_dump_integer[2] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_integer), NULL}, {ACPI_EXD_UINT64, ACPI_EXD_OFFSET(integer.value), "Value"} }; static struct acpi_exdump_info acpi_ex_dump_string[4] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_string), NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(string.length), "Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(string.pointer), "Pointer"}, {ACPI_EXD_STRING, 0, NULL} }; static struct acpi_exdump_info acpi_ex_dump_buffer[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), 
"Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(buffer.node), "Parent Node"}, {ACPI_EXD_BUFFER, 0, NULL} }; static struct acpi_exdump_info acpi_ex_dump_package[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Element Count"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"}, {ACPI_EXD_PACKAGE, 0, NULL} }; static struct acpi_exdump_info acpi_ex_dump_device[4] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]), "Device Notify"}, {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(device.handler), "Handler"} }; static struct acpi_exdump_info acpi_ex_dump_event[2] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"} }; static struct acpi_exdump_info acpi_ex_dump_method[9] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "Parameter Count"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"}, {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.aml_start), "Aml Start"} }; static struct acpi_exdump_info acpi_ex_dump_mutex[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, {ACPI_EXD_UINT8, 
ACPI_EXD_OFFSET(mutex.original_sync_level), "Original Sync Level"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), "Acquire Depth"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} }; static struct acpi_exdump_info acpi_ex_dump_region[8] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(region.node), "Parent Node"}, {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"}, {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(region.handler), "Handler"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"} }; static struct acpi_exdump_info acpi_ex_dump_power[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level), "System Level"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order), "Resource Order"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]), "Device Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.handler), "Handler"} }; static struct acpi_exdump_info acpi_ex_dump_processor[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"}, {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[0]), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[1]), "Device Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"} }; static struct acpi_exdump_info acpi_ex_dump_thermal[4] = { 
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[0]), "System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[1]), "Device Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"} }; static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer_field.buffer_obj), "Buffer Object"} }; static struct acpi_exdump_info acpi_ex_dump_region_field[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer), "ResourceBuffer"} }; static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(bank_field.value), "Value"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.region_obj), "Region Object"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(bank_field.bank_obj), "Bank Object"} }; static struct acpi_exdump_info acpi_ex_dump_index_field[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_bank_field), NULL}, {ACPI_EXD_FIELD, 0, NULL}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(index_field.value), "Value"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.index_obj), "Index Object"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(index_field.data_obj), "Data Object"} }; static struct acpi_exdump_info acpi_ex_dump_reference[9] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_reference), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.class), "Class"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), 
"Value"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.index_pointer), "Index Pointer"}, {ACPI_EXD_REFERENCE, 0, NULL} }; static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"}, {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(address_space.next), "Next"}, {ACPI_EXD_RGN_LIST, ACPI_EXD_OFFSET(address_space.region_list), "Region List"}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(address_space.node), "Node"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"} }; static struct acpi_exdump_info acpi_ex_dump_notify[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(notify.node), "Node"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[0]), "Next System Notify"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"} }; static struct acpi_exdump_info acpi_ex_dump_extra[6] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_extra), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.method_REG), "_REG Method"}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(extra.scope_node), "Scope Node"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.region_context), "Region Context"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.aml_start), "Aml Start"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(extra.aml_length), "Aml Length"} }; static struct acpi_exdump_info acpi_ex_dump_data[3] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_data), NULL}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.handler), "Handler"}, 
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.pointer), "Raw Data"} }; /* Miscellaneous tables */ static struct acpi_exdump_info acpi_ex_dump_common[5] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL}, {ACPI_EXD_TYPE, 0, NULL}, {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count), "Reference Count"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}, {ACPI_EXD_LIST, ACPI_EXD_OFFSET(common.next_object), "Object List"} }; static struct acpi_exdump_info acpi_ex_dump_field_common[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_field_common), NULL}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.field_flags), "Field Flags"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.access_byte_width), "Access Byte Width"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.bit_length), "Bit Length"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common_field.start_field_bit_offset), "Field Bit Offset"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset), "Base Byte Offset"}, {ACPI_EXD_NODE, ACPI_EXD_OFFSET(common_field.node), "Parent Node"} }; static struct acpi_exdump_info acpi_ex_dump_node[7] = { {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL}, {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(flags), "Flags"}, {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"}, {ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"}, {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"}, {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"}, {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(peer), "Peer"} }; /* Dispatch table, indexed by object type */ static struct acpi_exdump_info *acpi_ex_dump_info[] = { NULL, acpi_ex_dump_integer, acpi_ex_dump_string, acpi_ex_dump_buffer, acpi_ex_dump_package, NULL, acpi_ex_dump_device, acpi_ex_dump_event, acpi_ex_dump_method, acpi_ex_dump_mutex, acpi_ex_dump_region, acpi_ex_dump_power, acpi_ex_dump_processor, acpi_ex_dump_thermal, acpi_ex_dump_buffer_field, NULL, NULL, acpi_ex_dump_region_field, acpi_ex_dump_bank_field, 
acpi_ex_dump_index_field, acpi_ex_dump_reference, NULL, NULL, acpi_ex_dump_notify, acpi_ex_dump_address_handler, NULL, NULL, NULL, acpi_ex_dump_extra, acpi_ex_dump_data }; /******************************************************************************* * * FUNCTION: acpi_ex_dump_object * * PARAMETERS: obj_desc - Descriptor to dump * info - Info table corresponding to this object * type * * RETURN: None * * DESCRIPTION: Walk the info table for this object * ******************************************************************************/ static void acpi_ex_dump_object(union acpi_operand_object *obj_desc, struct acpi_exdump_info *info) { u8 *target; const char *name; u8 count; union acpi_operand_object *start; union acpi_operand_object *data = NULL; union acpi_operand_object *next; struct acpi_namespace_node *node; if (!info) { acpi_os_printf ("ExDumpObject: Display not implemented for object type %s\n", acpi_ut_get_object_type_name(obj_desc)); return; } /* First table entry must contain the table length (# of table entries) */ count = info->offset; while (count) { if (!obj_desc) { return; } target = ACPI_ADD_PTR(u8, obj_desc, info->offset); name = info->name; switch (info->opcode) { case ACPI_EXD_INIT: break; case ACPI_EXD_TYPE: acpi_os_printf("%20s : %2.2X [%s]\n", "Type", obj_desc->common.type, acpi_ut_get_object_type_name(obj_desc)); break; case ACPI_EXD_UINT8: acpi_os_printf("%20s : %2.2X\n", name, *target); break; case ACPI_EXD_UINT16: acpi_os_printf("%20s : %4.4X\n", name, ACPI_GET16(target)); break; case ACPI_EXD_UINT32: acpi_os_printf("%20s : %8.8X\n", name, ACPI_GET32(target)); break; case ACPI_EXD_UINT64: acpi_os_printf("%20s : %8.8X%8.8X\n", "Value", ACPI_FORMAT_UINT64(ACPI_GET64(target))); break; case ACPI_EXD_POINTER: case ACPI_EXD_ADDRESS: acpi_ex_out_pointer(name, *ACPI_CAST_PTR(void *, target)); break; case ACPI_EXD_STRING: acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX); acpi_os_printf("\n"); break; case ACPI_EXD_BUFFER: 
ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, obj_desc->buffer.length); break; case ACPI_EXD_PACKAGE: /* Dump the package contents */ acpi_os_printf("\nPackage Contents:\n"); acpi_ex_dump_package_obj(obj_desc, 0, 0); break; case ACPI_EXD_FIELD: acpi_ex_dump_object(obj_desc, acpi_ex_dump_field_common); break; case ACPI_EXD_REFERENCE: acpi_ex_out_string("Class Name", acpi_ut_get_reference_name (obj_desc)); acpi_ex_dump_reference_obj(obj_desc); break; case ACPI_EXD_LIST: start = *ACPI_CAST_PTR(void *, target); next = start; acpi_os_printf("%20s : %p ", name, next); if (next) { acpi_os_printf("%s (Type %2.2X)", acpi_ut_get_object_type_name (next), next->common.type); while (next->common.next_object) { if ((next->common.type == ACPI_TYPE_LOCAL_DATA) && !data) { data = next; } next = next->common.next_object; acpi_os_printf("->%p(%s %2.2X)", next, acpi_ut_get_object_type_name (next), next->common.type); if ((next == start) || (next == data)) { acpi_os_printf ("\n**** Error: Object list appears to be circular linked"); break; } } } else { acpi_os_printf("- No attached objects"); } acpi_os_printf("\n"); break; case ACPI_EXD_HDLR_LIST: start = *ACPI_CAST_PTR(void *, target); next = start; acpi_os_printf("%20s : %p", name, next); if (next) { acpi_os_printf("(%s %2.2X)", acpi_ut_get_object_type_name (next), next->address_space.space_id); while (next->address_space.next) { if ((next->common.type == ACPI_TYPE_LOCAL_DATA) && !data) { data = next; } next = next->address_space.next; acpi_os_printf("->%p(%s %2.2X)", next, acpi_ut_get_object_type_name (next), next->address_space. 
space_id); if ((next == start) || (next == data)) { acpi_os_printf ("\n**** Error: Handler list appears to be circular linked"); break; } } } acpi_os_printf("\n"); break; case ACPI_EXD_RGN_LIST: start = *ACPI_CAST_PTR(void *, target); next = start; acpi_os_printf("%20s : %p", name, next); if (next) { acpi_os_printf("(%s %2.2X)", acpi_ut_get_object_type_name (next), next->common.type); while (next->region.next) { if ((next->common.type == ACPI_TYPE_LOCAL_DATA) && !data) { data = next; } next = next->region.next; acpi_os_printf("->%p(%s %2.2X)", next, acpi_ut_get_object_type_name (next), next->common.type); if ((next == start) || (next == data)) { acpi_os_printf ("\n**** Error: Region list appears to be circular linked"); break; } } } acpi_os_printf("\n"); break; case ACPI_EXD_NODE: node = *ACPI_CAST_PTR(struct acpi_namespace_node *, target); acpi_os_printf("%20s : %p", name, node); if (node) { acpi_os_printf(" [%4.4s]", node->name.ascii); } acpi_os_printf("\n"); break; default: acpi_os_printf("**** Invalid table opcode [%X] ****\n", info->opcode); return; } info++; count--; } } /******************************************************************************* * * FUNCTION: acpi_ex_dump_operand * * PARAMETERS: *obj_desc - Pointer to entry to be dumped * depth - Current nesting depth * * RETURN: None * * DESCRIPTION: Dump an operand object * ******************************************************************************/ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) { u32 length; u32 index; ACPI_FUNCTION_NAME(ex_dump_operand); /* Check if debug output enabled */ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) { return; } if (!obj_desc) { /* This could be a null element of a package */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n")); return; } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Namespace Node: ", obj_desc)); ACPI_DUMP_ENTRY(obj_desc, ACPI_LV_EXEC); return; } 
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p is not a node or operand object: [%s]\n", obj_desc, acpi_ut_get_descriptor_name(obj_desc))); ACPI_DUMP_BUFFER(obj_desc, sizeof(union acpi_operand_object)); return; } /* obj_desc is a valid object */ if (depth > 0) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%*s[%u] %p Refs=%u ", depth, " ", depth, obj_desc, obj_desc->common.reference_count)); } else { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%p Refs=%u ", obj_desc, obj_desc->common.reference_count)); } /* Decode object type */ switch (obj_desc->common.type) { case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("Reference: [%s] ", acpi_ut_get_reference_name(obj_desc)); switch (obj_desc->reference.class) { case ACPI_REFCLASS_DEBUG: acpi_os_printf("\n"); break; case ACPI_REFCLASS_INDEX: acpi_os_printf("%p\n", obj_desc->reference.object); break; case ACPI_REFCLASS_TABLE: acpi_os_printf("Table Index %X\n", obj_desc->reference.value); break; case ACPI_REFCLASS_REFOF: acpi_os_printf("%p [%s]\n", obj_desc->reference.object, acpi_ut_get_type_name(((union acpi_operand_object *) obj_desc-> reference. object)->common. type)); break; case ACPI_REFCLASS_NAME: acpi_ut_repair_name(obj_desc->reference.node->name. 
ascii); acpi_os_printf("- [%4.4s] (Node %p)\n", obj_desc->reference.node->name.ascii, obj_desc->reference.node); break; case ACPI_REFCLASS_ARG: case ACPI_REFCLASS_LOCAL: acpi_os_printf("%X\n", obj_desc->reference.value); break; default: /* Unknown reference class */ acpi_os_printf("%2.2X\n", obj_desc->reference.class); break; } break; case ACPI_TYPE_BUFFER: acpi_os_printf("Buffer length %.2X @ %p\n", obj_desc->buffer.length, obj_desc->buffer.pointer); /* Debug only -- dump the buffer contents */ if (obj_desc->buffer.pointer) { length = obj_desc->buffer.length; if (length > 128) { length = 128; } acpi_os_printf ("Buffer Contents: (displaying length 0x%.2X)\n", length); ACPI_DUMP_BUFFER(obj_desc->buffer.pointer, length); } break; case ACPI_TYPE_INTEGER: acpi_os_printf("Integer %8.8X%8.8X\n", ACPI_FORMAT_UINT64(obj_desc->integer.value)); break; case ACPI_TYPE_PACKAGE: acpi_os_printf("Package [Len %X] ElementArray %p\n", obj_desc->package.count, obj_desc->package.elements); /* * If elements exist, package element pointer is valid, * and debug_level exceeds 1, dump package's elements. */ if (obj_desc->package.count && obj_desc->package.elements && acpi_dbg_level > 1) { for (index = 0; index < obj_desc->package.count; index++) { acpi_ex_dump_operand(obj_desc->package. elements[index], depth + 1); } } break; case ACPI_TYPE_REGION: acpi_os_printf("Region %s (%X)", acpi_ut_get_region_name(obj_desc->region. space_id), obj_desc->region.space_id); /* * If the address and length have not been evaluated, * don't print them. */ if (!(obj_desc->region.flags & AOPOBJ_DATA_VALID)) { acpi_os_printf("\n"); } else { acpi_os_printf(" base %8.8X%8.8X Length %X\n", ACPI_FORMAT_UINT64(obj_desc->region. 
address), obj_desc->region.length); } break; case ACPI_TYPE_STRING: acpi_os_printf("String length %X @ %p ", obj_desc->string.length, obj_desc->string.pointer); acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX); acpi_os_printf("\n"); break; case ACPI_TYPE_LOCAL_BANK_FIELD: acpi_os_printf("BankField\n"); break; case ACPI_TYPE_LOCAL_REGION_FIELD: acpi_os_printf ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at " "byte=%X bit=%X of below:\n", obj_desc->field.bit_length, obj_desc->field.access_byte_width, obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK, obj_desc->field.field_flags & AML_FIELD_UPDATE_RULE_MASK, obj_desc->field.base_byte_offset, obj_desc->field.start_field_bit_offset); acpi_ex_dump_operand(obj_desc->field.region_obj, depth + 1); break; case ACPI_TYPE_LOCAL_INDEX_FIELD: acpi_os_printf("IndexField\n"); break; case ACPI_TYPE_BUFFER_FIELD: acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n", obj_desc->buffer_field.bit_length, obj_desc->buffer_field.base_byte_offset, obj_desc->buffer_field.start_field_bit_offset); if (!obj_desc->buffer_field.buffer_obj) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "*NULL*\n")); } else if ((obj_desc->buffer_field.buffer_obj)->common.type != ACPI_TYPE_BUFFER) { acpi_os_printf("*not a Buffer*\n"); } else { acpi_ex_dump_operand(obj_desc->buffer_field.buffer_obj, depth + 1); } break; case ACPI_TYPE_EVENT: acpi_os_printf("Event\n"); break; case ACPI_TYPE_METHOD: acpi_os_printf("Method(%X) @ %p:%X\n", obj_desc->method.param_count, obj_desc->method.aml_start, obj_desc->method.aml_length); break; case ACPI_TYPE_MUTEX: acpi_os_printf("Mutex\n"); break; case ACPI_TYPE_DEVICE: acpi_os_printf("Device\n"); break; case ACPI_TYPE_POWER: acpi_os_printf("Power\n"); break; case ACPI_TYPE_PROCESSOR: acpi_os_printf("Processor\n"); break; case ACPI_TYPE_THERMAL: acpi_os_printf("Thermal\n"); break; default: /* Unknown Type */ acpi_os_printf("Unknown Type %X\n", obj_desc->common.type); break; } return; } 
/******************************************************************************* * * FUNCTION: acpi_ex_dump_operands * * PARAMETERS: operands - A list of Operand objects * opcode_name - AML opcode name * num_operands - Operand count for this opcode * * DESCRIPTION: Dump the operands associated with the opcode * ******************************************************************************/ void acpi_ex_dump_operands(union acpi_operand_object **operands, const char *opcode_name, u32 num_operands) { ACPI_FUNCTION_TRACE(ex_dump_operands); if (!opcode_name) { opcode_name = "UNKNOWN"; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "**** Start operand dump for opcode [%s], %u operands\n", opcode_name, num_operands)); if (num_operands == 0) { num_operands = 1; } /* Dump the individual operands */ while (num_operands) { acpi_ex_dump_operand(*operands, 0); operands++; num_operands--; } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "**** End operand dump for [%s]\n", opcode_name)); return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ex_out* functions * * PARAMETERS: title - Descriptive text * value - Value to be displayed * * DESCRIPTION: Object dump output formatting functions. These functions * reduce the number of format strings required and keeps them * all in one place for easy modification. 
* ******************************************************************************/ static void acpi_ex_out_string(const char *title, const char *value) { acpi_os_printf("%20s : %s\n", title, value); } static void acpi_ex_out_pointer(const char *title, const void *value) { acpi_os_printf("%20s : %p\n", title, value); } /******************************************************************************* * * FUNCTION: acpi_ex_dump_namespace_node * * PARAMETERS: node - Descriptor to dump * flags - Force display if TRUE * * DESCRIPTION: Dumps the members of the given.Node * ******************************************************************************/ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags) { ACPI_FUNCTION_ENTRY(); if (!flags) { /* Check if debug output enabled */ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) { return; } } acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node)); acpi_os_printf("%20s : %2.2X [%s]\n", "Type", node->type, acpi_ut_get_type_name(node->type)); acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node), acpi_ex_dump_node); } /******************************************************************************* * * FUNCTION: acpi_ex_dump_reference_obj * * PARAMETERS: object - Descriptor to dump * * DESCRIPTION: Dumps a reference object * ******************************************************************************/ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc) { struct acpi_buffer ret_buf; acpi_status status; ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER; if (obj_desc->reference.class == ACPI_REFCLASS_NAME) { acpi_os_printf(" %p ", obj_desc->reference.node); status = acpi_ns_handle_to_pathname(obj_desc->reference.node, &ret_buf, TRUE); if (ACPI_FAILURE(status)) { acpi_os_printf (" Could not convert name to pathname: %s\n", acpi_format_exception(status)); } else { acpi_os_printf("%s: %s\n", acpi_ut_get_type_name(obj_desc-> reference.node-> type), (char 
*)ret_buf.pointer); ACPI_FREE(ret_buf.pointer); } } else if (obj_desc->reference.object) { if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) { acpi_os_printf("%22s %p", "Target :", obj_desc->reference.object); if (obj_desc->reference.class == ACPI_REFCLASS_TABLE) { acpi_os_printf(" Table Index: %X\n", obj_desc->reference.value); } else { acpi_os_printf(" [%s]\n", acpi_ut_get_type_name(((union acpi_operand_object *) obj_desc-> reference. object)-> common. type)); } } else { acpi_os_printf(" Target: %p\n", obj_desc->reference.object); } } } /******************************************************************************* * * FUNCTION: acpi_ex_dump_package_obj * * PARAMETERS: obj_desc - Descriptor to dump * level - Indentation Level * index - Package index for this object * * DESCRIPTION: Dumps the elements of the package * ******************************************************************************/ static void acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, u32 level, u32 index) { u32 i; /* Indentation and index output */ if (level > 0) { for (i = 0; i < level; i++) { acpi_os_printf(" "); } acpi_os_printf("[%.2d] ", index); } acpi_os_printf("%p ", obj_desc); /* Null package elements are allowed */ if (!obj_desc) { acpi_os_printf("[Null Object]\n"); return; } /* Packages may only contain a few object types */ switch (obj_desc->common.type) { case ACPI_TYPE_INTEGER: acpi_os_printf("[Integer] = %8.8X%8.8X\n", ACPI_FORMAT_UINT64(obj_desc->integer.value)); break; case ACPI_TYPE_STRING: acpi_os_printf("[String] Value: "); acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX); acpi_os_printf("\n"); break; case ACPI_TYPE_BUFFER: acpi_os_printf("[Buffer] Length %.2X = ", obj_desc->buffer.length); if (obj_desc->buffer.length) { acpi_ut_debug_dump_buffer(ACPI_CAST_PTR (u8, obj_desc->buffer.pointer), obj_desc->buffer.length, DB_DWORD_DISPLAY, _COMPONENT); } else { acpi_os_printf("\n"); } break; case ACPI_TYPE_PACKAGE: 
acpi_os_printf("[Package] Contains %u Elements:\n", obj_desc->package.count); for (i = 0; i < obj_desc->package.count; i++) { acpi_ex_dump_package_obj(obj_desc->package.elements[i], level + 1, i); } break; case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("[Object Reference] Class [%s]", acpi_ut_get_reference_name(obj_desc)); acpi_ex_dump_reference_obj(obj_desc); break; default: acpi_os_printf("[%s] Type: %2.2X\n", acpi_ut_get_type_name(obj_desc->common.type), obj_desc->common.type); break; } } /******************************************************************************* * * FUNCTION: acpi_ex_dump_object_descriptor * * PARAMETERS: obj_desc - Descriptor to dump * flags - Force display if TRUE * * DESCRIPTION: Dumps the members of the object descriptor given. * ******************************************************************************/ void acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) { ACPI_FUNCTION_TRACE(ex_dump_object_descriptor); if (!obj_desc) { return_VOID; } if (!flags) { /* Check if debug output enabled */ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_OBJECTS, _COMPONENT)) { return_VOID; } } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) { acpi_ex_dump_namespace_node((struct acpi_namespace_node *) obj_desc, flags); obj_desc = ((struct acpi_namespace_node *)obj_desc)->object; if (!obj_desc) { return_VOID; } acpi_os_printf("\nAttached Object %p", obj_desc); if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) { acpi_os_printf(" - Namespace Node"); } acpi_os_printf(":\n"); goto dump_object; } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) { acpi_os_printf("%p is not an ACPI operand object: [%s]\n", obj_desc, acpi_ut_get_descriptor_name(obj_desc)); return_VOID; } /* Validate the object type */ if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) { acpi_os_printf("Not a known object type: %2.2X\n", obj_desc->common.type); return_VOID; } dump_object: if (!obj_desc) { return_VOID; } /* Common 
Fields */ acpi_ex_dump_object(obj_desc, acpi_ex_dump_common); /* Object-specific fields */ acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]); if (obj_desc->common.type == ACPI_TYPE_REGION) { obj_desc = obj_desc->common.next_object; if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) { acpi_os_printf ("Secondary object is not a known object type: %2.2X\n", obj_desc->common.type); return_VOID; } acpi_os_printf("\nExtra attached Object (%p):\n", obj_desc); acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]); } return_VOID; } #endif
/* linux-master */
/* drivers/acpi/acpica/exdump.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: utxfinit - External interfaces for ACPICA initialization
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#define EXPORT_ACPI_INTERFACES

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utxfinit")

/* For acpi_exec only */
void ae_do_object_overrides(void);

/*******************************************************************************
 *
 * FUNCTION:    acpi_initialize_subsystem
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initializes all global variables. This is the first function
 *              called, so any early initialization belongs here.
 *
 *              Note: the call order below is significant - the OSL must come
 *              up before globals, mutexes, the namespace root, and the OSI
 *              interface list, in that sequence.
 *
 ******************************************************************************/
acpi_status ACPI_INIT_FUNCTION acpi_initialize_subsystem(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);

	acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
	ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());

	/* Initialize the OS-Dependent layer */

	status = acpi_os_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
		return_ACPI_STATUS(status);
	}

	/* Initialize all globals used by the subsystem */

	status = acpi_ut_init_globals();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During initialization of globals"));
		return_ACPI_STATUS(status);
	}

	/* Create the default mutex objects */

	status = acpi_ut_mutex_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During Global Mutex creation"));
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the namespace manager and
	 * the root of the namespace tree
	 */
	status = acpi_ns_root_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During Namespace initialization"));
		return_ACPI_STATUS(status);
	}

	/* Initialize the global OSI interfaces list with the static names */

	status = acpi_ut_initialize_interfaces();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During OSI interfaces initialization"));
		return_ACPI_STATUS(status);
	}

	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_subsystem)

/*******************************************************************************
 *
 * FUNCTION:    acpi_enable_subsystem
 *
 * PARAMETERS:  flags               - Init/enable Options
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Completes the subsystem initialization including hardware.
 *              Puts system into ACPI mode if it isn't already.
 *
 *              Each ACPI_NO_* bit in flags suppresses the corresponding
 *              initialization step; callers can stage bring-up that way.
 *
 ******************************************************************************/
acpi_status ACPI_INIT_FUNCTION acpi_enable_subsystem(u32 flags)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_enable_subsystem);

	/*
	 * The early initialization phase is complete. The namespace is loaded,
	 * and we can now support address spaces other than Memory, I/O, and
	 * PCI_Config.
	 */
	acpi_gbl_early_initialization = FALSE;

#if (!ACPI_REDUCED_HARDWARE)

	/* Enable ACPI mode */

	if (!(flags & ACPI_NO_ACPI_ENABLE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Going into ACPI mode\n"));

		/* Remember the pre-transition mode so it can be restored later */

		acpi_gbl_original_mode = acpi_hw_get_mode();

		status = acpi_enable();
		if (ACPI_FAILURE(status)) {
			ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Obtain a permanent mapping for the FACS. This is required for the
	 * Global Lock and the Firmware Waking Vector
	 */
	if (!(flags & ACPI_NO_FACS_INIT)) {
		status = acpi_tb_initialize_facs();
		if (ACPI_FAILURE(status)) {
			ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Initialize ACPI Event handling (Fixed and General Purpose)
	 *
	 * Note1: We must have the hardware and events initialized before we can
	 * execute any control methods safely. Any control method can require
	 * ACPI hardware support, so the hardware must be fully initialized before
	 * any method execution!
	 *
	 * Note2: Fixed events are initialized and enabled here. GPEs are
	 * initialized, but cannot be enabled until after the hardware is
	 * completely initialized (SCI and global_lock activated) and the various
	 * initialization control methods are run (_REG, _STA, _INI) on the
	 * entire namespace.
	 */
	if (!(flags & ACPI_NO_EVENT_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Initializing ACPI events\n"));

		status = acpi_ev_initialize_events();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Install the SCI handler and Global Lock handler. This completes the
	 * hardware initialization.
	 */
	if (!(flags & ACPI_NO_HANDLER_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Installing SCI/GL handlers\n"));

		status = acpi_ev_install_xrupt_handlers();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}
#endif				/* !ACPI_REDUCED_HARDWARE */

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL_INIT(acpi_enable_subsystem)

/*******************************************************************************
 *
 * FUNCTION:    acpi_initialize_objects
 *
 * PARAMETERS:  flags               - Init/enable Options
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Completes namespace initialization by initializing device
 *              objects and executing AML code for Regions, buffers, etc.
 *
 ******************************************************************************/
acpi_status ACPI_INIT_FUNCTION acpi_initialize_objects(u32 flags)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_initialize_objects);

#ifdef ACPI_OBSOLETE_BEHAVIOR
	/*
	 * 05/2019: Removed, initialization now happens at both object
	 * creation and table load time
	 */

	/*
	 * Initialize the objects that remain uninitialized. This
	 * runs the executable AML that may be part of the
	 * declaration of these objects: operation_regions, buffer_fields,
	 * bank_fields, Buffers, and Packages.
	 */
	if (!(flags & ACPI_NO_OBJECT_INIT)) {
		status = acpi_ns_initialize_objects();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}
#endif

	/*
	 * Initialize all device/region objects in the namespace. This runs
	 * the device _STA and _INI methods and region _REG methods.
	 */
	if (!(flags & (ACPI_NO_DEVICE_INIT | ACPI_NO_ADDRESS_SPACE_INIT))) {
		status = acpi_ns_initialize_devices(flags);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Empty the caches (delete the cached objects) on the assumption that
	 * the table load filled them up more than they will be at runtime --
	 * thus wasting non-paged memory.
	 */
	status = acpi_purge_cached_objects();

	acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_objects)
/* linux-master */
/* drivers/acpi/acpica/utxfinit.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: utglobal - Global variables for the ACPI subsystem
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#define EXPORT_ACPI_INTERFACES
#define DEFINE_ACPI_GLOBALS

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utglobal")

/*******************************************************************************
 *
 * Static global variable initialization.
 *
 ******************************************************************************/

/* Various state name strings */

const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
	"\\_S0_",
	"\\_S1_",
	"\\_S2_",
	"\\_S3_",
	"\\_S4_",
	"\\_S5_"
};

const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS] = {
	"_S0W",
	"_S1W",
	"_S2W",
	"_S3W",
	"_S4W"
};

const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
	"_S1D",
	"_S2D",
	"_S3D",
	"_S4D"
};

/* Hex-to-ascii */

const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";

/*******************************************************************************
 *
 * Namespace globals
 *
 ******************************************************************************/

/*
 * Predefined ACPI Names (Built-in to the Interpreter)
 *
 * NOTES:
 * 1) _SB_ is defined to be a device to allow \_SB_._INI to be run
 *    during the initialization sequence.
 * 2) _TZ_ is defined to be a thermal zone in order to allow ASL code to
 *    perform a Notify() operation on it. 09/2010: Changed to type Device.
 *    This still allows notifies, but does not confuse host code that
 *    searches for valid thermal_zone objects.
 */
const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
	{"_GPE", ACPI_TYPE_LOCAL_SCOPE, NULL},
	{"_PR_", ACPI_TYPE_LOCAL_SCOPE, NULL},
	{"_SB_", ACPI_TYPE_DEVICE, NULL},
	{"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
	{"_TZ_", ACPI_TYPE_DEVICE, NULL},

	/*
	 * March, 2015:
	 * The _REV object is in the process of being deprecated, because
	 * other ACPI implementations permanently return 2. Thus, it
	 * has little or no value. Return 2 for compatibility with
	 * other ACPI implementations.
	 */
	{"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
	{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
	{"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
	{"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},

	/* Table terminator */

	{NULL, ACPI_TYPE_ANY, NULL}
};

#if (!ACPI_REDUCED_HARDWARE)
/******************************************************************************
 *
 * Event and Hardware globals
 *
 ******************************************************************************/

/* Maps each ACPI_BITREG_* index to its parent register, bit position, mask */

struct acpi_bit_register_info acpi_gbl_bit_register_info[ACPI_NUM_BITREG] = {
	/* Name                                     Parent Register             Register Bit Position                   Register Bit Mask       */

	/* ACPI_BITREG_TIMER_STATUS         */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_TIMER_STATUS,
						ACPI_BITMASK_TIMER_STATUS},
	/* ACPI_BITREG_BUS_MASTER_STATUS    */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_BUS_MASTER_STATUS,
						ACPI_BITMASK_BUS_MASTER_STATUS},
	/* ACPI_BITREG_GLOBAL_LOCK_STATUS   */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_GLOBAL_LOCK_STATUS,
						ACPI_BITMASK_GLOBAL_LOCK_STATUS},
	/* ACPI_BITREG_POWER_BUTTON_STATUS  */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_POWER_BUTTON_STATUS,
						ACPI_BITMASK_POWER_BUTTON_STATUS},
	/* ACPI_BITREG_SLEEP_BUTTON_STATUS  */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_SLEEP_BUTTON_STATUS,
						ACPI_BITMASK_SLEEP_BUTTON_STATUS},
	/* ACPI_BITREG_RT_CLOCK_STATUS      */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_RT_CLOCK_STATUS,
						ACPI_BITMASK_RT_CLOCK_STATUS},
	/* ACPI_BITREG_WAKE_STATUS          */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_WAKE_STATUS,
						ACPI_BITMASK_WAKE_STATUS},
	/* ACPI_BITREG_PCIEXP_WAKE_STATUS   */ {ACPI_REGISTER_PM1_STATUS,
						ACPI_BITPOSITION_PCIEXP_WAKE_STATUS,
						ACPI_BITMASK_PCIEXP_WAKE_STATUS},

	/* ACPI_BITREG_TIMER_ENABLE         */ {ACPI_REGISTER_PM1_ENABLE,
						ACPI_BITPOSITION_TIMER_ENABLE,
						ACPI_BITMASK_TIMER_ENABLE},
	/* ACPI_BITREG_GLOBAL_LOCK_ENABLE   */ {ACPI_REGISTER_PM1_ENABLE,
						ACPI_BITPOSITION_GLOBAL_LOCK_ENABLE,
						ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
	/* ACPI_BITREG_POWER_BUTTON_ENABLE  */ {ACPI_REGISTER_PM1_ENABLE,
						ACPI_BITPOSITION_POWER_BUTTON_ENABLE,
						ACPI_BITMASK_POWER_BUTTON_ENABLE},
	/* ACPI_BITREG_SLEEP_BUTTON_ENABLE  */ {ACPI_REGISTER_PM1_ENABLE,
						ACPI_BITPOSITION_SLEEP_BUTTON_ENABLE,
						ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
	/* ACPI_BITREG_RT_CLOCK_ENABLE      */ {ACPI_REGISTER_PM1_ENABLE,
						ACPI_BITPOSITION_RT_CLOCK_ENABLE,
						ACPI_BITMASK_RT_CLOCK_ENABLE},
	/* ACPI_BITREG_PCIEXP_WAKE_DISABLE  */ {ACPI_REGISTER_PM1_ENABLE,
						ACPI_BITPOSITION_PCIEXP_WAKE_DISABLE,
						ACPI_BITMASK_PCIEXP_WAKE_DISABLE},

	/* ACPI_BITREG_SCI_ENABLE           */ {ACPI_REGISTER_PM1_CONTROL,
						ACPI_BITPOSITION_SCI_ENABLE,
						ACPI_BITMASK_SCI_ENABLE},
	/* ACPI_BITREG_BUS_MASTER_RLD       */ {ACPI_REGISTER_PM1_CONTROL,
						ACPI_BITPOSITION_BUS_MASTER_RLD,
						ACPI_BITMASK_BUS_MASTER_RLD},
	/* ACPI_BITREG_GLOBAL_LOCK_RELEASE  */ {ACPI_REGISTER_PM1_CONTROL,
						ACPI_BITPOSITION_GLOBAL_LOCK_RELEASE,
						ACPI_BITMASK_GLOBAL_LOCK_RELEASE},
	/* ACPI_BITREG_SLEEP_TYPE           */ {ACPI_REGISTER_PM1_CONTROL,
						ACPI_BITPOSITION_SLEEP_TYPE,
						ACPI_BITMASK_SLEEP_TYPE},
	/* ACPI_BITREG_SLEEP_ENABLE         */ {ACPI_REGISTER_PM1_CONTROL,
						ACPI_BITPOSITION_SLEEP_ENABLE,
						ACPI_BITMASK_SLEEP_ENABLE},

	/* ACPI_BITREG_ARB_DIS              */ {ACPI_REGISTER_PM2_CONTROL,
						ACPI_BITPOSITION_ARB_DISABLE,
						ACPI_BITMASK_ARB_DISABLE}
};

/* Per fixed event: status register id, enable register id, and their masks */

struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] = {
	/* ACPI_EVENT_PMTIMER       */ {ACPI_BITREG_TIMER_STATUS,
					ACPI_BITREG_TIMER_ENABLE,
					ACPI_BITMASK_TIMER_STATUS,
					ACPI_BITMASK_TIMER_ENABLE},
	/* ACPI_EVENT_GLOBAL        */ {ACPI_BITREG_GLOBAL_LOCK_STATUS,
					ACPI_BITREG_GLOBAL_LOCK_ENABLE,
					ACPI_BITMASK_GLOBAL_LOCK_STATUS,
					ACPI_BITMASK_GLOBAL_LOCK_ENABLE},
	/* ACPI_EVENT_POWER_BUTTON  */ {ACPI_BITREG_POWER_BUTTON_STATUS,
					ACPI_BITREG_POWER_BUTTON_ENABLE,
					ACPI_BITMASK_POWER_BUTTON_STATUS,
					ACPI_BITMASK_POWER_BUTTON_ENABLE},
	/* ACPI_EVENT_SLEEP_BUTTON  */ {ACPI_BITREG_SLEEP_BUTTON_STATUS,
					ACPI_BITREG_SLEEP_BUTTON_ENABLE,
					ACPI_BITMASK_SLEEP_BUTTON_STATUS,
					ACPI_BITMASK_SLEEP_BUTTON_ENABLE},
	/* ACPI_EVENT_RTC           */ {ACPI_BITREG_RT_CLOCK_STATUS,
					ACPI_BITREG_RT_CLOCK_ENABLE,
					ACPI_BITMASK_RT_CLOCK_STATUS,
					ACPI_BITMASK_RT_CLOCK_ENABLE},
};
#endif				/* !ACPI_REDUCED_HARDWARE */

#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)

/* to_pld macro: compile/disassemble strings */

const char *acpi_gbl_pld_panel_list[] = {
	"TOP",
	"BOTTOM",
	"LEFT",
	"RIGHT",
	"FRONT",
	"BACK",
	"UNKNOWN",
	NULL
};

const char *acpi_gbl_pld_vertical_position_list[] = {
	"UPPER",
	"CENTER",
	"LOWER",
	NULL
};

const char *acpi_gbl_pld_horizontal_position_list[] = {
	"LEFT",
	"CENTER",
	"RIGHT",
	NULL
};

const char *acpi_gbl_pld_shape_list[] = {
	"ROUND",
	"OVAL",
	"SQUARE",
	"VERTICALRECTANGLE",
	"HORIZONTALRECTANGLE",
	"VERTICALTRAPEZOID",
	"HORIZONTALTRAPEZOID",
	"UNKNOWN",
	"CHAMFERED",
	NULL
};
#endif

/* Public globals */

ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
ACPI_EXPORT_SYMBOL(acpi_gpe_count)
ACPI_EXPORT_SYMBOL(acpi_current_gpe_count)
/* linux-master */
/* drivers/acpi/acpica/utglobal.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: nsrepair - Repair for objects returned by predefined methods
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"
#include "acpredef.h"
#include "amlresrc.h"

#define _COMPONENT          ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair")

/*******************************************************************************
 *
 * This module attempts to repair or convert objects returned by the
 * predefined methods to an object type that is expected, as per the ACPI
 * specification. The need for this code is dictated by the many machines that
 * return incorrect types for the standard predefined methods. Performing these
 * conversions here, in one place, eliminates the need for individual ACPI
 * device drivers to do the same. Note: Most of these conversions are different
 * than the internal object conversion routines used for implicit object
 * conversion.
 *
 * The following conversions can be performed as necessary:
 *
 * Integer -> String
 * Integer -> Buffer
 * String  -> Integer
 * String  -> Buffer
 * Buffer  -> Integer
 * Buffer  -> String
 * Buffer  -> Package of Integers
 * Package -> Package of one Package
 *
 * Additional conversions that are available:
 *  Convert a null return or zero return value to an end_tag descriptor
 *  Convert an ASCII string to a Unicode buffer
 *
 * An incorrect standalone object is wrapped with required outer package
 *
 * Additional possible repairs:
 *  Required package elements that are NULL replaced by Integer/String/Buffer
 *
 ******************************************************************************/

/* Local prototypes */

static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
									 acpi_namespace_node
									 *node,
									 u32
									 return_btype,
									 u32
									 package_index);

/*
 * Special but simple repairs for some names.
 *
 * 2nd argument: Unexpected types that can be repaired
 */
static const struct acpi_simple_repair_info acpi_object_repair_info[] = {
	/* Resource descriptor conversions */

	{"_CRS",
	 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
	 ACPI_RTYPE_NONE,
	 ACPI_NOT_PACKAGE_ELEMENT,
	 acpi_ns_convert_to_resource},
	{"_DMA",
	 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
	 ACPI_RTYPE_NONE,
	 ACPI_NOT_PACKAGE_ELEMENT,
	 acpi_ns_convert_to_resource},
	{"_PRS",
	 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
	 ACPI_RTYPE_NONE,
	 ACPI_NOT_PACKAGE_ELEMENT,
	 acpi_ns_convert_to_resource},

	/* Object reference conversions */

	{"_DEP", ACPI_RTYPE_STRING, ACPI_ALL_PACKAGE_ELEMENTS,
	 acpi_ns_convert_to_reference},

	/* Unicode conversions */

	{"_MLS", ACPI_RTYPE_STRING, 1,
	 acpi_ns_convert_to_unicode},
	{"_STR", ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER,
	 ACPI_NOT_PACKAGE_ELEMENT,
	 acpi_ns_convert_to_unicode},
	{{0, 0, 0, 0}, 0, 0, NULL}	/* Table terminator */
};

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_simple_repair
 *
 * PARAMETERS:  info                - Method execution information block
 *              expected_btypes     - Object types expected
 *              package_index       - Index of object within parent package (if
 *                                    applicable - ACPI_NOT_PACKAGE_ELEMENT
 *                                    otherwise)
 *              return_object_ptr   - Pointer to the object returned from the
 *                                    evaluation of a method or object
 *
 * RETURN:      Status. AE_OK if repair was successful.
 *
 * DESCRIPTION: Attempt to repair/convert a return object of a type that was
 *              not expected.
 *
 ******************************************************************************/

acpi_status
acpi_ns_simple_repair(struct acpi_evaluate_info *info,
		      u32 expected_btypes,
		      u32 package_index,
		      union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *return_object = *return_object_ptr;
	union acpi_operand_object *new_object = NULL;
	acpi_status status;
	const struct acpi_simple_repair_info *predefined;

	ACPI_FUNCTION_NAME(ns_simple_repair);

	/*
	 * Special repairs for certain names that are in the repair table.
	 * Check if this name is in the list of repairable names.
	 */
	predefined = acpi_ns_match_simple_repair(info->node,
						 info->return_btype,
						 package_index);
	if (predefined) {
		if (!return_object) {
			ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
					      ACPI_WARN_ALWAYS,
					      "Missing expected return value"));
		}

		/* Converters accept a NULL return_object (e.g. _CRS repairs) */

		status = predefined->object_converter(info->node,
						      return_object,
						      &new_object);
		if (ACPI_FAILURE(status)) {

			/* A fatal error occurred during a conversion */

			ACPI_EXCEPTION((AE_INFO, status,
					"During return object analysis"));
			return (status);
		}

		if (new_object) {
			goto object_repaired;
		}
	}

	/*
	 * Do not perform simple object repair unless the return type is not
	 * expected.
	 */
	if (info->return_btype & expected_btypes) {
		return (AE_OK);
	}

	/*
	 * At this point, we know that the type of the returned object was not
	 * one of the expected types for this predefined name. Attempt to
	 * repair the object by converting it to one of the expected object
	 * types for this predefined name.
	 */

	/*
	 * If there is no return value, check if we require a return value for
	 * this predefined name. Either one return value is expected, or none,
	 * for both methods and other objects.
	 *
	 * Try to fix if there was no return object. Warning if failed to fix.
	 */
	if (!return_object) {
		if (expected_btypes) {
			if (!(expected_btypes & ACPI_RTYPE_NONE) &&
			    package_index != ACPI_NOT_PACKAGE_ELEMENT) {
				ACPI_WARN_PREDEFINED((AE_INFO,
						      info->full_pathname,
						      ACPI_WARN_ALWAYS,
						      "Found unexpected NULL package element"));

				status =
				    acpi_ns_repair_null_element(info,
								expected_btypes,
								package_index,
								return_object_ptr);
				if (ACPI_SUCCESS(status)) {
					return (AE_OK);	/* Repair was successful */
				}
			}

			if (expected_btypes != ACPI_RTYPE_NONE) {
				ACPI_WARN_PREDEFINED((AE_INFO,
						      info->full_pathname,
						      ACPI_WARN_ALWAYS,
						      "Missing expected return value"));
				return (AE_AML_NO_RETURN_VALUE);
			}
		}
	}

	/* Try each expected type in turn; first successful conversion wins */

	if (expected_btypes & ACPI_RTYPE_INTEGER) {
		status = acpi_ns_convert_to_integer(return_object, &new_object);
		if (ACPI_SUCCESS(status)) {
			goto object_repaired;
		}
	}
	if (expected_btypes & ACPI_RTYPE_STRING) {
		status = acpi_ns_convert_to_string(return_object, &new_object);
		if (ACPI_SUCCESS(status)) {
			goto object_repaired;
		}
	}
	if (expected_btypes & ACPI_RTYPE_BUFFER) {
		status = acpi_ns_convert_to_buffer(return_object, &new_object);
		if (ACPI_SUCCESS(status)) {
			goto object_repaired;
		}
	}
	if (expected_btypes & ACPI_RTYPE_PACKAGE) {
		/*
		 * A package is expected. We will wrap the existing object with a
		 * new package object. It is often the case that if a variable-length
		 * package is required, but there is only a single object needed, the
		 * BIOS will return that object instead of wrapping it with a Package
		 * object. Note: after the wrapping, the package will be validated
		 * for correct contents (expected object type or types).
		 */
		status =
		    acpi_ns_wrap_with_package(info, return_object, &new_object);
		if (ACPI_SUCCESS(status)) {
			/*
			 * The original object just had its reference count
			 * incremented for being inserted into the new package.
			 */
			*return_object_ptr = new_object;	/* New Package object */
			info->return_flags |= ACPI_OBJECT_REPAIRED;
			return (AE_OK);
		}
	}

	/* We cannot repair this object */

	return (AE_AML_OPERAND_TYPE);

object_repaired:

	/* Object was successfully repaired */

	if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {

		/* Update reference count of new object */

		if (!(info->return_flags & ACPI_OBJECT_WRAPPED)) {
			new_object->common.reference_count =
			    return_object->common.reference_count;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Converted %s to expected %s at Package index %u\n",
				  info->full_pathname,
				  acpi_ut_get_object_type_name(return_object),
				  acpi_ut_get_object_type_name(new_object),
				  package_index));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Converted %s to expected %s\n",
				  info->full_pathname,
				  acpi_ut_get_object_type_name(return_object),
				  acpi_ut_get_object_type_name(new_object)));
	}

	/* Delete old object, install the new return object */

	acpi_ut_remove_reference(return_object);
	*return_object_ptr = new_object;
	info->return_flags |= ACPI_OBJECT_REPAIRED;
	return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_ns_match_simple_repair
 *
 * PARAMETERS:  node                - Namespace node for the method/object
 *              return_btype        - Object type that was returned
 *              package_index       - Index of object within parent package (if
 *                                    applicable - ACPI_NOT_PACKAGE_ELEMENT
 *                                    otherwise)
 *
 * RETURN:      Pointer to entry in repair table. NULL indicates not found.
 *
 * DESCRIPTION: Check an object name against the repairable object list.
 *
 *****************************************************************************/

static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
									 acpi_namespace_node
									 *node,
									 u32
									 return_btype,
									 u32
									 package_index)
{
	const struct acpi_simple_repair_info *this_name;

	/* Search info table for a repairable predefined method/object name */

	this_name = acpi_object_repair_info;
	while (this_name->object_converter) {
		if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {

			/* Check if we can actually repair this name/type combination */

			if ((return_btype & this_name->unexpected_btypes) &&
			    (this_name->package_index ==
			     ACPI_ALL_PACKAGE_ELEMENTS
			     || package_index == this_name->package_index)) {
				return (this_name);
			}

			return (NULL);
		}

		this_name++;
	}

	return (NULL);		/* Name was not found in the repair table */
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_repair_null_element
 *
 * PARAMETERS:  info                - Method execution information block
 *              expected_btypes     - Object types expected
 *              package_index       - Index of object within parent package (if
 *                                    applicable - ACPI_NOT_PACKAGE_ELEMENT
 *                                    otherwise)
 *              return_object_ptr   - Pointer to the object returned from the
 *                                    evaluation of a method or object
 *
 * RETURN:      Status. AE_OK if repair was successful.
 *
 * DESCRIPTION: Attempt to repair a NULL element of a returned Package object.
 *
 ******************************************************************************/

acpi_status
acpi_ns_repair_null_element(struct acpi_evaluate_info *info,
			    u32 expected_btypes,
			    u32 package_index,
			    union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *return_object = *return_object_ptr;
	union acpi_operand_object *new_object;

	ACPI_FUNCTION_NAME(ns_repair_null_element);

	/* No repair needed if return object is non-NULL */

	if (return_object) {
		return (AE_OK);
	}

	/*
	 * Attempt to repair a NULL element of a Package object. This applies to
	 * predefined names that return a fixed-length package and each element
	 * is required. It does not apply to variable-length packages where NULL
	 * elements are allowed, especially at the end of the package.
	 */
	if (expected_btypes & ACPI_RTYPE_INTEGER) {

		/* Need an integer - create a zero-value integer */

		new_object = acpi_ut_create_integer_object((u64)0);
	} else if (expected_btypes & ACPI_RTYPE_STRING) {

		/* Need a string - create a NULL string */

		new_object = acpi_ut_create_string_object(0);
	} else if (expected_btypes & ACPI_RTYPE_BUFFER) {

		/* Need a buffer - create a zero-length buffer */

		new_object = acpi_ut_create_buffer_object(0);
	} else {
		/* Error for all other expected types */

		return (AE_AML_OPERAND_TYPE);
	}

	if (!new_object) {
		return (AE_NO_MEMORY);
	}

	/* Set the reference count according to the parent Package object */

	new_object->common.reference_count =
	    info->parent_package->common.reference_count;

	ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
			  "%s: Converted NULL package element to expected %s at index %u\n",
			  info->full_pathname,
			  acpi_ut_get_object_type_name(new_object),
			  package_index));

	*return_object_ptr = new_object;
	info->return_flags |= ACPI_OBJECT_REPAIRED;
	return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_ns_remove_null_elements
 *
 * PARAMETERS:  info                - Method execution information block
 *              package_type        - An acpi_return_package_types value
 *              obj_desc            - A Package object
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Remove all NULL package elements from packages that contain
 *              a variable number of subpackages. For these types of
 *              packages, NULL elements can be safely removed.
 *
 *****************************************************************************/

void
acpi_ns_remove_null_elements(struct acpi_evaluate_info *info,
			     u8 package_type,
			     union acpi_operand_object *obj_desc)
{
	union acpi_operand_object **source;
	union acpi_operand_object **dest;
	u32 count;
	u32 new_count;
	u32 i;

	ACPI_FUNCTION_NAME(ns_remove_null_elements);

	/*
	 * We can safely remove all NULL elements from these package types:
	 * PTYPE1_VAR packages contain a variable number of simple data types.
	 * PTYPE2 packages contain a variable number of subpackages.
	 */
	switch (package_type) {
	case ACPI_PTYPE1_VAR:
	case ACPI_PTYPE2:
	case ACPI_PTYPE2_COUNT:
	case ACPI_PTYPE2_PKG_COUNT:
	case ACPI_PTYPE2_FIXED:
	case ACPI_PTYPE2_MIN:
	case ACPI_PTYPE2_REV_FIXED:
	case ACPI_PTYPE2_FIX_VAR:
		break;

	default:
	case ACPI_PTYPE2_VAR_VAR:
	case ACPI_PTYPE1_FIXED:
	case ACPI_PTYPE1_OPTION:
		return;
	}

	count = obj_desc->package.count;
	new_count = count;

	source = obj_desc->package.elements;
	dest = source;

	/* Examine all elements of the package object, remove nulls */

	/* In-place compaction: copy non-NULL entries down over the NULLs */

	for (i = 0; i < count; i++) {
		if (!*source) {
			new_count--;
		} else {
			*dest = *source;
			dest++;
		}

		source++;
	}

	/* Update parent package if any null elements were removed */

	if (new_count < count) {
		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Found and removed %u NULL elements\n",
				  info->full_pathname, (count - new_count)));

		/* NULL terminate list and update the package count */

		*dest = NULL;
		obj_desc->package.count = new_count;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_wrap_with_package
 *
 * PARAMETERS:  info                - Method execution information block
 *              original_object     - Pointer to the object to repair.
 *              obj_desc_ptr        - The new package object is returned here
 *
 * RETURN:      Status, new object in *obj_desc_ptr
 *
 * DESCRIPTION: Repair a common problem with objects that are defined to
 *              return a variable-length Package of sub-objects.
If there is * only one sub-object, some BIOS code mistakenly simply declares * the single object instead of a Package with one sub-object. * This function attempts to repair this error by wrapping a * Package object around the original object, creating the * correct and expected Package with one sub-object. * * Names that can be repaired in this manner include: * _ALR, _CSD, _HPX, _MLS, _PLD, _PRT, _PSS, _TRT, _TSS, * _BCL, _DOD, _FIX, _Sx * ******************************************************************************/ acpi_status acpi_ns_wrap_with_package(struct acpi_evaluate_info *info, union acpi_operand_object *original_object, union acpi_operand_object **obj_desc_ptr) { union acpi_operand_object *pkg_obj_desc; ACPI_FUNCTION_NAME(ns_wrap_with_package); /* * Create the new outer package and populate it. The new * package will have a single element, the lone sub-object. */ pkg_obj_desc = acpi_ut_create_package_object(1); if (!pkg_obj_desc) { return (AE_NO_MEMORY); } pkg_obj_desc->package.elements[0] = original_object; ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Wrapped %s with expected Package object\n", info->full_pathname, acpi_ut_get_object_type_name(original_object))); /* Return the new object in the object pointer */ *obj_desc_ptr = pkg_obj_desc; info->return_flags |= ACPI_OBJECT_REPAIRED | ACPI_OBJECT_WRAPPED; return (AE_OK); }
linux-master
drivers/acpi/acpica/nsrepair.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: evgpeutil - GPE utilities * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evgpeutil") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /******************************************************************************* * * FUNCTION: acpi_ev_walk_gpe_list * * PARAMETERS: gpe_walk_callback - Routine called for each GPE block * context - Value passed to callback * * RETURN: Status * * DESCRIPTION: Walk the GPE lists. * ******************************************************************************/ acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context) { struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_xrupt_info *gpe_xrupt_info; acpi_status status = AE_OK; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(ev_walk_gpe_list); flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Walk the interrupt level descriptor list */ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; while (gpe_xrupt_info) { /* Walk all Gpe Blocks attached to this interrupt level */ gpe_block = gpe_xrupt_info->gpe_block_list_head; while (gpe_block) { /* One callback per GPE block */ status = gpe_walk_callback(gpe_xrupt_info, gpe_block, context); if (ACPI_FAILURE(status)) { if (status == AE_CTRL_END) { /* Callback abort */ status = AE_OK; } goto unlock_and_exit; } gpe_block = gpe_block->next; } gpe_xrupt_info = gpe_xrupt_info->next; } unlock_and_exit: acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_get_gpe_device * * PARAMETERS: GPE_WALK_CALLBACK * * RETURN: Status * * DESCRIPTION: Matches the input GPE index 
(0-current_gpe_count) with a GPE
 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context)
{
	struct acpi_gpe_device_info *device_info = context;

	/* Advance the running base index past the GPEs of this block */

	device_info->next_block_base_index += gpe_block->gpe_count;

	if (device_info->index >= device_info->next_block_base_index) {

		/* Target index is beyond this block; continue the walk */

		return (AE_OK);
	}

	/*
	 * The GPE index falls within this block. Report the owning block
	 * device node; FADT-defined GPE blocks are not device nodes, so
	 * gpe_device is left NULL for them.
	 */
	if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
		device_info->gpe_device = gpe_block->node;
	}

	device_info->status = AE_OK;
	return (AE_CTRL_END);	/* Terminate the walk */
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number        - Interrupt for a GPE block
 *              gpe_xrupt_block         - Where the block is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
			    struct acpi_gpe_xrupt_info **gpe_xrupt_block)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {

			/* Found an existing block for this interrupt level */

			*gpe_xrupt_block = next_gpe_xrupt;
			return_ACPI_STATUS(AE_OK);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {

		/* List is non-empty: append at the tail */

		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not install GPE interrupt handler at level 0x%X",
					interrupt_number));
			/*
			 * NOTE(review): the descriptor stays linked into the
			 * list on this failure path - confirm callers treat
			 * the list entry as harmless without a handler.
			 */
			return_ACPI_STATUS(status);
		}
	}

	*gpe_xrupt_block = gpe_xrupt;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a
gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {

		/* SCI block is kept; only its GPE block list is cleared */

		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_notify_info *notify;
	struct acpi_gpe_notify_info *next;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Now look at the individual GPEs in this byte register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			gpe_event_info = &gpe_block->event_info[((acpi_size)i *
								 ACPI_GPE_REGISTER_WIDTH)
								+ j];

			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
			     ACPI_GPE_DISPATCH_HANDLER) ||
			    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
			     ACPI_GPE_DISPATCH_RAW_HANDLER)) {

				/* Delete an installed handler block */

				ACPI_FREE(gpe_event_info->dispatch.handler);
				gpe_event_info->dispatch.handler = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			} else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
				   == ACPI_GPE_DISPATCH_NOTIFY) {

				/* Delete the implicit notification device list */

				notify = gpe_event_info->dispatch.notify_list;
				while (notify) {
					next = notify->next;
					ACPI_FREE(notify);
					notify = next;
				}

				gpe_event_info->dispatch.notify_list = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			}
		}
	}

	return_ACPI_STATUS(AE_OK);
}

#endif				/* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/evgpeutil.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: uttrack - Memory allocation tracking routines (debug only)
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

/*
 * These procedures are used for tracking memory leaks in the subsystem, and
 * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set.
 *
 * Each memory allocation is tracked via a doubly linked list. Each
 * element contains the caller's component, module name, function name, and
 * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call
 * acpi_ut_track_allocation to add an element to the list; deletion
 * occurs in the body of acpi_ut_free.
 */

#include <acpi/acpi.h>
#include "accommon.h"

#ifdef ACPI_DBG_TRACK_ALLOCATIONS

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("uttrack")

/* Local prototypes */
static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
							    acpi_debug_mem_block
							    *allocation);

static acpi_status
acpi_ut_track_allocation(struct acpi_debug_mem_block *address,
			 acpi_size size,
			 u8 alloc_type,
			 u32 component, const char *module, u32 line);

static acpi_status
acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
			  u32 component, const char *module, u32 line);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_list
 *
 * PARAMETERS:  list_name           - Ascii name for the list
 *              object_size         - Size of each tracked object
 *              return_cache        - Where the new list object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a local memory list for tracking purposes
 *
 ******************************************************************************/

acpi_status
acpi_ut_create_list(const char *list_name,
		    u16 object_size, struct acpi_memory_list **return_cache)
{
	struct acpi_memory_list *cache;

	cache = acpi_os_allocate_zeroed(sizeof(struct acpi_memory_list));
	if (!cache) {
		return (AE_NO_MEMORY);
	}

	cache->list_name = list_name;
	cache->object_size = object_size;

	*return_cache = cache;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_allocate_and_track
 *
 * PARAMETERS:  size                - Size of the allocation
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      Address of the allocated memory on success, NULL on failure.
 *
 * DESCRIPTION: The subsystem's equivalent of malloc. A tracking header is
 *              allocated immediately before the returned user area.
 *
 ******************************************************************************/

void *acpi_ut_allocate_and_track(acpi_size size,
				 u32 component, const char *module, u32 line)
{
	struct acpi_debug_mem_block *allocation;
	acpi_status status;

	/* Check for an inadvertent size of zero bytes */

	if (!size) {
		ACPI_WARNING((module, line,
			      "Attempt to allocate zero bytes, allocating 1 byte"));
		size = 1;
	}

	allocation =
	    acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header));
	if (!allocation) {

		/* Report allocation error */

		ACPI_WARNING((module, line,
			      "Could not allocate size %u", (u32)size));

		return (NULL);
	}

	status =
	    acpi_ut_track_allocation(allocation, size, ACPI_MEM_MALLOC,
				     component, module, line);
	if (ACPI_FAILURE(status)) {
		acpi_os_free(allocation);
		return (NULL);
	}

	/* Update global allocation statistics */

	acpi_gbl_global_list->total_allocated++;
	acpi_gbl_global_list->total_size += (u32)size;
	acpi_gbl_global_list->current_total_size += (u32)size;

	if (acpi_gbl_global_list->current_total_size >
	    acpi_gbl_global_list->max_occupied) {
		acpi_gbl_global_list->max_occupied =
		    acpi_gbl_global_list->current_total_size;
	}

	/* Return the user area, which follows the tracking header */

	return ((void *)&allocation->user_space);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_allocate_zeroed_and_track
 *
 * PARAMETERS:  size                - Size of the allocation
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      Address of the allocated memory on success, NULL on failure.
 *
 * DESCRIPTION: Subsystem equivalent of calloc.
 *
 ******************************************************************************/

void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
					u32 component,
					const char *module, u32 line)
{
	struct acpi_debug_mem_block *allocation;
	acpi_status status;

	/* Check for an inadvertent size of zero bytes */

	if (!size) {
		ACPI_WARNING((module, line,
			      "Attempt to allocate zero bytes, allocating 1 byte"));
		size = 1;
	}

	allocation =
	    acpi_os_allocate_zeroed(size +
				    sizeof(struct acpi_debug_mem_header));
	if (!allocation) {

		/* Report allocation error */

		ACPI_ERROR((module, line,
			    "Could not allocate size %u", (u32)size));

		return (NULL);
	}

	status =
	    acpi_ut_track_allocation(allocation, size, ACPI_MEM_CALLOC,
				     component, module, line);
	if (ACPI_FAILURE(status)) {
		acpi_os_free(allocation);
		return (NULL);
	}

	/* Update global allocation statistics */

	acpi_gbl_global_list->total_allocated++;
	acpi_gbl_global_list->total_size += (u32)size;
	acpi_gbl_global_list->current_total_size += (u32)size;

	if (acpi_gbl_global_list->current_total_size >
	    acpi_gbl_global_list->max_occupied) {
		acpi_gbl_global_list->max_occupied =
		    acpi_gbl_global_list->current_total_size;
	}

	return ((void *)&allocation->user_space);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_free_and_track
 *
 * PARAMETERS:  allocation          - Address of the memory to deallocate
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      None
 *
 * DESCRIPTION: Frees the memory at Allocation
 *
 ******************************************************************************/

void
acpi_ut_free_and_track(void *allocation,
		       u32 component, const char *module, u32 line)
{
	struct acpi_debug_mem_block *debug_block;
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ut_free, allocation);

	if (NULL == allocation) {
		ACPI_ERROR((module, line, "Attempt to delete a NULL address"));

		return_VOID;
	}

	/* Step back from the user pointer to the tracking header */

	debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block,
				    (((char *)allocation) -
				     sizeof(struct acpi_debug_mem_header)));

	acpi_gbl_global_list->total_freed++;
	acpi_gbl_global_list->current_total_size -= debug_block->size;

	status =
	    acpi_ut_remove_allocation(debug_block, component, module, line);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Could not free memory"));
	}

	acpi_os_free(debug_block);
	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed (block %p)\n",
			  allocation, debug_block));
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_find_allocation
 *
 * PARAMETERS:  allocation          - Address of allocated memory
 *
 * RETURN:      Three cases:
 *              1) List is empty, NULL is returned.
 *              2) Element was found. Returns Allocation parameter.
 *              3) Element was not found. Returns position where it should be
 *                 inserted into the list.
 *
 * DESCRIPTION: Searches for an element in the global allocation tracking list.
 *              If the element is not found, returns the location within the
 *              list where the element should be inserted.
 *
 *              Note: The list is ordered by larger-to-smaller addresses.
 *
 *              This global list is used to detect memory leaks in ACPICA as
 *              well as other issues such as an attempt to release the same
 *              internal object more than once. Although expensive as far
 *              as cpu time, this list is much more helpful for finding these
 *              types of issues than using memory leak detectors outside of
 *              the ACPICA code.
 *
 ******************************************************************************/

static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
							    acpi_debug_mem_block
							    *allocation)
{
	struct acpi_debug_mem_block *element;

	element = acpi_gbl_global_list->list_head;
	if (!element) {
		return (NULL);
	}

	/*
	 * Search for the address.
	 *
	 * Note: List is ordered by larger-to-smaller addresses, on the
	 * assumption that a new allocation usually has a larger address
	 * than previous allocations.
	 */
	while (element > allocation) {

		/* Check for end-of-list */

		if (!element->next) {
			return (element);
		}

		element = element->next;
	}

	if (element == allocation) {
		return (element);
	}

	/* Not found; return the insertion point (predecessor) */

	return (element->previous);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_track_allocation
 *
 * PARAMETERS:  allocation          - Address of allocated memory
 *              size                - Size of the allocation
 *              alloc_type          - MEM_MALLOC or MEM_CALLOC
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Inserts an element into the global allocation tracking list.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
			 acpi_size size,
			 u8 alloc_type,
			 u32 component, const char *module, u32 line)
{
	struct acpi_memory_list *mem_list;
	struct acpi_debug_mem_block *element;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation);

	if (acpi_gbl_disable_mem_tracking) {
		return_ACPI_STATUS(AE_OK);
	}

	mem_list = acpi_gbl_global_list;
	status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Search the global list for this address to make sure it is not
	 * already present. This will catch several kinds of problems.
	 */
	element = acpi_ut_find_allocation(allocation);
	if (element == allocation) {
		ACPI_ERROR((AE_INFO,
			    "UtTrackAllocation: Allocation (%p) already present in global list!",
			    allocation));
		goto unlock_and_exit;
	}

	/* Fill in the instance data */

	allocation->size = (u32)size;
	allocation->alloc_type = alloc_type;
	allocation->component = component;
	allocation->line = line;

	acpi_ut_safe_strncpy(allocation->module, (char *)module,
			     ACPI_MAX_MODULE_NAME);

	if (!element) {

		/* Insert at list head */

		if (mem_list->list_head) {
			((struct acpi_debug_mem_block *)(mem_list->list_head))->
			    previous = allocation;
		}

		allocation->next = mem_list->list_head;
		allocation->previous = NULL;

		mem_list->list_head = allocation;
	} else {
		/* Insert after element (keeps the address ordering) */

		allocation->next = element->next;
		allocation->previous = element;

		if (element->next) {
			(element->next)->previous = allocation;
		}

		element->next = allocation;
	}

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_allocation
 *
 * PARAMETERS:  allocation          - Address of allocated memory
 *              component           - Component type of caller
 *              module              - Source file name of caller
 *              line                - Line number of caller
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Deletes an element from the global allocation tracking list.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
			  u32 component, const char *module, u32 line)
{
	struct acpi_memory_list *mem_list;
	acpi_status status;

	ACPI_FUNCTION_NAME(ut_remove_allocation);

	if (acpi_gbl_disable_mem_tracking) {
		return (AE_OK);
	}

	mem_list = acpi_gbl_global_list;
	if (NULL == mem_list->list_head) {

		/* No allocations! */

		ACPI_ERROR((module, line,
			    "Empty allocation list, nothing to free!"));

		return (AE_OK);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Unlink */

	if (allocation->previous) {
		(allocation->previous)->next = allocation->next;
	} else {
		mem_list->list_head = allocation->next;
	}

	if (allocation->next) {
		(allocation->next)->previous = allocation->previous;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing %p, size 0%X\n",
			  &allocation->user_space, allocation->size));

	/* Mark the segment as deleted */

	memset(&allocation->user_space, 0xEA, allocation->size);

	status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_dump_allocation_info
 *
 * PARAMETERS:  None
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print some info about the outstanding allocations.
 *              (Currently a stub; the original statistics dump is retained
 *              below in commented-out form.)
 *
 ******************************************************************************/

void acpi_ut_dump_allocation_info(void)
{
/*
	struct acpi_memory_list         *mem_list;
*/

	ACPI_FUNCTION_TRACE(ut_dump_allocation_info);

/*
	ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
		("%30s: %4d (%3d Kb)\n", "Current allocations",
		mem_list->current_count,
		ROUND_UP_TO_1K (mem_list->current_size)));

	ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
		("%30s: %4d (%3d Kb)\n", "Max concurrent allocations",
		mem_list->max_concurrent_count,
		ROUND_UP_TO_1K (mem_list->max_concurrent_size)));

	ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
		("%30s: %4d (%3d Kb)\n", "Total (all) internal objects",
		running_object_count,
		ROUND_UP_TO_1K (running_object_size)));

	ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
		("%30s: %4d (%3d Kb)\n", "Total (all) allocations",
		running_alloc_count,
		ROUND_UP_TO_1K (running_alloc_size)));

	ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
		("%30s: %4d (%3d Kb)\n", "Current Nodes",
		acpi_gbl_current_node_count,
		ROUND_UP_TO_1K (acpi_gbl_current_node_size)));

	ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
		("%30s: %4d (%3d Kb)\n", "Max Nodes",
		acpi_gbl_max_concurrent_node_count,
		ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count *
			sizeof (struct acpi_namespace_node)))));
*/
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_dump_allocations
 *
 * PARAMETERS:  component           - Component(s) to dump info for.
 *              module              - Module to dump info for. NULL means all.
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print a list of all outstanding allocations.
 *
 ******************************************************************************/

void acpi_ut_dump_allocations(u32 component, const char *module)
{
	struct acpi_debug_mem_block *element;
	union acpi_descriptor *descriptor;
	u32 num_outstanding = 0;
	u8 descriptor_type;

	ACPI_FUNCTION_TRACE(ut_dump_allocations);

	if (acpi_gbl_disable_mem_tracking) {
		return_VOID;
	}

	/*
	 * Walk the allocation list.
	 */
	if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
		return_VOID;
	}

	if (!acpi_gbl_global_list) {
		goto exit;
	}

	element = acpi_gbl_global_list->list_head;
	while (element) {
		if ((element->component & component) &&
		    ((module == NULL)
		     || (0 == strcmp(module, element->module)))) {
			descriptor =
			    ACPI_CAST_PTR(union acpi_descriptor,
					  &element->user_space);

			if (element->size <
			    sizeof(struct acpi_common_descriptor)) {
				acpi_os_printf("%p Length 0x%04X %9.9s-%4.4u "
					       "[Not a Descriptor - too small]\n",
					       descriptor, element->size,
					       element->module, element->line);
			} else {
				/* Ignore allocated objects that are in a cache */

				if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
				    ACPI_DESC_TYPE_CACHED) {
					acpi_os_printf
					    ("%p Length 0x%04X %9.9s-%4.4u [%s] ",
					     descriptor, element->size,
					     element->module, element->line,
					     acpi_ut_get_descriptor_name
					     (descriptor));

					/* Optional object hex dump */

					if (acpi_gbl_verbose_leak_dump) {
						acpi_os_printf("\n");
						acpi_ut_dump_buffer((u8 *)
								    descriptor,
								    element->
								    size,
								    DB_BYTE_DISPLAY,
								    0);
					}

					/* Validate the descriptor type using Type field and length */

					descriptor_type = 0;	/* Not a valid descriptor type */

					switch (ACPI_GET_DESCRIPTOR_TYPE
						(descriptor)) {
					case ACPI_DESC_TYPE_OPERAND:

						if (element->size ==
						    sizeof(union
							   acpi_operand_object))
						{
							descriptor_type =
							    ACPI_DESC_TYPE_OPERAND;
						}
						break;

					case ACPI_DESC_TYPE_PARSER:

						if (element->size ==
						    sizeof(union
							   acpi_parse_object)) {
							descriptor_type =
							    ACPI_DESC_TYPE_PARSER;
						}
						break;

					case ACPI_DESC_TYPE_NAMED:

						if (element->size ==
						    sizeof(struct
							   acpi_namespace_node))
						{
							descriptor_type =
							    ACPI_DESC_TYPE_NAMED;
						}
						break;

					default:

						break;
					}

					/* Display additional info for the major descriptor types */

					switch (descriptor_type) {
					case ACPI_DESC_TYPE_OPERAND:

						acpi_os_printf
						    ("%12.12s RefCount 0x%04X\n",
						     acpi_ut_get_type_name
						     (descriptor->object.common.
						      type),
						     descriptor->object.common.
						     reference_count);
						break;

					case ACPI_DESC_TYPE_PARSER:

						acpi_os_printf
						    ("AmlOpcode 0x%04X\n",
						     descriptor->op.asl.
						     aml_opcode);
						break;

					case ACPI_DESC_TYPE_NAMED:

						acpi_os_printf("%4.4s\n",
							       acpi_ut_get_node_name
							       (&descriptor->
								node));
						break;

					default:

						acpi_os_printf("\n");
						break;
					}
				}
			}

			num_outstanding++;
		}

		element = element->next;
	}

exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);

	/* Print summary */

	if (!num_outstanding) {
		ACPI_INFO(("No outstanding allocations"));
	} else {
		ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
			    num_outstanding, num_outstanding));
	}

	return_VOID;
}

#endif				/* ACPI_DBG_TRACK_ALLOCATIONS */
linux-master
drivers/acpi/acpica/uttrack.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exoparg6 - AML execution - opcodes with 6 arguments
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "acparser.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exoparg6")

/*!
 * Naming convention for AML interpreter execution routines.
 *
 * The routines that begin execution of AML opcodes are named with a common
 * convention based upon the number of arguments, the number of target operands,
 * and whether or not a value is returned:
 *
 *      AcpiExOpcode_xA_yT_zR
 *
 * Where:
 *
 * xA - ARGUMENTS:    The number of arguments (input operands) that are
 *                    required for this opcode type (1 through 6 args).
 * yT - TARGETS:      The number of targets (output operands) that are required
 *                    for this opcode type (0, 1, or 2 targets).
 * zR - RETURN VALUE: Indicates whether this opcode type returns a value
 *                    as the function return (0 or 1).
 *
 * The AcpiExOpcode* functions are called via the Dispatcher component with
 * fully resolved operands.
!*/

/* Local prototypes */
static u8
acpi_ex_do_match(u32 match_op,
		 union acpi_operand_object *package_obj,
		 union acpi_operand_object *match_obj);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_do_match
 *
 * PARAMETERS:  match_op        - The AML match operand
 *              package_obj     - Object from the target package
 *              match_obj       - Object to be matched
 *
 * RETURN:      TRUE if the match is successful, FALSE otherwise
 *
 * DESCRIPTION: Implements the low-level match for the ASL Match operator.
 *              Package elements will be implicitly converted to the type of
 *              the match object (Integer/Buffer/String).
 *
 ******************************************************************************/

static u8
acpi_ex_do_match(u32 match_op,
		 union acpi_operand_object *package_obj,
		 union acpi_operand_object *match_obj)
{
	u8 logical_result = TRUE;
	acpi_status status;

	/*
	 * Note: Since the package_obj/match_obj ordering is opposite to that of
	 * the standard logical operators, we have to reverse them when we call
	 * do_logical_op in order to make the implicit conversion rules work
	 * correctly. However, this means we have to flip the entire equation
	 * also. A bit ugly perhaps, but overall, better than fussing the
	 * parameters around at runtime, over and over again.
	 *
	 * Below, P[i] refers to the package element, M refers to the Match object.
	 */
	switch (match_op) {
	case MATCH_MTR:

		/* Always true */

		break;

	case MATCH_MEQ:
		/*
		 * True if equal: (P[i] == M)
		 * Change to:     (M == P[i])
		 */
		status =
		    acpi_ex_do_logical_op(AML_LOGICAL_EQUAL_OP, match_obj,
					  package_obj, &logical_result);
		if (ACPI_FAILURE(status)) {
			return (FALSE);
		}
		break;

	case MATCH_MLE:
		/*
		 * True if less than or equal: (P[i] <= M) (P[i] not_greater than M)
		 * Change to:                  (M >= P[i]) (M not_less than P[i])
		 */
		status =
		    acpi_ex_do_logical_op(AML_LOGICAL_LESS_OP, match_obj,
					  package_obj, &logical_result);
		if (ACPI_FAILURE(status)) {
			return (FALSE);
		}
		/* ">=" is implemented as NOT "<" */
		logical_result = (u8) ! logical_result;
		break;

	case MATCH_MLT:
		/*
		 * True if less than: (P[i] < M)
		 * Change to:         (M > P[i])
		 */
		status =
		    acpi_ex_do_logical_op(AML_LOGICAL_GREATER_OP, match_obj,
					  package_obj, &logical_result);
		if (ACPI_FAILURE(status)) {
			return (FALSE);
		}
		break;

	case MATCH_MGE:
		/*
		 * True if greater than or equal: (P[i] >= M) (P[i] not_less than M)
		 * Change to:                     (M <= P[i]) (M not_greater than P[i])
		 */
		status =
		    acpi_ex_do_logical_op(AML_LOGICAL_GREATER_OP, match_obj,
					  package_obj, &logical_result);
		if (ACPI_FAILURE(status)) {
			return (FALSE);
		}
		/* "<=" is implemented as NOT ">" */
		logical_result = (u8) ! logical_result;
		break;

	case MATCH_MGT:
		/*
		 * True if greater than: (P[i] > M)
		 * Change to:            (M < P[i])
		 */
		status =
		    acpi_ex_do_logical_op(AML_LOGICAL_LESS_OP, match_obj,
					  package_obj, &logical_result);
		if (ACPI_FAILURE(status)) {
			return (FALSE);
		}
		break;

	default:

		/* Undefined */

		return (FALSE);
	}

	return (logical_result);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_opcode_6A_0T_1R
 *
 * PARAMETERS:  walk_state          - Current walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute opcode with 6 arguments, no target, and a return value
 *
 ******************************************************************************/

acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state *walk_state)
{
	union acpi_operand_object **operand = &walk_state->operands[0];
	union acpi_operand_object *return_desc = NULL;
	acpi_status status = AE_OK;
	u64 index;
	union acpi_operand_object *this_element;

	ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
				acpi_ps_get_opcode_name(walk_state->opcode));

	switch (walk_state->opcode) {
	case AML_MATCH_OP:
		/*
		 * Match (search_pkg[0], match_op1[1], match_obj1[2],
		 *        match_op2[3], match_obj2[4], start_index[5])
		 */

		/* Validate both Match Term Operators (MTR, MEQ, etc.) */

		if ((operand[1]->integer.value > MAX_MATCH_OPERATOR) ||
		    (operand[3]->integer.value > MAX_MATCH_OPERATOR)) {
			ACPI_ERROR((AE_INFO, "Match operator out of range"));
			status = AE_AML_OPERAND_VALUE;
			goto cleanup;
		}

		/* Get the package start_index, validate against the package length */

		index = operand[5]->integer.value;
		if (index >= operand[0]->package.count) {
			ACPI_ERROR((AE_INFO,
				    "Index (0x%8.8X%8.8X) beyond package end (0x%X)",
				    ACPI_FORMAT_UINT64(index),
				    operand[0]->package.count));
			status = AE_AML_PACKAGE_LIMIT;
			goto cleanup;
		}

		/* Create an integer for the return value */
		/* Default return value is ACPI_UINT64_MAX if no match found */

		return_desc = acpi_ut_create_integer_object(ACPI_UINT64_MAX);
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		/*
		 * Examine each element until a match is found. Both match conditions
		 * must be satisfied for a match to occur. Within the loop,
		 * "continue" signifies that the current element does not match
		 * and the next should be examined.
		 *
		 * Upon finding a match, the loop will terminate via "break" at
		 * the bottom. If it terminates "normally", match_value will be
		 * ACPI_UINT64_MAX (Ones) (its initial value) indicating that no
		 * match was found.
		 */
		for (; index < operand[0]->package.count; index++) {

			/* Get the current package element */

			this_element = operand[0]->package.elements[index];

			/* Treat any uninitialized (NULL) elements as non-matching */

			if (!this_element) {
				continue;
			}

			/*
			 * Both match conditions must be satisfied. Execution of a continue
			 * (proceed to next iteration of enclosing for loop) signifies a
			 * non-match.
			 */
			if (!acpi_ex_do_match((u32) operand[1]->integer.value,
					      this_element, operand[2])) {
				continue;
			}

			if (!acpi_ex_do_match((u32) operand[3]->integer.value,
					      this_element, operand[4])) {
				continue;
			}

			/* Match found: Index is the return value */

			return_desc->integer.value = index;
			break;
		}
		break;

	case AML_LOAD_TABLE_OP:

		status = acpi_ex_load_table_op(walk_state, &return_desc);
		break;

	default:

		ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
			    walk_state->opcode));

		status = AE_AML_BAD_OPCODE;
		goto cleanup;
	}

cleanup:

	/* Delete return object on error */
	/* NOTE(review): return_desc is NULL on the bad-opcode path; presumed
	 * that acpi_ut_remove_reference tolerates NULL - confirm */

	if (ACPI_FAILURE(status)) {
		acpi_ut_remove_reference(return_desc);
	}

	/* Save return object on success */

	else {
		walk_state->result_obj = return_desc;
	}

	return_ACPI_STATUS(status);
}
linux-master
drivers/acpi/acpica/exoparg6.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: rsmem24 - Memory resource descriptors
 *
 ******************************************************************************/
/* NOTE(review): header says "rsmem24" but the module registers as "rsmemory" below */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"

#define _COMPONENT          ACPI_RESOURCES
ACPI_MODULE_NAME("rsmemory")

/*******************************************************************************
 *
 * acpi_rs_convert_memory24
 *
 ******************************************************************************/

struct acpi_rsconvert_info acpi_rs_convert_memory24[4] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY24,
	 ACPI_RS_SIZE(struct acpi_resource_memory24),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory24)},

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY24,
	 sizeof(struct aml_resource_memory24),
	 0},

	/* Read/Write bit */

	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory24.write_protect),
	 AML_OFFSET(memory24.flags),
	 0},
	/*
	 * These fields are contiguous in both the source and destination:
	 * Minimum Base Address
	 * Maximum Base Address
	 * Address Base Alignment
	 * Range Length
	 */
	{ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.memory24.minimum),
	 AML_OFFSET(memory24.minimum),
	 4}
};

/*******************************************************************************
 *
 * acpi_rs_convert_memory32
 *
 ******************************************************************************/

struct acpi_rsconvert_info acpi_rs_convert_memory32[4] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY32,
	 ACPI_RS_SIZE(struct acpi_resource_memory32),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory32)},

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY32,
	 sizeof(struct aml_resource_memory32),
	 0},

	/* Read/Write bit */

	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory32.write_protect),
	 AML_OFFSET(memory32.flags),
	 0},
	/*
	 * These fields are contiguous in both the source and destination:
	 * Minimum Base Address
	 * Maximum Base Address
	 * Address Base Alignment
	 * Range Length
	 */
	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.memory32.minimum),
	 AML_OFFSET(memory32.minimum),
	 4}
};

/*******************************************************************************
 *
 * acpi_rs_convert_fixed_memory32
 *
 ******************************************************************************/

struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[4] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_MEMORY32,
	 ACPI_RS_SIZE(struct acpi_resource_fixed_memory32),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_memory32)},

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_MEMORY32,
	 sizeof(struct aml_resource_fixed_memory32),
	 0},

	/* Read/Write bit */

	{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.fixed_memory32.write_protect),
	 AML_OFFSET(fixed_memory32.flags),
	 0},
	/*
	 * These fields are contiguous in both the source and destination:
	 * Base Address
	 * Range Length
	 */
	{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.fixed_memory32.address),
	 AML_OFFSET(fixed_memory32.address),
	 2}
};

/*******************************************************************************
 *
 * acpi_rs_get_vendor_small
 *
 ******************************************************************************/

struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
	 ACPI_RS_SIZE(struct acpi_resource_vendor),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_small)},

	/* Length of the vendor data (byte count) */

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 sizeof(u8)},

	/* Vendor data */

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_small_header),
	 0}
};

/*******************************************************************************
 *
 * acpi_rs_get_vendor_large
 *
 ******************************************************************************/

struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = {
	{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR,
	 ACPI_RS_SIZE(struct acpi_resource_vendor),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_large)},

	/* Length of the vendor data (byte count) */

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 sizeof(u8)},

	/* Vendor data */

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_large_header),
	 0}
};

/*******************************************************************************
 *
 * acpi_rs_set_vendor
 *
 ******************************************************************************/

struct acpi_rsconvert_info acpi_rs_set_vendor[7] = {
	/* Default is a small vendor descriptor */

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_SMALL,
	 sizeof(struct aml_resource_small_header),
	 ACPI_RSC_TABLE_SIZE(acpi_rs_set_vendor)},

	/* Get the length and copy the data */

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 0},

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_small_header),
	 0},

	/*
	 * All done if the Vendor byte length is 7 or less, meaning that it will
	 * fit within a small descriptor
	 */
	{ACPI_RSC_EXIT_LE, 0, 0, 7},

	/* Must create a large vendor descriptor */

	{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_LARGE,
	 sizeof(struct aml_resource_large_header),
	 0},

	{ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length),
	 0,
	 0},

	{ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]),
	 sizeof(struct aml_resource_large_header),
	 0}
};
linux-master
drivers/acpi/acpica/rsmemory.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: utstrtoul64 - String-to-integer conversion support for both
 *                            64-bit and 32-bit integers
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utstrtoul64")

/*******************************************************************************
 *
 * This module contains the top-level string to 64/32-bit unsigned integer
 * conversion functions:
 *
 *  1) A standard strtoul() function that supports 64-bit integers, base
 *     8/10/16, with integer overflow support. This is used mainly by the
 *     iASL compiler, which implements tighter constraints on integer
 *     constants than the runtime (interpreter) integer-to-string conversions.
 *  2) Runtime "Explicit conversion" as defined in the ACPI specification.
 *  3) Runtime "Implicit conversion" as defined in the ACPI specification.
 *
 * Current users of this module:
 *
 *  iASL        - Preprocessor (constants and math expressions)
 *  iASL        - Main parser, conversion of constants to integers
 *  iASL        - Data Table Compiler parser (constants and math expressions)
 *  interpreter - Implicit and explicit conversions, GPE method names
 *  interpreter - Repair code for return values from predefined names
 *  debugger    - Command line input string conversion
 *  acpi_dump   - ACPI table physical addresses
 *  acpi_exec   - Support for namespace overrides
 *
 * Notes concerning users of these interfaces:
 *
 * acpi_gbl_integer_byte_width is used to set the 32/64 bit limit for explicit
 * and implicit conversions. This global must be set to the proper width.
 * For the core ACPICA code, the width depends on the DSDT version. For the
 * acpi_ut_strtoul64 interface, all conversions are 64 bits. This interface is
 * used primarily for iASL, where the default width is 64 bits for all parsers,
 * but error checking is performed later to flag cases where a 64-bit constant
 * is wrongly defined in a 32-bit DSDT/SSDT.
 *
 * In ACPI, the only place where octal numbers are supported is within
 * the ASL language itself. This is implemented via the main acpi_ut_strtoul64
 * interface. According the ACPI specification, there is no ACPI runtime
 * support (explicit/implicit) for octal string conversions.
 *
 ******************************************************************************/

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_strtoul64
 *
 * PARAMETERS:  string                  - Null terminated input string,
 *                                        must be a valid pointer
 *              return_value            - Where the converted integer is
 *                                        returned. Must be a valid pointer
 *
 * RETURN:      Status and converted integer. Returns an exception on a
 *              64-bit numeric overflow
 *
 * DESCRIPTION: Convert a string into an unsigned integer. Always performs a
 *              full 64-bit conversion, regardless of the current global
 *              integer width. Supports Decimal, Hex, and Octal strings.
 *
 * Current users of this function:
 *
 *  iASL        - Preprocessor (constants and math expressions)
 *  iASL        - Main ASL parser, conversion of ASL constants to integers
 *  iASL        - Data Table Compiler parser (constants and math expressions)
 *  interpreter - Repair code for return values from predefined names
 *  acpi_dump   - ACPI table physical addresses
 *  acpi_exec   - Support for namespace overrides
 *
 ******************************************************************************/

acpi_status acpi_ut_strtoul64(char *string, u64 *return_value)
{
	acpi_status status = AE_OK;
	u8 original_bit_width;
	u32 base = 10;		/* Default is decimal */

	ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);

	*return_value = 0;

	/* A NULL return string returns a value of zero */

	if (*string == 0) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Whitespace-only input also converts to zero */

	if (!acpi_ut_remove_whitespace(&string)) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * 1) Check for a hex constant. A "0x" prefix indicates base 16.
	 */
	if (acpi_ut_detect_hex_prefix(&string)) {
		base = 16;
	}

	/*
	 * 2) Check for an octal constant, defined to be a leading zero
	 * followed by sequence of octal digits (0-7)
	 */
	else if (acpi_ut_detect_octal_prefix(&string)) {
		base = 8;
	}

	if (!acpi_ut_remove_leading_zeros(&string)) {
		return_ACPI_STATUS(AE_OK);	/* Return value 0 */
	}

	/*
	 * Force a full 64-bit conversion. The caller (usually iASL) must
	 * check for a 32-bit overflow later as necessary (If current mode
	 * is 32-bit, meaning a 32-bit DSDT).
	 *
	 * NOTE(review): temporarily widening the global integer width is
	 * presumed safe because this path is single-threaded (iASL) - confirm.
	 */
	original_bit_width = acpi_gbl_integer_bit_width;
	acpi_gbl_integer_bit_width = 64;

	/*
	 * Perform the base 8, 10, or 16 conversion. A 64-bit numeric overflow
	 * will return an exception (to allow iASL to flag the statement).
	 */
	switch (base) {
	case 8:
		status = acpi_ut_convert_octal_string(string, return_value);
		break;

	case 10:
		status = acpi_ut_convert_decimal_string(string, return_value);
		break;

	case 16:
	default:
		status = acpi_ut_convert_hex_string(string, return_value);
		break;
	}

	/* Only possible exception from above is a 64-bit overflow */

	acpi_gbl_integer_bit_width = original_bit_width;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_implicit_strtoul64
 *
 * PARAMETERS:  string                  - Null terminated input string,
 *                                        must be a valid pointer
 *
 * RETURN:      Converted integer
 *
 * DESCRIPTION: Perform a 64-bit conversion with restrictions placed upon
 *              an "implicit conversion" by the ACPI specification. Used by
 *              many ASL operators that require an integer operand, and support
 *              an automatic (implicit) conversion from a string operand
 *              to the final integer operand. The major restriction is that
 *              only hex strings are supported.
 *
 * -----------------------------------------------------------------------------
 *
 * Base is always 16, either with or without the 0x prefix. Decimal and
 * Octal strings are not supported, as per the ACPI specification.
 *
 * Examples (both are hex values):
 *      Add ("BA98", Arg0, Local0)
 *      Subtract ("0x12345678", Arg1, Local1)
 *
 * Conversion rules as extracted from the ACPI specification:
 *
 *  The converted integer is initialized to the value zero.
 *  The ASCII string is always interpreted as a hexadecimal constant.
 *
 *  1)  According to the ACPI specification, a "0x" prefix is not allowed.
 *      However, ACPICA allows this as an ACPI extension on general
 *      principle. (NO ERROR)
 *
 *  2)  The conversion terminates when the size of an integer is reached
 *      (32 or 64 bits). There are no numeric overflow conditions. (NO ERROR)
 *
 *  3)  The first non-hex character terminates the conversion and returns
 *      the current accumulated value of the converted integer (NO ERROR).
 *
 *  4)  Conversion of a null (zero-length) string to an integer is
 *      technically not allowed. However, ACPICA allows this as an ACPI
 *      extension. The conversion returns the value 0. (NO ERROR)
 *
 * NOTE: There are no error conditions returned by this function. At
 * the minimum, a value of zero is returned.
 *
 * Current users of this function:
 *
 *  interpreter - All runtime implicit conversions, as per ACPI specification
 *  iASL        - Data Table Compiler parser (constants and math expressions)
 *
 ******************************************************************************/

u64 acpi_ut_implicit_strtoul64(char *string)
{
	u64 converted_integer = 0;

	ACPI_FUNCTION_TRACE_STR(ut_implicit_strtoul64, string);

	if (!acpi_ut_remove_whitespace(&string)) {
		return_VALUE(0);
	}

	/*
	 * Per the ACPI specification, only hexadecimal is supported for
	 * implicit conversions, and the "0x" prefix is "not allowed".
	 * However, allow a "0x" prefix as an ACPI extension.
	 */
	acpi_ut_remove_hex_prefix(&string);

	if (!acpi_ut_remove_leading_zeros(&string)) {
		return_VALUE(0);
	}

	/*
	 * Ignore overflow as per the ACPI specification. This is implemented by
	 * ignoring the return status from the conversion function called below.
	 * On overflow, the input string is simply truncated.
	 */
	acpi_ut_convert_hex_string(string, &converted_integer);
	return_VALUE(converted_integer);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_explicit_strtoul64
 *
 * PARAMETERS:  string                  - Null terminated input string,
 *                                        must be a valid pointer
 *
 * RETURN:      Converted integer
 *
 * DESCRIPTION: Perform a 64-bit conversion with the restrictions placed upon
 *              an "explicit conversion" by the ACPI specification. The
 *              main restriction is that only hex and decimal are supported.
 *
 * -----------------------------------------------------------------------------
 *
 * Base is either 10 (default) or 16 (with 0x prefix). Octal (base 8) strings
 * are not supported, as per the ACPI specification.
 *
 * Examples:
 *      to_integer ("1000")     Decimal
 *      to_integer ("0xABCD")   Hex
 *
 * Conversion rules as extracted from the ACPI specification:
 *
 *  1)  The input string is either a decimal or hexadecimal numeric string.
 *      A hex value must be prefixed by "0x" or it is interpreted as decimal.
 *
 *  2)  The value must not exceed the maximum of an integer value
 *      (32 or 64 bits). The ACPI specification states the behavior is
 *      "unpredictable", so ACPICA matches the behavior of the implicit
 *      conversion case. There are no numeric overflow conditions. (NO ERROR)
 *
 *  3)  Behavior on the first non-hex character is not defined by the ACPI
 *      specification (for the to_integer operator), so ACPICA matches the
 *      behavior of the implicit conversion case. It terminates the
 *      conversion and returns the current accumulated value of the converted
 *      integer. (NO ERROR)
 *
 *  4)  Conversion of a null (zero-length) string to an integer is
 *      technically not allowed. However, ACPICA allows this as an ACPI
 *      extension. The conversion returns the value 0. (NO ERROR)
 *
 * NOTE: There are no error conditions returned by this function. At the
 * minimum, a value of zero is returned.
 *
 * Current users of this function:
 *
 *  interpreter - Runtime ASL to_integer operator, as per the ACPI specification
 *
 ******************************************************************************/

u64 acpi_ut_explicit_strtoul64(char *string)
{
	u64 converted_integer = 0;
	u32 base = 10;		/* Default is decimal */

	ACPI_FUNCTION_TRACE_STR(ut_explicit_strtoul64, string);

	if (!acpi_ut_remove_whitespace(&string)) {
		return_VALUE(0);
	}

	/*
	 * Only Hex and Decimal are supported, as per the ACPI specification.
	 * A "0x" prefix indicates hex; otherwise decimal is assumed.
	 */
	if (acpi_ut_detect_hex_prefix(&string)) {
		base = 16;
	}

	if (!acpi_ut_remove_leading_zeros(&string)) {
		return_VALUE(0);
	}

	/*
	 * Ignore overflow as per the ACPI specification. This is implemented by
	 * ignoring the return status from the conversion functions called below.
	 * On overflow, the input string is simply truncated.
	 */
	switch (base) {
	case 10:
	default:
		acpi_ut_convert_decimal_string(string, &converted_integer);
		break;

	case 16:
		acpi_ut_convert_hex_string(string, &converted_integer);
		break;
	}

	return_VALUE(converted_integer);
}
linux-master
drivers/acpi/acpica/utstrtoul64.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: utprint - Formatted printing routines
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utprint")

/* Format-conversion flag bits (the "type" parameter below) */

#define ACPI_FORMAT_SIGN            0x01
#define ACPI_FORMAT_SIGN_PLUS       0x02
#define ACPI_FORMAT_SIGN_PLUS_SPACE 0x04
#define ACPI_FORMAT_ZERO            0x08
#define ACPI_FORMAT_LEFT            0x10
#define ACPI_FORMAT_UPPER           0x20
#define ACPI_FORMAT_PREFIX          0x40

/* Local prototypes */

static acpi_size
acpi_ut_bound_string_length(const char *string, acpi_size count);

static char *acpi_ut_bound_string_output(char *string, const char *end, char c);

static char *acpi_ut_format_number(char *string,
				   char *end,
				   u64 number,
				   u8 base, s32 width, s32 precision, u8 type);

static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_bound_string_length
 *
 * PARAMETERS:  string              - String with boundary
 *              count               - Boundary of the string
 *
 * RETURN:      Length of the string. Less than or equal to Count.
 *
 * DESCRIPTION: Calculate the length of a string with boundary.
 *
 ******************************************************************************/

static acpi_size
acpi_ut_bound_string_length(const char *string, acpi_size count)
{
	u32 length = 0;

	while (*string && count) {
		length++;
		string++;
		count--;
	}

	return (length);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_bound_string_output
 *
 * PARAMETERS:  string              - String with boundary
 *              end                 - Boundary of the string
 *              c                   - Character to be output to the string
 *
 * RETURN:      Updated position for next valid character
 *
 * DESCRIPTION: Output a character into a string with boundary check.
 *
 ******************************************************************************/

static char *acpi_ut_bound_string_output(char *string, const char *end, char c)
{
	if (string < end) {
		*string = c;
	}

	/*
	 * Position advances even past "end" so that callers can compute the
	 * would-be output length (see vsnprintf's return value).
	 */
	++string;
	return (string);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_put_number
 *
 * PARAMETERS:  string              - Buffer to hold reverse-ordered string
 *              number              - Integer to be converted
 *              base                - Base of the integer
 *              upper               - Whether or not using upper cased digits
 *
 * RETURN:      Updated position for next valid character
 *
 * DESCRIPTION: Convert an integer into a string, note that, the string holds a
 *              reversed ordered number without the trailing zero.
 *
 ******************************************************************************/

static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper)
{
	const char *digits;
	u64 digit_index;
	char *pos;

	pos = string;
	digits = upper ? acpi_gbl_upper_hex_digits : acpi_gbl_lower_hex_digits;

	if (number == 0) {
		*(pos++) = '0';
	} else {
		/* Emit least-significant digit first; caller reverses */

		while (number) {
			(void)acpi_ut_divide(number, base, &number,
					     &digit_index);
			*(pos++) = digits[digit_index];
		}
	}

	/* *(Pos++) = '0'; */
	return (pos);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_scan_number
 *
 * PARAMETERS:  string              - String buffer
 *              number_ptr          - Where the number is returned
 *
 * RETURN:      Updated position for next valid character
 *
 * DESCRIPTION: Scan a string for a decimal integer.
 *
 ******************************************************************************/

const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
{
	u64 number = 0;

	while (isdigit((int)*string)) {
		acpi_ut_short_multiply(number, 10, &number);
		number += *(string++) - '0';
	}

	*number_ptr = number;
	return (string);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_print_number
 *
 * PARAMETERS:  string              - String buffer
 *              number              - The number to be converted
 *
 * RETURN:      Updated position for next valid character
 *
 * DESCRIPTION: Print a decimal integer into a string.
 *
 ******************************************************************************/

const char *acpi_ut_print_number(char *string, u64 number)
{
	char ascii_string[20];
	const char *pos1;
	char *pos2;

	pos1 = acpi_ut_put_number(ascii_string, number, 10, FALSE);
	pos2 = string;

	/* Reverse the digits produced by acpi_ut_put_number */

	while (pos1 != ascii_string) {
		*(pos2++) = *(--pos1);
	}

	*pos2 = 0;
	return (string);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_format_number
 *
 * PARAMETERS:  string              - String buffer with boundary
 *              end                 - Boundary of the string
 *              number              - The number to be converted
 *              base                - Base of the integer
 *              width               - Field width
 *              precision           - Precision of the integer
 *              type                - Special printing flags
 *
 * RETURN:      Updated position for next valid character
 *
 * DESCRIPTION: Print an integer into a string with any base and any precision.
 *
 ******************************************************************************/

static char *acpi_ut_format_number(char *string,
				   char *end,
				   u64 number,
				   u8 base, s32 width, s32 precision, u8 type)
{
	char *pos;
	char sign;
	char zero;
	u8 need_prefix;
	u8 upper;
	s32 i;
	char reversed_string[66];

	/* Parameter validation */

	if (base < 2 || base > 16) {
		return (NULL);
	}

	if (type & ACPI_FORMAT_LEFT) {
		type &= ~ACPI_FORMAT_ZERO;
	}

	need_prefix = ((type & ACPI_FORMAT_PREFIX)
		       && base != 10) ? TRUE : FALSE;
	upper = (type & ACPI_FORMAT_UPPER) ? TRUE : FALSE;
	zero = (type & ACPI_FORMAT_ZERO) ? '0' : ' ';

	/* Calculate size according to sign and prefix */

	sign = '\0';
	if (type & ACPI_FORMAT_SIGN) {
		if ((s64)number < 0) {
			sign = '-';
			number = -(s64)number;
			width--;
		} else if (type & ACPI_FORMAT_SIGN_PLUS) {
			sign = '+';
			width--;
		} else if (type & ACPI_FORMAT_SIGN_PLUS_SPACE) {
			sign = ' ';
			width--;
		}
	}

	if (need_prefix) {
		width--;
		if (base == 16) {
			width--;
		}
	}

	/* Generate full string in reverse order */

	pos = acpi_ut_put_number(reversed_string, number, base, upper);
	i = (s32)ACPI_PTR_DIFF(pos, reversed_string);

	/* Printing 100 using %2d gives "100", not "00" */

	if (i > precision) {
		precision = i;
	}

	width -= precision;

	/* Output the string */

	if (!(type & (ACPI_FORMAT_ZERO | ACPI_FORMAT_LEFT))) {
		while (--width >= 0) {
			string = acpi_ut_bound_string_output(string, end, ' ');
		}
	}
	if (sign) {
		string = acpi_ut_bound_string_output(string, end, sign);
	}
	if (need_prefix) {
		string = acpi_ut_bound_string_output(string, end, '0');
		if (base == 16) {
			string =
			    acpi_ut_bound_string_output(string, end,
							upper ? 'X' : 'x');
		}
	}
	if (!(type & ACPI_FORMAT_LEFT)) {
		while (--width >= 0) {
			string = acpi_ut_bound_string_output(string, end, zero);
		}
	}

	while (i <= --precision) {
		string = acpi_ut_bound_string_output(string, end, '0');
	}
	while (--i >= 0) {
		string = acpi_ut_bound_string_output(string, end,
						     reversed_string[i]);
	}
	while (--width >= 0) {
		string = acpi_ut_bound_string_output(string, end, ' ');
	}

	return (string);
}

/*******************************************************************************
 *
 * FUNCTION:    vsnprintf
 *
 * PARAMETERS:  string              - String with boundary
 *              size                - Boundary of the string
 *              format              - Standard printf format
 *              args                - Argument list
 *
 * RETURN:      Number of bytes actually written.
 *
 * DESCRIPTION: Formatted output to a string using argument list pointer.
* ******************************************************************************/ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) { u8 base; u8 type; s32 width; s32 precision; char qualifier; u64 number; char *pos; char *end; char c; const char *s; const void *p; s32 length; int i; pos = string; if (size != ACPI_UINT32_MAX) { end = string + size; } else { end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX); } for (; *format; ++format) { if (*format != '%') { pos = acpi_ut_bound_string_output(pos, end, *format); continue; } type = 0; base = 10; /* Process sign */ do { ++format; if (*format == '#') { type |= ACPI_FORMAT_PREFIX; } else if (*format == '0') { type |= ACPI_FORMAT_ZERO; } else if (*format == '+') { type |= ACPI_FORMAT_SIGN_PLUS; } else if (*format == ' ') { type |= ACPI_FORMAT_SIGN_PLUS_SPACE; } else if (*format == '-') { type |= ACPI_FORMAT_LEFT; } else { break; } } while (1); /* Process width */ width = -1; if (isdigit((int)*format)) { format = acpi_ut_scan_number(format, &number); width = (s32)number; } else if (*format == '*') { ++format; width = va_arg(args, int); if (width < 0) { width = -width; type |= ACPI_FORMAT_LEFT; } } /* Process precision */ precision = -1; if (*format == '.') { ++format; if (isdigit((int)*format)) { format = acpi_ut_scan_number(format, &number); precision = (s32)number; } else if (*format == '*') { ++format; precision = va_arg(args, int); } if (precision < 0) { precision = 0; } } /* Process qualifier */ qualifier = -1; if (*format == 'h' || *format == 'l' || *format == 'L') { qualifier = *format; ++format; if (qualifier == 'l' && *format == 'l') { qualifier = 'L'; ++format; } } switch (*format) { case '%': pos = acpi_ut_bound_string_output(pos, end, '%'); continue; case 'c': if (!(type & ACPI_FORMAT_LEFT)) { while (--width > 0) { pos = acpi_ut_bound_string_output(pos, end, ' '); } } c = (char)va_arg(args, int); pos = acpi_ut_bound_string_output(pos, end, c); while (--width > 0) { pos = 
acpi_ut_bound_string_output(pos, end, ' '); } continue; case 's': s = va_arg(args, char *); if (!s) { s = "<NULL>"; } length = (s32)acpi_ut_bound_string_length(s, precision); if (!(type & ACPI_FORMAT_LEFT)) { while (length < width--) { pos = acpi_ut_bound_string_output(pos, end, ' '); } } for (i = 0; i < length; ++i) { pos = acpi_ut_bound_string_output(pos, end, *s); ++s; } while (length < width--) { pos = acpi_ut_bound_string_output(pos, end, ' '); } continue; case 'o': base = 8; break; case 'X': type |= ACPI_FORMAT_UPPER; ACPI_FALLTHROUGH; case 'x': base = 16; break; case 'd': case 'i': type |= ACPI_FORMAT_SIGN; case 'u': break; case 'p': if (width == -1) { width = 2 * sizeof(void *); type |= ACPI_FORMAT_ZERO; } p = va_arg(args, void *); pos = acpi_ut_format_number(pos, end, ACPI_TO_INTEGER(p), 16, width, precision, type); continue; default: pos = acpi_ut_bound_string_output(pos, end, '%'); if (*format) { pos = acpi_ut_bound_string_output(pos, end, *format); } else { --format; } continue; } if (qualifier == 'L') { number = va_arg(args, u64); if (type & ACPI_FORMAT_SIGN) { number = (s64)number; } } else if (qualifier == 'l') { number = va_arg(args, unsigned long); if (type & ACPI_FORMAT_SIGN) { number = (s32)number; } } else if (qualifier == 'h') { number = (u16)va_arg(args, int); if (type & ACPI_FORMAT_SIGN) { number = (s16)number; } } else { number = va_arg(args, unsigned int); if (type & ACPI_FORMAT_SIGN) { number = (signed int)number; } } pos = acpi_ut_format_number(pos, end, number, base, width, precision, type); } if (size > 0) { if (pos < end) { *pos = '\0'; } else { end[-1] = '\0'; } } return ((int)ACPI_PTR_DIFF(pos, string)); } /******************************************************************************* * * FUNCTION: snprintf * * PARAMETERS: string - String with boundary * size - Boundary of the string * Format, ... - Standard printf format * * RETURN: Number of bytes actually written. * * DESCRIPTION: Formatted output to a string. 
* ******************************************************************************/ int snprintf(char *string, acpi_size size, const char *format, ...) { va_list args; int length; va_start(args, format); length = vsnprintf(string, size, format, args); va_end(args); return (length); } /******************************************************************************* * * FUNCTION: sprintf * * PARAMETERS: string - String with boundary * Format, ... - Standard printf format * * RETURN: Number of bytes actually written. * * DESCRIPTION: Formatted output to a string. * ******************************************************************************/ int sprintf(char *string, const char *format, ...) { va_list args; int length; va_start(args, format); length = vsnprintf(string, ACPI_UINT32_MAX, format, args); va_end(args); return (length); } #ifdef ACPI_APPLICATION /******************************************************************************* * * FUNCTION: vprintf * * PARAMETERS: format - Standard printf format * args - Argument list * * RETURN: Number of bytes actually written. * * DESCRIPTION: Formatted output to stdout using argument list pointer. * ******************************************************************************/ int vprintf(const char *format, va_list args) { acpi_cpu_flags flags; int length; flags = acpi_os_acquire_lock(acpi_gbl_print_lock); length = vsnprintf(acpi_gbl_print_buffer, sizeof(acpi_gbl_print_buffer), format, args); (void)fwrite(acpi_gbl_print_buffer, length, 1, ACPI_FILE_OUT); acpi_os_release_lock(acpi_gbl_print_lock, flags); return (length); } /******************************************************************************* * * FUNCTION: printf * * PARAMETERS: Format, ... - Standard printf format * * RETURN: Number of bytes actually written. * * DESCRIPTION: Formatted output to stdout. * ******************************************************************************/ int printf(const char *format, ...) 
{ va_list args; int length; va_start(args, format); length = vprintf(format, args); va_end(args); return (length); } /******************************************************************************* * * FUNCTION: vfprintf * * PARAMETERS: file - File descriptor * format - Standard printf format * args - Argument list * * RETURN: Number of bytes actually written. * * DESCRIPTION: Formatted output to a file using argument list pointer. * ******************************************************************************/ int vfprintf(FILE * file, const char *format, va_list args) { acpi_cpu_flags flags; int length; flags = acpi_os_acquire_lock(acpi_gbl_print_lock); length = vsnprintf(acpi_gbl_print_buffer, sizeof(acpi_gbl_print_buffer), format, args); (void)fwrite(acpi_gbl_print_buffer, length, 1, file); acpi_os_release_lock(acpi_gbl_print_lock, flags); return (length); } /******************************************************************************* * * FUNCTION: fprintf * * PARAMETERS: file - File descriptor * Format, ... - Standard printf format * * RETURN: Number of bytes actually written. * * DESCRIPTION: Formatted output to a file. * ******************************************************************************/ int fprintf(FILE * file, const char *format, ...) { va_list args; int length; va_start(args, format); length = vfprintf(file, format, args); va_end(args); return (length); } #endif
linux-master
drivers/acpi/acpica/utprint.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: rsserial - GPIO/serial_bus resource descriptors * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rsserial") /******************************************************************************* * * acpi_rs_convert_gpio * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO, ACPI_RS_SIZE(struct acpi_resource_gpio), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO, sizeof(struct aml_resource_gpio), 0}, /* * These fields are contiguous in both the source and destination: * revision_id * connection_type */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id), AML_OFFSET(gpio.revision_id), 2}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer), AML_OFFSET(gpio.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.shareable), AML_OFFSET(gpio.int_flags), 3}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.wake_capable), AML_OFFSET(gpio.int_flags), 4}, {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction), AML_OFFSET(gpio.int_flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering), AML_OFFSET(gpio.int_flags), 0}, {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity), AML_OFFSET(gpio.int_flags), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config), AML_OFFSET(gpio.pin_config), 1}, /* * These fields are contiguous in both the source and destination: * drive_strength * debounce_timeout */ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength), AML_OFFSET(gpio.drive_strength), 2}, /* Pin Table */ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length), 
AML_OFFSET(gpio.pin_table_offset), AML_OFFSET(gpio.res_source_offset)}, {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table), AML_OFFSET(gpio.pin_table_offset), 0}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index), AML_OFFSET(gpio.res_source_index), 1}, {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.gpio.resource_source.string_length), AML_OFFSET(gpio.res_source_offset), AML_OFFSET(gpio.vendor_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr), AML_OFFSET(gpio.res_source_offset), 0}, /* Vendor Data */ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length), AML_OFFSET(gpio.vendor_length), 1}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data), AML_OFFSET(gpio.vendor_offset), 0}, }; /******************************************************************************* * * acpi_rs_convert_clock_input * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_clock_input[8] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_CLOCK_INPUT, ACPI_RS_SIZE(struct acpi_resource_clock_input), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_clock_input)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_CLOCK_INPUT, sizeof(struct aml_resource_clock_input), 0} , {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.clock_input.revision_id), AML_OFFSET(clock_input.revision_id), 1} , {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.clock_input.mode), AML_OFFSET(clock_input.flags), 0} , {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.clock_input.scale), AML_OFFSET(clock_input.flags), 1} , {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.clock_input.frequency_divisor), AML_OFFSET(clock_input.frequency_divisor), 2} , {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.clock_input.frequency_numerator), AML_OFFSET(clock_input.frequency_numerator), 4} , /* Resource Source */ {ACPI_RSC_SOURCE, ACPI_RS_OFFSET(data.clock_input.resource_source), 0, sizeof(struct aml_resource_clock_input)} , }; 
/******************************************************************************* * * acpi_rs_convert_pinfunction * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_FUNCTION, ACPI_RS_SIZE(struct acpi_resource_pin_function), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_function)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_FUNCTION, sizeof(struct aml_resource_pin_function), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_function.revision_id), AML_OFFSET(pin_function.revision_id), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.shareable), AML_OFFSET(pin_function.flags), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_function.pin_config), AML_OFFSET(pin_function.pin_config), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.pin_function.function_number), AML_OFFSET(pin_function.function_number), 2}, /* Pin Table */ /* * It is OK to use GPIO operations here because none of them refer GPIO * structures directly but instead use offsets given here. 
*/ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.pin_function.pin_table_length), AML_OFFSET(pin_function.pin_table_offset), AML_OFFSET(pin_function.res_source_offset)}, {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.pin_function.pin_table), AML_OFFSET(pin_function.pin_table_offset), 0}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_function.resource_source.index), AML_OFFSET(pin_function.res_source_index), 1}, {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_function.resource_source.string_length), AML_OFFSET(pin_function.res_source_offset), AML_OFFSET(pin_function.vendor_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_function.resource_source.string_ptr), AML_OFFSET(pin_function.res_source_offset), 0}, /* Vendor Data */ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_function.vendor_length), AML_OFFSET(pin_function.vendor_length), 1}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_function.vendor_data), AML_OFFSET(pin_function.vendor_offset), 0}, }; /******************************************************************************* * * acpi_rs_convert_csi2_serial_bus * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_csi2_serial_bus[14] = { { ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, ACPI_RS_SIZE(struct acpi_resource_csi2_serialbus), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_csi2_serial_bus) }, { ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, sizeof(struct aml_resource_csi2_serialbus), 0 }, { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), AML_OFFSET(common_serial_bus.revision_id), 1 }, { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.type), AML_OFFSET(csi2_serial_bus.type), 1 }, { ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.producer_consumer), AML_OFFSET(csi2_serial_bus.flags), 1 }, { ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.slave_mode), AML_OFFSET(csi2_serial_bus.flags), 0 }, { ACPI_RSC_2BITFLAG, 
ACPI_RS_OFFSET(data.csi2_serial_bus.phy_type), AML_OFFSET(csi2_serial_bus.type_specific_flags), 0 }, { ACPI_RSC_6BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.local_port_instance), AML_OFFSET(csi2_serial_bus.type_specific_flags), 2 }, { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.type_revision_id), AML_OFFSET(csi2_serial_bus.type_revision_id), 1 }, /* Vendor data */ { ACPI_RSC_COUNT_SERIAL_VEN, ACPI_RS_OFFSET(data.csi2_serial_bus.vendor_length), AML_OFFSET(csi2_serial_bus.type_data_length), AML_RESOURCE_CSI2_MIN_DATA_LEN }, { ACPI_RSC_MOVE_SERIAL_VEN, ACPI_RS_OFFSET(data.csi2_serial_bus.vendor_data), 0, sizeof(struct aml_resource_csi2_serialbus) }, /* Resource Source */ { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.index), AML_OFFSET(csi2_serial_bus.res_source_index), 1 }, { ACPI_RSC_COUNT_SERIAL_RES, ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.string_length), AML_OFFSET(csi2_serial_bus.type_data_length), sizeof(struct aml_resource_csi2_serialbus) }, { ACPI_RSC_MOVE_SERIAL_RES, ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.string_ptr), AML_OFFSET(csi2_serial_bus.type_data_length), sizeof(struct aml_resource_csi2_serialbus) }, }; /******************************************************************************* * * acpi_rs_convert_i2c_serial_bus * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[17] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, sizeof(struct aml_resource_i2c_serialbus), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), AML_OFFSET(common_serial_bus.revision_id), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type), AML_OFFSET(common_serial_bus.type), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode), 
AML_OFFSET(common_serial_bus.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer), AML_OFFSET(common_serial_bus.flags), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing), AML_OFFSET(common_serial_bus.flags), 2}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id), AML_OFFSET(common_serial_bus.type_revision_id), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.common_serial_bus.type_data_length), AML_OFFSET(common_serial_bus.type_data_length), 1}, /* Vendor data */ {ACPI_RSC_COUNT_SERIAL_VEN, ACPI_RS_OFFSET(data.common_serial_bus.vendor_length), AML_OFFSET(common_serial_bus.type_data_length), AML_RESOURCE_I2C_MIN_DATA_LEN}, {ACPI_RSC_MOVE_SERIAL_VEN, ACPI_RS_OFFSET(data.common_serial_bus.vendor_data), 0, sizeof(struct aml_resource_i2c_serialbus)}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index), AML_OFFSET(common_serial_bus.res_source_index), 1}, {ACPI_RSC_COUNT_SERIAL_RES, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length), AML_OFFSET(common_serial_bus.type_data_length), sizeof(struct aml_resource_common_serialbus)}, {ACPI_RSC_MOVE_SERIAL_RES, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr), AML_OFFSET(common_serial_bus.type_data_length), sizeof(struct aml_resource_common_serialbus)}, /* I2C bus type specific */ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode), AML_OFFSET(i2c_serial_bus.type_specific_flags), 0}, {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed), AML_OFFSET(i2c_serial_bus.connection_speed), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address), AML_OFFSET(i2c_serial_bus.slave_address), 1}, }; /******************************************************************************* * * acpi_rs_convert_spi_serial_bus * ******************************************************************************/ struct acpi_rsconvert_info 
acpi_rs_convert_spi_serial_bus[21] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, ACPI_RS_SIZE(struct acpi_resource_spi_serialbus), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, sizeof(struct aml_resource_spi_serialbus), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), AML_OFFSET(common_serial_bus.revision_id), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type), AML_OFFSET(common_serial_bus.type), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode), AML_OFFSET(common_serial_bus.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer), AML_OFFSET(common_serial_bus.flags), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing), AML_OFFSET(common_serial_bus.flags), 2}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id), AML_OFFSET(common_serial_bus.type_revision_id), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.common_serial_bus.type_data_length), AML_OFFSET(common_serial_bus.type_data_length), 1}, /* Vendor data */ {ACPI_RSC_COUNT_SERIAL_VEN, ACPI_RS_OFFSET(data.common_serial_bus.vendor_length), AML_OFFSET(common_serial_bus.type_data_length), AML_RESOURCE_SPI_MIN_DATA_LEN}, {ACPI_RSC_MOVE_SERIAL_VEN, ACPI_RS_OFFSET(data.common_serial_bus.vendor_data), 0, sizeof(struct aml_resource_spi_serialbus)}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index), AML_OFFSET(common_serial_bus.res_source_index), 1}, {ACPI_RSC_COUNT_SERIAL_RES, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length), AML_OFFSET(common_serial_bus.type_data_length), sizeof(struct aml_resource_common_serialbus)}, {ACPI_RSC_MOVE_SERIAL_RES, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr), AML_OFFSET(common_serial_bus.type_data_length), sizeof(struct aml_resource_common_serialbus)}, /* Spi bus type specific */ 
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode), AML_OFFSET(spi_serial_bus.type_specific_flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity), AML_OFFSET(spi_serial_bus.type_specific_flags), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length), AML_OFFSET(spi_serial_bus.data_bit_length), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase), AML_OFFSET(spi_serial_bus.clock_phase), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity), AML_OFFSET(spi_serial_bus.clock_polarity), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection), AML_OFFSET(spi_serial_bus.device_selection), 1}, {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed), AML_OFFSET(spi_serial_bus.connection_speed), 1}, }; /******************************************************************************* * * acpi_rs_convert_uart_serial_bus * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[23] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS, ACPI_RS_SIZE(struct acpi_resource_uart_serialbus), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS, sizeof(struct aml_resource_uart_serialbus), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id), AML_OFFSET(common_serial_bus.revision_id), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type), AML_OFFSET(common_serial_bus.type), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode), AML_OFFSET(common_serial_bus.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer), AML_OFFSET(common_serial_bus.flags), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing), AML_OFFSET(common_serial_bus.flags), 2}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id), 
AML_OFFSET(common_serial_bus.type_revision_id), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.common_serial_bus.type_data_length), AML_OFFSET(common_serial_bus.type_data_length), 1}, /* Vendor data */ {ACPI_RSC_COUNT_SERIAL_VEN, ACPI_RS_OFFSET(data.common_serial_bus.vendor_length), AML_OFFSET(common_serial_bus.type_data_length), AML_RESOURCE_UART_MIN_DATA_LEN}, {ACPI_RSC_MOVE_SERIAL_VEN, ACPI_RS_OFFSET(data.common_serial_bus.vendor_data), 0, sizeof(struct aml_resource_uart_serialbus)}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index), AML_OFFSET(common_serial_bus.res_source_index), 1}, {ACPI_RSC_COUNT_SERIAL_RES, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length), AML_OFFSET(common_serial_bus.type_data_length), sizeof(struct aml_resource_common_serialbus)}, {ACPI_RSC_MOVE_SERIAL_RES, ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr), AML_OFFSET(common_serial_bus.type_data_length), sizeof(struct aml_resource_common_serialbus)}, /* Uart bus type specific */ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control), AML_OFFSET(uart_serial_bus.type_specific_flags), 0}, {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits), AML_OFFSET(uart_serial_bus.type_specific_flags), 2}, {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits), AML_OFFSET(uart_serial_bus.type_specific_flags), 4}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian), AML_OFFSET(uart_serial_bus.type_specific_flags), 7}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity), AML_OFFSET(uart_serial_bus.parity), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled), AML_OFFSET(uart_serial_bus.lines_enabled), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size), AML_OFFSET(uart_serial_bus.rx_fifo_size), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size), AML_OFFSET(uart_serial_bus.tx_fifo_size), 1}, 
{ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate), AML_OFFSET(uart_serial_bus.default_baud_rate), 1}, }; /******************************************************************************* * * acpi_rs_convert_pin_config * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_pin_config[14] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_CONFIG, ACPI_RS_SIZE(struct acpi_resource_pin_config), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_config)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_CONFIG, sizeof(struct aml_resource_pin_config), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_config.revision_id), AML_OFFSET(pin_config.revision_id), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.shareable), AML_OFFSET(pin_config.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.producer_consumer), AML_OFFSET(pin_config.flags), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_config.pin_config_type), AML_OFFSET(pin_config.pin_config_type), 1}, {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.pin_config.pin_config_value), AML_OFFSET(pin_config.pin_config_value), 1}, /* Pin Table */ /* * It is OK to use GPIO operations here because none of them refer GPIO * structures directly but instead use offsets given here. 
*/ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.pin_config.pin_table_length), AML_OFFSET(pin_config.pin_table_offset), AML_OFFSET(pin_config.res_source_offset)}, {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.pin_config.pin_table), AML_OFFSET(pin_config.pin_table_offset), 0}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_config.resource_source.index), AML_OFFSET(pin_config.res_source_index), 1}, {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_config.resource_source.string_length), AML_OFFSET(pin_config.res_source_offset), AML_OFFSET(pin_config.vendor_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_config.resource_source.string_ptr), AML_OFFSET(pin_config.res_source_offset), 0}, /* Vendor Data */ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_config.vendor_length), AML_OFFSET(pin_config.vendor_length), 1}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_config.vendor_data), AML_OFFSET(pin_config.vendor_offset), 0}, }; /******************************************************************************* * * acpi_rs_convert_pin_group * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_pin_group[10] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_GROUP, ACPI_RS_SIZE(struct acpi_resource_pin_group), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_group)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_GROUP, sizeof(struct aml_resource_pin_group), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group.revision_id), AML_OFFSET(pin_group.revision_id), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group.producer_consumer), AML_OFFSET(pin_group.flags), 0}, /* Pin Table */ /* * It is OK to use GPIO operations here because none of them refer GPIO * structures directly but instead use offsets given here. 
*/ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.pin_group.pin_table_length), AML_OFFSET(pin_group.pin_table_offset), AML_OFFSET(pin_group.label_offset)}, {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.pin_group.pin_table), AML_OFFSET(pin_group.pin_table_offset), 0}, /* Resource Label */ {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_group.resource_label.string_length), AML_OFFSET(pin_group.label_offset), AML_OFFSET(pin_group.vendor_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group.resource_label.string_ptr), AML_OFFSET(pin_group.label_offset), 0}, /* Vendor Data */ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_group.vendor_length), AML_OFFSET(pin_group.vendor_length), 1}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group.vendor_data), AML_OFFSET(pin_group.vendor_offset), 0}, }; /******************************************************************************* * * acpi_rs_convert_pin_group_function * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[13] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION, ACPI_RS_SIZE(struct acpi_resource_pin_group_function), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_group_function)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION, sizeof(struct aml_resource_pin_group_function), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_function.revision_id), AML_OFFSET(pin_group_function.revision_id), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.shareable), AML_OFFSET(pin_group_function.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.producer_consumer), AML_OFFSET(pin_group_function.flags), 1}, {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.pin_group_function.function_number), AML_OFFSET(pin_group_function.function_number), 1}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_function.resource_source.index), AML_OFFSET(pin_group_function.res_source_index), 
1}, {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_function.resource_source.string_length), AML_OFFSET(pin_group_function.res_source_offset), AML_OFFSET(pin_group_function.res_source_label_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_function.resource_source.string_ptr), AML_OFFSET(pin_group_function.res_source_offset), 0}, /* Resource Source Label */ {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_function.resource_source_label. string_length), AML_OFFSET(pin_group_function.res_source_label_offset), AML_OFFSET(pin_group_function.vendor_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_function.resource_source_label. string_ptr), AML_OFFSET(pin_group_function.res_source_label_offset), 0}, /* Vendor Data */ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_group_function.vendor_length), AML_OFFSET(pin_group_function.vendor_length), 1}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_function.vendor_data), AML_OFFSET(pin_group_function.vendor_offset), 0}, }; /******************************************************************************* * * acpi_rs_convert_pin_group_config * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[14] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG, ACPI_RS_SIZE(struct acpi_resource_pin_group_config), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_pin_group_config)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG, sizeof(struct aml_resource_pin_group_config), 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_config.revision_id), AML_OFFSET(pin_group_config.revision_id), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.shareable), AML_OFFSET(pin_group_config.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.producer_consumer), AML_OFFSET(pin_group_config.flags), 1}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_config.pin_config_type), 
AML_OFFSET(pin_group_config.pin_config_type), 1}, {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.pin_group_config.pin_config_value), AML_OFFSET(pin_group_config.pin_config_value), 1}, /* Resource Source */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.pin_group_config.resource_source.index), AML_OFFSET(pin_group_config.res_source_index), 1}, {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_config.resource_source.string_length), AML_OFFSET(pin_group_config.res_source_offset), AML_OFFSET(pin_group_config.res_source_label_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_config.resource_source.string_ptr), AML_OFFSET(pin_group_config.res_source_offset), 0}, /* Resource Source Label */ {ACPI_RSC_COUNT_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_config.resource_source_label. string_length), AML_OFFSET(pin_group_config.res_source_label_offset), AML_OFFSET(pin_group_config.vendor_offset)}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_config.resource_source_label.string_ptr), AML_OFFSET(pin_group_config.res_source_label_offset), 0}, /* Vendor Data */ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.pin_group_config.vendor_length), AML_OFFSET(pin_group_config.vendor_length), 1}, {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.pin_group_config.vendor_data), AML_OFFSET(pin_group_config.vendor_offset), 0}, };
linux-master
drivers/acpi/acpica/rsserial.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exfield - AML execution - field_unit read/write
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exfield")

/*
 * This table maps the various Attrib protocols to the byte transfer
 * length. Used for the generic serial bus.
 */
#define ACPI_INVALID_PROTOCOL_ID        0x80
#define ACPI_MAX_PROTOCOL_ID            0x0F

/*
 * Indexed by protocol ID (field access attribute, 0x00-0x0F).
 * 0xFF marks a variable/block-length protocol; ACPI_INVALID_PROTOCOL_ID
 * marks a reserved slot.
 */
static const u8 acpi_protocol_lengths[] = {
	ACPI_INVALID_PROTOCOL_ID,	/* 0 - reserved */
	ACPI_INVALID_PROTOCOL_ID,	/* 1 - reserved */
	0x00,			/* 2 - ATTRIB_QUICK */
	ACPI_INVALID_PROTOCOL_ID,	/* 3 - reserved */
	0x01,			/* 4 - ATTRIB_SEND_RECEIVE */
	ACPI_INVALID_PROTOCOL_ID,	/* 5 - reserved */
	0x01,			/* 6 - ATTRIB_BYTE */
	ACPI_INVALID_PROTOCOL_ID,	/* 7 - reserved */
	0x02,			/* 8 - ATTRIB_WORD */
	ACPI_INVALID_PROTOCOL_ID,	/* 9 - reserved */
	0xFF,			/* A - ATTRIB_BLOCK */
	0xFF,			/* B - ATTRIB_BYTES */
	0x02,			/* C - ATTRIB_PROCESS_CALL */
	0xFF,			/* D - ATTRIB_BLOCK_PROCESS_CALL */
	0xFF,			/* E - ATTRIB_RAW_BYTES */
	0xFF			/* F - ATTRIB_RAW_PROCESS_BYTES */
};

#define PCC_MASTER_SUBSPACE     3

/*
 * The following macros determine whether a given offset is a COMD field.
 * According to the specification, generic subspaces (types 0-2) contain a
 * 2-byte COMD field at offset 4 and master subspaces (type 3) contain a
 * 4-byte COMD field starting at offset 12.
 *
 * NOTE(review): the macro parameter 'a' is not parenthesized in the
 * expansions below, so callers must pass a simple expression (the current
 * callers pass a struct member, which is safe).
 */
#define GENERIC_SUBSPACE_COMMAND(a)     (4 == a || a == 5)
#define MASTER_SUBSPACE_COMMAND(a)      (12 <= a && a <= 15)

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_get_protocol_buffer_length
 *
 * PARAMETERS:  protocol_id         - The type of the protocol indicated by
 *                                    region field access attributes
 *              return_length       - Where the protocol byte transfer length
 *                                    is returned
 *
 * RETURN:      Status and decoded byte transfer length. AE_AML_PROTOCOL if
 *              protocol_id is out of range or maps to a reserved slot.
 *
 * DESCRIPTION: This routine returns the length of the generic_serial_bus
 *              protocol bytes (simple lookup into acpi_protocol_lengths).
 *
 ******************************************************************************/

acpi_status
acpi_ex_get_protocol_buffer_length(u32 protocol_id, u32 *return_length)
{

	/* Reject out-of-range IDs and reserved table slots */

	if ((protocol_id > ACPI_MAX_PROTOCOL_ID) ||
	    (acpi_protocol_lengths[protocol_id] == ACPI_INVALID_PROTOCOL_ID)) {
		ACPI_ERROR((AE_INFO,
			    "Invalid Field/AccessAs protocol ID: 0x%4.4X",
			    protocol_id));

		return (AE_AML_PROTOCOL);
	}

	*return_length = acpi_protocol_lengths[protocol_id];
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_read_data_from_field
 *
 * PARAMETERS:  walk_state          - Current execution state
 *              obj_desc            - The named field
 *              ret_buffer_desc     - Where the return data object is stored
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Read from a named field. Returns either an Integer or a
 *              Buffer, depending on the size of the field and whether the
 *              field was created by the create_field() operator.
* ******************************************************************************/ acpi_status acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, union acpi_operand_object *obj_desc, union acpi_operand_object **ret_buffer_desc) { acpi_status status; union acpi_operand_object *buffer_desc; void *buffer; u32 buffer_length; ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc); /* Parameter validation */ if (!obj_desc) { return_ACPI_STATUS(AE_AML_NO_OPERAND); } if (!ret_buffer_desc) { return_ACPI_STATUS(AE_BAD_PARAMETER); } if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) { /* * If the buffer_field arguments have not been previously evaluated, * evaluate them now and save the results. */ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_field_arguments(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_IPMI || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_RT || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)) { /* SMBus, GSBus, IPMI serial */ status = acpi_ex_read_serial_bus(obj_desc, ret_buffer_desc); return_ACPI_STATUS(status); } /* * Allocate a buffer for the contents of the field. * * If the field is larger than the current integer width, create * a BUFFER to hold it. Otherwise, use an INTEGER. This allows * the use of arithmetic operators on the returned value if the * field size is equal or smaller than an Integer. * * However, all buffer fields created by create_field operator needs to * remain as a buffer to match other AML interpreter implementations. * * Note: Field.length is in bits. 
*/ buffer_length = (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length); if (buffer_length > acpi_gbl_integer_byte_width || (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD && obj_desc->buffer_field.is_create_field)) { /* Field is too large for an Integer, create a Buffer instead */ buffer_desc = acpi_ut_create_buffer_object(buffer_length); if (!buffer_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } buffer = buffer_desc->buffer.pointer; } else { /* Field will fit within an Integer (normal case) */ buffer_desc = acpi_ut_create_integer_object((u64) 0); if (!buffer_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } buffer_length = acpi_gbl_integer_byte_width; buffer = &buffer_desc->integer.value; } if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) { /* General Purpose I/O */ status = acpi_ex_read_gpio(obj_desc, buffer); goto exit; } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { /* * Reading from a PCC field unit does not require the handler because * it only requires reading from the internal_pcc_buffer. */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "PCC FieldRead bits %u\n", obj_desc->field.bit_length)); memcpy(buffer, obj_desc->field.region_obj->field.internal_pcc_buffer + obj_desc->field.base_byte_offset, (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field. 
bit_length)); *ret_buffer_desc = buffer_desc; return AE_OK; } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n", obj_desc, obj_desc->common.type, buffer, buffer_length)); ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n", obj_desc->common_field.bit_length, obj_desc->common_field.start_field_bit_offset, obj_desc->common_field.base_byte_offset)); /* Lock entire transaction if requested */ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); /* Read from the field */ status = acpi_ex_extract_from_field(obj_desc, buffer, buffer_length); acpi_ex_release_global_lock(obj_desc->common_field.field_flags); exit: if (ACPI_FAILURE(status)) { acpi_ut_remove_reference(buffer_desc); } else { *ret_buffer_desc = buffer_desc; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_write_data_to_field * * PARAMETERS: source_desc - Contains data to write * obj_desc - The named field * result_desc - Where the return value is returned, if any * * RETURN: Status * * DESCRIPTION: Write to a named field * ******************************************************************************/ acpi_status acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, union acpi_operand_object *obj_desc, union acpi_operand_object **result_desc) { acpi_status status; u32 buffer_length; u32 data_length; void *buffer; ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc); /* Parameter validation */ if (!source_desc || !obj_desc) { return_ACPI_STATUS(AE_AML_NO_OPERAND); } if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) { /* * If the buffer_field arguments have not been previously evaluated, * evaluate them now and save the results. 
*/ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_field_arguments(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) { /* General Purpose I/O */ status = acpi_ex_write_gpio(source_desc, obj_desc, result_desc); return_ACPI_STATUS(status); } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_SMBUS || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_IPMI || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_RT || obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)) { /* SMBus, GSBus, IPMI serial */ status = acpi_ex_write_serial_bus(source_desc, obj_desc, result_desc); return_ACPI_STATUS(status); } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && (obj_desc->field.region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { /* * According to the spec a write to the COMD field will invoke the * region handler. Otherwise, write to the pcc_internal buffer. This * implementation will use the offsets specified rather than the name * of the field. This is considered safer because some firmware tools * are known to obfiscate named objects. */ data_length = (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field. bit_length); memcpy(obj_desc->field.region_obj->field.internal_pcc_buffer + obj_desc->field.base_byte_offset, source_desc->buffer.pointer, data_length); if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) { /* Perform the write */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "PCC COMD field has been written. Invoking PCC handler now.\n")); status = acpi_ex_access_region(obj_desc, 0, (u64 *)obj_desc->field. region_obj->field. 
internal_pcc_buffer, ACPI_WRITE); return_ACPI_STATUS(status); } return (AE_OK); } /* Get a pointer to the data to be written */ switch (source_desc->common.type) { case ACPI_TYPE_INTEGER: buffer = &source_desc->integer.value; buffer_length = sizeof(source_desc->integer.value); break; case ACPI_TYPE_BUFFER: buffer = source_desc->buffer.pointer; buffer_length = source_desc->buffer.length; break; case ACPI_TYPE_STRING: buffer = source_desc->string.pointer; buffer_length = source_desc->string.length; break; default: return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n", source_desc, acpi_ut_get_type_name(source_desc->common.type), source_desc->common.type, buffer, buffer_length)); ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n", obj_desc, acpi_ut_get_type_name(obj_desc->common.type), obj_desc->common.type, obj_desc->common_field.bit_length, obj_desc->common_field.start_field_bit_offset, obj_desc->common_field.base_byte_offset)); /* Lock entire transaction if requested */ acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags); /* Write to the field */ status = acpi_ex_insert_into_field(obj_desc, buffer, buffer_length); acpi_ex_release_global_lock(obj_desc->common_field.field_flags); return_ACPI_STATUS(status); }
linux-master
drivers/acpi/acpica/exfield.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: nsxfeval - Public interfaces to the ACPI subsystem
 *                         ACPI Object evaluation interfaces
 *
 ******************************************************************************/

#define EXPORT_ACPI_INTERFACES

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")

/* Local prototypes */
static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);

/*******************************************************************************
 *
 * FUNCTION:    acpi_evaluate_object_typed
 *
 * PARAMETERS:  handle              - Object handle (optional)
 *              pathname            - Object pathname (optional)
 *              external_params     - List of parameters to pass to a method,
 *                                    terminated by NULL. May be NULL
 *                                    if no parameters are being passed.
 *              return_buffer       - Where to put the object's return value (if
 *                                    any). If NULL, no value is returned.
 *              return_type         - Expected type of return object
 *
 * RETURN:      Status. AE_TYPE if the returned object does not match
 *              return_type (the buffer is freed/zeroed in that case).
 *
 * DESCRIPTION: Find and evaluate the given object, passing the given
 *              parameters if necessary. One of "Handle" or "Pathname" must
 *              be valid (non-null)
 *
 ******************************************************************************/

acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
			   acpi_string pathname,
			   struct acpi_object_list *external_params,
			   struct acpi_buffer *return_buffer,
			   acpi_object_type return_type)
{
	acpi_status status;
	u8 free_buffer_on_error = FALSE;
	acpi_handle target_handle;
	char *full_pathname;

	ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);

	/* Return buffer must be valid */

	if (!return_buffer) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Remember whether we own the buffer so we can free it on type error */

	if (return_buffer->length == ACPI_ALLOCATE_BUFFER) {
		free_buffer_on_error = TRUE;
	}

	/* Get a handle here, in order to build an error message if needed */

	target_handle = handle;
	if (pathname) {
		status = acpi_get_handle(handle, pathname, &target_handle);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	full_pathname = acpi_ns_get_external_pathname(target_handle);
	if (!full_pathname) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Evaluate the object */

	status = acpi_evaluate_object(target_handle, NULL, external_params,
				      return_buffer);
	if (ACPI_FAILURE(status)) {
		goto exit;
	}

	/* Type ANY means "don't care about return value type" */

	if (return_type == ACPI_TYPE_ANY) {
		goto exit;
	}

	if (return_buffer->length == 0) {

		/* Error because caller specifically asked for a return value */

		ACPI_ERROR((AE_INFO, "%s did not return any object",
			    full_pathname));
		status = AE_NULL_OBJECT;
		goto exit;
	}

	/* Examine the object type returned from evaluate_object */

	if (((union acpi_object *)return_buffer->pointer)->type == return_type) {
		goto exit;
	}

	/* Return object type does not match requested type */

	ACPI_ERROR((AE_INFO,
		    "Incorrect return type from %s - received [%s], requested [%s]",
		    full_pathname,
		    acpi_ut_get_type_name(((union acpi_object *)return_buffer->
					   pointer)->type),
		    acpi_ut_get_type_name(return_type)));

	if (free_buffer_on_error) {
		/*
		 * Free a buffer created via ACPI_ALLOCATE_BUFFER.
		 * Note: We use acpi_os_free here because acpi_os_allocate was used
		 * to allocate the buffer. This purposefully bypasses the
		 * (optionally enabled) allocation tracking mechanism since we
		 * only want to track internal allocations.
		 */
		acpi_os_free(return_buffer->pointer);
		return_buffer->pointer = NULL;
	}

	return_buffer->length = 0;
	status = AE_TYPE;

exit:
	ACPI_FREE(full_pathname);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)

/*******************************************************************************
 *
 * FUNCTION:    acpi_evaluate_object
 *
 * PARAMETERS:  handle              - Object handle (optional)
 *              pathname            - Object pathname (optional)
 *              external_params     - List of parameters to pass to method,
 *                                    terminated by NULL. May be NULL
 *                                    if no parameters are being passed.
 *              return_buffer       - Where to put method's return value (if
 *                                    any). If NULL, no value is returned.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Find and evaluate the given object, passing the given
 *              parameters if necessary. One of "Handle" or "Pathname" must
 *              be valid (non-null)
 *
 ******************************************************************************/

acpi_status
acpi_evaluate_object(acpi_handle handle,
		     acpi_string pathname,
		     struct acpi_object_list *external_params,
		     struct acpi_buffer *return_buffer)
{
	acpi_status status;
	struct acpi_evaluate_info *info;
	acpi_size buffer_space_needed;
	u32 i;

	ACPI_FUNCTION_TRACE(acpi_evaluate_object);

	/* Allocate and initialize the evaluation information block */

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Convert and validate the device handle */

	info->prefix_node = acpi_ns_validate_handle(handle);
	if (!info->prefix_node) {
		status = AE_BAD_PARAMETER;
		goto cleanup;
	}

	/*
	 * Get the actual namespace node for the target object.
	 * Handles these cases:
	 *
	 * 1) Null node, valid pathname from root (absolute path)
	 * 2) Node and valid pathname (path relative to Node)
	 * 3) Node, Null pathname
	 */
	if ((pathname) && (ACPI_IS_ROOT_PREFIX(pathname[0]))) {

		/* The path is fully qualified, just evaluate by name */

		info->prefix_node = NULL;
	} else if (!handle) {
		/*
		 * A handle is optional iff a fully qualified pathname is specified.
		 * Since we've already handled fully qualified names above, this is
		 * an error.
		 */
		if (!pathname) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Both Handle and Pathname are NULL"));
		} else {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Null Handle with relative pathname [%s]",
					  pathname));
		}

		status = AE_BAD_PARAMETER;
		goto cleanup;
	}

	info->relative_pathname = pathname;

	/*
	 * Convert all external objects passed as arguments to the
	 * internal version(s).
	 */
	if (external_params && external_params->count) {
		info->param_count = (u16)external_params->count;

		/* Warn on impossible argument count */

		if (info->param_count > ACPI_METHOD_NUM_ARGS) {
			ACPI_WARN_PREDEFINED((AE_INFO, pathname,
					      ACPI_WARN_ALWAYS,
					      "Excess arguments (%u) - using only %u",
					      info->param_count,
					      ACPI_METHOD_NUM_ARGS));

			info->param_count = ACPI_METHOD_NUM_ARGS;
		}

		/*
		 * Allocate a new parameter block for the internal objects
		 * Add 1 to count to allow for null terminated internal list
		 */
		info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)info->
							 param_count +
							 1) * sizeof(void *));
		if (!info->parameters) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		/* Convert each external object in the list to an internal object */

		for (i = 0; i < info->param_count; i++) {
			status =
			    acpi_ut_copy_eobject_to_iobject(&external_params->
							    pointer[i],
							    &info->
							    parameters[i]);
			if (ACPI_FAILURE(status)) {
				goto cleanup;
			}
		}

		info->parameters[info->param_count] = NULL;
	}

#ifdef _FUTURE_FEATURE
	/*
	 * Begin incoming argument count analysis. Check for too few args
	 * and too many args.
	 * (Disabled: compiled out unless _FUTURE_FEATURE is defined.)
	 */
	switch (acpi_ns_get_type(info->node)) {
	case ACPI_TYPE_METHOD:

		/* Check incoming argument count against the method definition */

		if (info->obj_desc->method.param_count > info->param_count) {
			ACPI_ERROR((AE_INFO,
				    "Insufficient arguments (%u) - %u are required",
				    info->param_count,
				    info->obj_desc->method.param_count));

			status = AE_MISSING_ARGUMENTS;
			goto cleanup;
		} else if (info->obj_desc->method.param_count <
			   info->param_count) {
			ACPI_WARNING((AE_INFO,
				      "Excess arguments (%u) - only %u are required",
				      info->param_count,
				      info->obj_desc->method.param_count));

			/* Just pass the required number of arguments */

			info->param_count = info->obj_desc->method.param_count;
		}

		/*
		 * Any incoming external objects to be passed as arguments to the
		 * method must be converted to internal objects
		 */
		if (info->param_count) {
			/*
			 * Allocate a new parameter block for the internal objects
			 * Add 1 to count to allow for null terminated internal list
			 */
			info->parameters =
			    ACPI_ALLOCATE_ZEROED(((acpi_size)info->param_count +
						  1) * sizeof(void *));
			if (!info->parameters) {
				status = AE_NO_MEMORY;
				goto cleanup;
			}

			/* Convert each external object in the list to an internal object */

			for (i = 0; i < info->param_count; i++) {
				status =
				    acpi_ut_copy_eobject_to_iobject
				    (&external_params->pointer[i],
				     &info->parameters[i]);
				if (ACPI_FAILURE(status)) {
					goto cleanup;
				}
			}

			info->parameters[info->param_count] = NULL;
		}
		break;

	default:

		/* Warn if arguments passed to an object that is not a method */

		if (info->param_count) {
			ACPI_WARNING((AE_INFO,
				      "%u arguments were passed to a non-method ACPI object",
				      info->param_count));
		}
		break;
	}
#endif

	/* Now we can evaluate the object */

	status = acpi_ns_evaluate(info);

	/*
	 * If we are expecting a return value, and all went well above,
	 * copy the return value to an external object.
	 */
	if (!return_buffer) {
		goto cleanup_return_object;
	}

	if (!info->return_object) {
		return_buffer->length = 0;
		goto cleanup;
	}

	if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
	    ACPI_DESC_TYPE_NAMED) {
		/*
		 * If we received a NS Node as a return object, this means that
		 * the object we are evaluating has nothing interesting to
		 * return (such as a mutex, etc.) We return an error because
		 * these types are essentially unsupported by this interface.
		 * We don't check up front because this makes it easier to add
		 * support for various types at a later date if necessary.
		 */
		status = AE_TYPE;
		info->return_object = NULL;	/* No need to delete a NS Node */
		return_buffer->length = 0;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup_return_object;
	}

	/* Dereference Index and ref_of references */

	acpi_ns_resolve_references(info);

	/* Get the size of the returned object */

	status = acpi_ut_get_object_size(info->return_object,
					 &buffer_space_needed);
	if (ACPI_SUCCESS(status)) {

		/* Validate/Allocate/Clear caller buffer */

		status = acpi_ut_initialize_buffer(return_buffer,
						   buffer_space_needed);
		if (ACPI_FAILURE(status)) {
			/*
			 * Caller's buffer is too small or a new one can't
			 * be allocated
			 */
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Needed buffer size %X, %s\n",
					  (u32)buffer_space_needed,
					  acpi_format_exception(status)));
		} else {
			/* We have enough space for the object, build it */

			status =
			    acpi_ut_copy_iobject_to_eobject(info->return_object,
							    return_buffer);
		}
	}

cleanup_return_object:

	if (info->return_object) {
		/*
		 * Delete the internal return object. NOTE: Interpreter must be
		 * locked to avoid race condition.
		 */
		acpi_ex_enter_interpreter();

		/* Remove one reference on the return object (should delete it) */

		acpi_ut_remove_reference(info->return_object);
		acpi_ex_exit_interpreter();
	}

cleanup:

	/* Free the input parameter list (if we created one) */

	if (info->parameters) {

		/* Free the allocated parameter block */

		acpi_ut_delete_internal_object_list(info->parameters);
	}

	ACPI_FREE(info);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_evaluate_object)

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_resolve_references
 *
 * PARAMETERS:  info                - Evaluation info block
 *
 * RETURN:      Info->return_object is replaced with the dereferenced object
 *
 * DESCRIPTION: Dereference certain reference objects. Called before an
 *              internal return object is converted to an external union
 *              acpi_object.
 *
 * Performs an automatic dereference of Index and ref_of reference objects.
 * These reference objects are not supported by the union acpi_object, so this
 * is a last resort effort to return something useful. Also, provides
 * compatibility with other ACPI implementations.
 *
 * NOTE: does not handle references within returned package objects or nested
 * references, but this support could be added later if found to be necessary.
 *
 ******************************************************************************/

static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
{
	union acpi_operand_object *obj_desc = NULL;
	struct acpi_namespace_node *node;

	/* We are interested in reference objects only */

	if ((info->return_object)->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
		return;
	}

	/*
	 * Two types of references are supported - those created by Index and
	 * ref_of operators. A name reference (AML_NAMEPATH_OP) can be converted
	 * to a union acpi_object, so it is not dereferenced here. A ddb_handle
	 * (AML_LOAD_OP) cannot be dereferenced, nor can it be converted to
	 * a union acpi_object.
	 */
	switch (info->return_object->reference.class) {
	case ACPI_REFCLASS_INDEX:

		obj_desc = *(info->return_object->reference.where);
		break;

	case ACPI_REFCLASS_REFOF:

		node = info->return_object->reference.object;
		if (node) {
			obj_desc = node->object;
		}
		break;

	default:

		return;
	}

	/* Replace the existing reference object */

	if (obj_desc) {
		/* Take a reference on the target before dropping the reference obj */

		acpi_ut_add_reference(obj_desc);
		acpi_ut_remove_reference(info->return_object);
		info->return_object = obj_desc;
	}

	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_walk_namespace
 *
 * PARAMETERS:  type                - acpi_object_type to search for
 *              start_object        - Handle in namespace where search begins
 *              max_depth           - Depth to which search is to reach
 *              descending_callback - Called during tree descent
 *                                    when an object of "Type" is found
 *              ascending_callback  - Called during tree ascent
 *                                    when an object of "Type" is found
 *              context             - Passed to user function(s) above
 *              return_value        - Location where return value of
 *                                    user_function is put if terminated early
 *
 * RETURNS      Return value from the user_function if terminated early.
 *              Otherwise, returns NULL.
 *
 * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
 *              starting (and ending) at the object specified by start_handle.
 *              The callback function is called whenever an object that matches
 *              the type parameter is found. If the callback function returns
 *              a non-zero value, the search is terminated immediately and this
 *              value is returned to the caller.
 *
 *              The point of this procedure is to provide a generic namespace
 *              walk routine that can be called from multiple places to
 *              provide multiple services; the callback function(s) can be
 *              tailored to each task, whether it is a print function,
 *              a compare function, etc.
* ******************************************************************************/ acpi_status acpi_walk_namespace(acpi_object_type type, acpi_handle start_object, u32 max_depth, acpi_walk_callback descending_callback, acpi_walk_callback ascending_callback, void *context, void **return_value) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_walk_namespace); /* Parameter validation */ if ((type > ACPI_TYPE_LOCAL_MAX) || (!max_depth) || (!descending_callback && !ascending_callback)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Need to acquire the namespace reader lock to prevent interference * with any concurrent table unloads (which causes the deletion of * namespace objects). We cannot allow the deletion of a namespace node * while the user function is using it. The exception to this are the * nodes created and deleted during control method execution -- these * nodes are marked as temporary nodes and are ignored by the namespace * walk. Thus, control methods can be executed while holding the * namespace deletion lock (and the user function can execute control * methods.) */ status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Lock the namespace around the walk. The namespace will be * unlocked/locked around each call to the user function - since the user * function must be allowed to make ACPICA calls itself (for example, it * will typically execute control methods during device enumeration.) 
*/ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* Now we can validate the starting node */ if (!acpi_ns_validate_handle(start_object)) { status = AE_BAD_PARAMETER; goto unlock_and_exit2; } status = acpi_ns_walk_namespace(type, start_object, max_depth, ACPI_NS_WALK_UNLOCK, descending_callback, ascending_callback, context, return_value); unlock_and_exit2: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); unlock_and_exit: (void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_walk_namespace) /******************************************************************************* * * FUNCTION: acpi_ns_get_device_callback * * PARAMETERS: Callback from acpi_get_device * * RETURN: Status * * DESCRIPTION: Takes callbacks from walk_namespace and filters out all non- * present devices, or if they specified a HID, it filters based * on that. * ******************************************************************************/ static acpi_status acpi_ns_get_device_callback(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_get_devices_info *info = context; acpi_status status; struct acpi_namespace_node *node; u32 flags; struct acpi_pnp_device_id *hid; struct acpi_pnp_device_id_list *cid; u32 i; u8 found; int no_match; status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return (status); } node = acpi_ns_validate_handle(obj_handle); status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return (status); } if (!node) { return (AE_BAD_PARAMETER); } /* * First, filter based on the device HID and CID. * * 01/2010: For this case where a specific HID is requested, we don't * want to run _STA until we have an actual HID match. Thus, we will * not unnecessarily execute _STA on devices for which the caller * doesn't care about. 
Previously, _STA was executed unconditionally * on all devices found here. * * A side-effect of this change is that now we will continue to search * for a matching HID even under device trees where the parent device * would have returned a _STA that indicates it is not present or * not functioning (thus aborting the search on that branch). */ if (info->hid != NULL) { status = acpi_ut_execute_HID(node, &hid); if (status == AE_NOT_FOUND) { return (AE_OK); } else if (ACPI_FAILURE(status)) { return (AE_CTRL_DEPTH); } no_match = strcmp(hid->string, info->hid); ACPI_FREE(hid); if (no_match) { /* * HID does not match, attempt match within the * list of Compatible IDs (CIDs) */ status = acpi_ut_execute_CID(node, &cid); if (status == AE_NOT_FOUND) { return (AE_OK); } else if (ACPI_FAILURE(status)) { return (AE_CTRL_DEPTH); } /* Walk the CID list */ found = FALSE; for (i = 0; i < cid->count; i++) { if (strcmp(cid->ids[i].string, info->hid) == 0) { /* Found a matching CID */ found = TRUE; break; } } ACPI_FREE(cid); if (!found) { return (AE_OK); } } } /* Run _STA to determine if device is present */ status = acpi_ut_execute_STA(node, &flags); if (ACPI_FAILURE(status)) { return (AE_CTRL_DEPTH); } if (!(flags & ACPI_STA_DEVICE_PRESENT) && !(flags & ACPI_STA_DEVICE_FUNCTIONING)) { /* * Don't examine the children of the device only when the * device is neither present nor functional. See ACPI spec, * description of _STA for more information. */ return (AE_CTRL_DEPTH); } /* We have a valid device, invoke the user function */ status = info->user_function(obj_handle, nesting_level, info->context, return_value); return (status); } /******************************************************************************* * * FUNCTION: acpi_get_devices * * PARAMETERS: HID - HID to search for. Can be NULL. 
 * user_function       - Called when a matching object is found
 * context             - Passed to user function
 * return_value        - Location where return value of
 *                       user_function is put if terminated early
 *
 * RETURNS              Return value from the user_function if terminated
 *                      early. Otherwise, returns NULL.
 *
 * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
 *              starting (and ending) at the object specified by start_handle.
 *              The user_function is called whenever an object of type
 *              Device is found. If the user function returns
 *              a non-zero value, the search is terminated immediately and this
 *              value is returned to the caller.
 *
 *              This is a wrapper for walk_namespace, but the callback performs
 *              additional filtering. Please see acpi_ns_get_device_callback.
 *
 ******************************************************************************/
acpi_status
acpi_get_devices(const char *HID,
		 acpi_walk_callback user_function,
		 void *context, void **return_value)
{
	acpi_status status;
	struct acpi_get_devices_info info;

	ACPI_FUNCTION_TRACE(acpi_get_devices);

	/* Parameter validation */

	if (!user_function) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * We're going to call their callback from OUR callback, so we need
	 * to know what it is, and their context parameter.
	 */
	info.hid = HID;
	info.context = context;
	info.user_function = user_function;

	/*
	 * Lock the namespace around the walk.
	 * The namespace will be unlocked/locked around each call
	 * to the user function - since this function
	 * must be allowed to make Acpi calls itself.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * ACPI_NS_WALK_UNLOCK: the namespace mutex is dropped around each
	 * invocation of acpi_ns_get_device_callback (see comment above).
	 */
	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
					acpi_ns_get_device_callback, NULL,
					&info, return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_get_devices)

/*******************************************************************************
 *
 * FUNCTION:    acpi_attach_data
 *
 * PARAMETERS:  obj_handle          - Namespace node
 *              handler             - Handler for this attachment
 *              data                - Pointer to data to be attached
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Attach arbitrary data and handler to a namespace node.
 *
 ******************************************************************************/
acpi_status
acpi_attach_data(acpi_handle obj_handle,
		 acpi_object_handler handler, void *data)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Parameter validation */

	if (!obj_handle || !handler || !data) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle */

	node = acpi_ns_validate_handle(obj_handle);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ns_attach_data(node, handler, data);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_attach_data)

/*******************************************************************************
 *
 * FUNCTION:    acpi_detach_data
 *
 * PARAMETERS:  obj_handle          - Namespace node handle
 *              handler             - Handler used in call to acpi_attach_data
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove data that was previously attached to a node.
 *
 ******************************************************************************/
acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Parameter validation */

	if (!obj_handle || !handler) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle */

	node = acpi_ns_validate_handle(obj_handle);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ns_detach_data(node, handler);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_detach_data)

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_data_full
 *
 * PARAMETERS:  obj_handle          - Namespace node
 *              handler             - Handler used in call to attach_data
 *              data                - Where the data is returned
 *              callback            - function to execute before returning
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Retrieve data that was previously attached to a namespace node
 *              and execute a callback before returning.
 *
 ******************************************************************************/
acpi_status
acpi_get_data_full(acpi_handle obj_handle, acpi_object_handler handler,
		   void **data, void (*callback)(void *))
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Parameter validation */

	if (!obj_handle || !handler || !data) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle */

	node = acpi_ns_validate_handle(obj_handle);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ns_get_attached_data(node, handler, data);

	/*
	 * The optional callback runs while the namespace mutex is still
	 * held, so the attached data cannot be detached underneath it.
	 */
	if (ACPI_SUCCESS(status) && callback) {
		callback(*data);
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_get_data_full)

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_data
 *
 * PARAMETERS:  obj_handle          - Namespace node
 *              handler             - Handler used in call to attach_data
 *              data                - Where the data is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
 *              Convenience wrapper around acpi_get_data_full with no callback.
 *
 ******************************************************************************/
acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
{
	return acpi_get_data_full(obj_handle, handler, data, NULL);
}

ACPI_EXPORT_SYMBOL(acpi_get_data)
linux-master
drivers/acpi/acpica/nsxfeval.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exstorob - AML object store support, store to object
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exstorob")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_buffer_to_buffer
 *
 * PARAMETERS:  source_desc         - Source object to copy
 *              target_desc         - Destination object of the copy
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy a buffer object to another buffer object.
 *
 ******************************************************************************/

acpi_status
acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
			       union acpi_operand_object *target_desc)
{
	u32 length;
	u8 *buffer;

	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);

	/* If Source and Target are the same, just return */

	if (source_desc == target_desc) {
		return_ACPI_STATUS(AE_OK);
	}

	/* We know that source_desc is a buffer by now */

	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
	length = source_desc->buffer.length;

	/*
	 * If target is a buffer of length zero or is a static buffer,
	 * allocate a new buffer of the proper length
	 * (a static pointer typically points into an ACPI table and must
	 * not be written through or freed)
	 */
	if ((target_desc->buffer.length == 0) ||
	    (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
		target_desc->buffer.pointer = ACPI_ALLOCATE(length);
		if (!target_desc->buffer.pointer) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		target_desc->buffer.length = length;
	}

	/* Copy source buffer to target buffer */

	if (length <= target_desc->buffer.length) {

		/* Clear existing buffer and copy in the new one */

		memset(target_desc->buffer.pointer, 0,
		       target_desc->buffer.length);
		memcpy(target_desc->buffer.pointer, buffer, length);

#ifdef ACPI_OBSOLETE_BEHAVIOR
		/*
		 * NOTE: ACPI versions up to 3.0 specified that the buffer must be
		 * truncated if the string is smaller than the buffer. However, "other"
		 * implementations of ACPI never did this and thus became the defacto
		 * standard. ACPI 3.0A changes this behavior such that the buffer
		 * is no longer truncated.
		 */

		/*
		 * OBSOLETE BEHAVIOR:
		 * If the original source was a string, we must truncate the buffer,
		 * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
		 * copy must not truncate the original buffer.
		 */
		if (original_src_type == ACPI_TYPE_STRING) {

			/* Set the new length of the target */

			target_desc->buffer.length = length;
		}
#endif
	} else {
		/* Truncate the source, copy only what will fit */

		memcpy(target_desc->buffer.pointer, buffer,
		       target_desc->buffer.length);

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Truncating source buffer from %X to %X\n",
				  length, target_desc->buffer.length));
	}

	/* Copy flags */

	target_desc->buffer.flags = source_desc->buffer.flags;
	/* Target now owns a heap buffer, never a static table pointer */
	target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_string_to_string
 *
 * PARAMETERS:  source_desc         - Source object to copy
 *              target_desc         - Destination object of the copy
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy a String object to another String object
 *
 ******************************************************************************/

acpi_status
acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
			       union acpi_operand_object *target_desc)
{
	u32 length;
	u8 *buffer;

	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);

	/* If Source and Target are the same, just return */

	if (source_desc == target_desc) {
		return_ACPI_STATUS(AE_OK);
	}

	/* We know that source_desc is a string by now */

	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
	length = source_desc->string.length;

	/*
	 * Replace existing string value if it will fit and the string
	 * pointer is not a static pointer (part of an ACPI table)
	 */
	if ((length < target_desc->string.length) &&
	    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
		/*
		 * String will fit in existing non-static buffer.
		 * Clear old string and copy in the new one
		 * (+1 clears through the existing NUL terminator as well)
		 */
		memset(target_desc->string.pointer, 0,
		       (acpi_size)target_desc->string.length + 1);
		memcpy(target_desc->string.pointer, buffer, length);
	} else {
		/*
		 * Free the current buffer, then allocate a new buffer
		 * large enough to hold the value
		 */
		if (target_desc->string.pointer &&
		    (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {

			/* Only free if not a pointer into the DSDT */

			ACPI_FREE(target_desc->string.pointer);
		}

		/* +1 for the NUL terminator; buffer is zero-filled */

		target_desc->string.pointer =
		    ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);

		if (!target_desc->string.pointer) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;
		memcpy(target_desc->string.pointer, buffer, length);
	}

	/* Set the new target length */

	target_desc->string.length = length;
	return_ACPI_STATUS(AE_OK);
}
linux-master
drivers/acpi/acpica/exstorob.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: utownerid - Support for Table/Method Owner IDs
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utownerid")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_allocate_owner_id
 *
 * PARAMETERS:  owner_id        - Where the new owner ID is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Allocate a table or method owner ID. The owner ID is used to
 *              track objects created by the table or method, to be deleted
 *              when the method exits or the table is unloaded.
 *
 ******************************************************************************/
acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
{
	u32 i;			/* Number of mask words scanned */
	u32 j;			/* Index into acpi_gbl_owner_id_mask[] */
	u32 k;			/* Bit position within the current mask word */
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_allocate_owner_id);

	/* Guard against multiple allocations of ID to the same location */

	if (*owner_id) {
		ACPI_ERROR((AE_INFO,
			    "Owner ID [0x%3.3X] already exists", *owner_id));
		return_ACPI_STATUS(AE_ALREADY_EXISTS);
	}

	/* Mutex for the global ID mask */

	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Find a free owner ID, cycle through all possible IDs on repeated
	 * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index
	 * may have to be scanned twice.
	 */
	for (i = 0, j = acpi_gbl_last_owner_id_index;
	     i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
		if (j >= ACPI_NUM_OWNERID_MASKS) {
			j = 0;	/* Wraparound to start of mask array */
		}

		for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
			if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {

				/* There are no free IDs in this mask */

				break;
			}

			/*
			 * Note: the u32 cast ensures that 1 is stored as a unsigned
			 * integer. Omitting the cast may result in 1 being stored as an
			 * int. Some compilers or runtime error detection may flag this as
			 * an error.
			 */
			if (!(acpi_gbl_owner_id_mask[j] & ((u32)1 << k))) {
				/*
				 * Found a free ID. The actual ID is the bit index plus one,
				 * making zero an invalid Owner ID. Save this as the last ID
				 * allocated and update the global ID mask.
				 */
				acpi_gbl_owner_id_mask[j] |= ((u32)1 << k);

				acpi_gbl_last_owner_id_index = (u8)j;
				acpi_gbl_next_owner_id_offset = (u8)(k + 1);

				/*
				 * Construct encoded ID from the index and bit position
				 *
				 * Note: Last [j].k (bit 4095) is never used and is marked
				 * permanently allocated (prevents +1 overflow)
				 */
				*owner_id =
				    (acpi_owner_id)((k + 1) + ACPI_MUL_32(j));

				ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
						  "Allocated OwnerId: 0x%3.3X\n",
						  (unsigned int)*owner_id));
				goto exit;
			}
		}

		/* Word exhausted; restart bit scan at 0 for the next word */

		acpi_gbl_next_owner_id_offset = 0;
	}

	/*
	 * All owner_ids have been allocated. This typically should
	 * not happen since the IDs are reused after deallocation. The IDs are
	 * allocated upon table load (one per table) and method execution, and
	 * they are released when a table is unloaded or a method completes
	 * execution.
	 *
	 * If this error happens, there may be very deep nesting of invoked
	 * control methods, or there may be a bug where the IDs are not released.
	 */
	status = AE_OWNER_ID_LIMIT;
	ACPI_ERROR((AE_INFO,
		    "Could not allocate new OwnerId (4095 max), AE_OWNER_ID_LIMIT"));

exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_release_owner_id
 *
 * PARAMETERS:  owner_id_ptr        - Pointer to a previously allocated owner_ID
 *
 * RETURN:      None. No error is returned because we are either exiting a
 *              control method or unloading a table. Either way, we would
 *              ignore any error anyway.
 *
 * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 4095
 *              (the allocator above encodes (bit + 1) + 32 * word, and its
 *              limit message states 4095 max).
 *
 ******************************************************************************/
void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
{
	acpi_owner_id owner_id = *owner_id_ptr;
	acpi_status status;
	u32 index;		/* Word index into the global mask array */
	u32 bit;		/* Single-bit mask within that word */

	ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);

	/* Always clear the input owner_id (zero is an invalid ID) */

	*owner_id_ptr = 0;

	/* Zero is not a valid owner_ID */

	if (owner_id == 0) {
		ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%3.3X", owner_id));
		return_VOID;
	}

	/* Mutex for the global ID mask */

	status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/* Normalize the ID to zero (undo the +1 applied at allocation) */

	owner_id--;

	/* Decode ID to index/offset pair */

	index = ACPI_DIV_32(owner_id);
	bit = (u32)1 << ACPI_MOD_32(owner_id);

	/* Free the owner ID only if it is valid */

	if (acpi_gbl_owner_id_mask[index] & bit) {
		acpi_gbl_owner_id_mask[index] ^= bit;
	} else {
		ACPI_ERROR((AE_INFO,
			    "Attempted release of non-allocated OwnerId: 0x%3.3X",
			    owner_id + 1));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
	return_VOID;
}
linux-master
drivers/acpi/acpica/utownerid.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: utresdecode - Resource descriptor keyword strings
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utresdecode")

#if defined (ACPI_DEBUG_OUTPUT) || \
	defined (ACPI_DISASSEMBLER) || \
	defined (ACPI_DEBUGGER)
/*
 * Strings used to decode resource descriptors.
 * Used by both the disassembler and the debugger resource dump routines.
 * Each table is indexed by the raw field value; entries past the defined
 * range carry an "Invalid"/"UNKNOWN" placeholder string.
 */
const char *acpi_gbl_bm_decode[] = {
	"NotBusMaster",
	"BusMaster"
};

const char *acpi_gbl_config_decode[] = {
	"0 - Good Configuration",
	"1 - Acceptable Configuration",
	"2 - Suboptimal Configuration",
	"3 - ***Invalid Configuration***",
};

const char *acpi_gbl_consume_decode[] = {
	"ResourceProducer",
	"ResourceConsumer"
};

const char *acpi_gbl_dec_decode[] = {
	"PosDecode",
	"SubDecode"
};

const char *acpi_gbl_he_decode[] = {
	"Level",
	"Edge"
};

const char *acpi_gbl_io_decode[] = {
	"Decode10",
	"Decode16"
};

const char *acpi_gbl_ll_decode[] = {
	"ActiveHigh",
	"ActiveLow",
	"ActiveBoth",
	"Reserved"
};

const char *acpi_gbl_max_decode[] = {
	"MaxNotFixed",
	"MaxFixed"
};

const char *acpi_gbl_mem_decode[] = {
	"NonCacheable",
	"Cacheable",
	"WriteCombining",
	"Prefetchable"
};

const char *acpi_gbl_min_decode[] = {
	"MinNotFixed",
	"MinFixed"
};

const char *acpi_gbl_mtp_decode[] = {
	"AddressRangeMemory",
	"AddressRangeReserved",
	"AddressRangeACPI",
	"AddressRangeNVS"
};

const char *acpi_gbl_phy_decode[] = {
	"Type C",
	"Type D",
	"Unknown Type",
	"Unknown Type"
};

const char *acpi_gbl_rng_decode[] = {
	"InvalidRanges",
	"NonISAOnlyRanges",
	"ISAOnlyRanges",
	"EntireRange"
};

const char *acpi_gbl_rw_decode[] = {
	"ReadOnly",
	"ReadWrite"
};

const char *acpi_gbl_shr_decode[] = {
	"Exclusive",
	"Shared",
	"ExclusiveAndWake",	/* ACPI 5.0 */
	"SharedAndWake"		/* ACPI 5.0 */
};

const char *acpi_gbl_siz_decode[] = {
	"Transfer8",
	"Transfer8_16",
	"Transfer16",
	"InvalidSize"
};

const char *acpi_gbl_trs_decode[] = {
	"DenseTranslation",
	"SparseTranslation"
};

const char *acpi_gbl_ttp_decode[] = {
	"TypeStatic",
	"TypeTranslation"
};

const char *acpi_gbl_typ_decode[] = {
	"Compatibility",
	"TypeA",
	"TypeB",
	"TypeF"
};

const char *acpi_gbl_ppc_decode[] = {
	"PullDefault",
	"PullUp",
	"PullDown",
	"PullNone"
};

const char *acpi_gbl_ior_decode[] = {
	"IoRestrictionNone",
	"IoRestrictionInputOnly",
	"IoRestrictionOutputOnly",
	"IoRestrictionNoneAndPreserve"
};

const char *acpi_gbl_dts_decode[] = {
	"Width8bit",
	"Width16bit",
	"Width32bit",
	"Width64bit",
	"Width128bit",
	"Width256bit",
};

/* GPIO connection type */

const char *acpi_gbl_ct_decode[] = {
	"Interrupt",
	"I/O"
};

/* Serial bus type */

const char *acpi_gbl_sbt_decode[] = {
	"/* UNKNOWN serial bus type */",
	"I2C",
	"SPI",
	"UART",
	"CSI2"
};

/* I2C serial bus access mode */

const char *acpi_gbl_am_decode[] = {
	"AddressingMode7Bit",
	"AddressingMode10Bit"
};

/* I2C serial bus slave mode */

const char *acpi_gbl_sm_decode[] = {
	"ControllerInitiated",
	"DeviceInitiated"
};

/* SPI serial bus wire mode */

const char *acpi_gbl_wm_decode[] = {
	"FourWireMode",
	"ThreeWireMode"
};

/* SPI serial clock phase */

const char *acpi_gbl_cph_decode[] = {
	"ClockPhaseFirst",
	"ClockPhaseSecond"
};

/* SPI serial bus clock polarity */

const char *acpi_gbl_cpo_decode[] = {
	"ClockPolarityLow",
	"ClockPolarityHigh"
};

/* SPI serial bus device polarity */

const char *acpi_gbl_dp_decode[] = {
	"PolarityLow",
	"PolarityHigh"
};

/* UART serial bus endian */

const char *acpi_gbl_ed_decode[] = {
	"LittleEndian",
	"BigEndian"
};

/* UART serial bus bits per byte */

const char *acpi_gbl_bpb_decode[] = {
	"DataBitsFive",
	"DataBitsSix",
	"DataBitsSeven",
	"DataBitsEight",
	"DataBitsNine",
	"/* UNKNOWN Bits per byte */",
	"/* UNKNOWN Bits per byte */",
	"/* UNKNOWN Bits per byte */"
};

/* UART serial bus stop bits */

const char *acpi_gbl_sb_decode[] = {
	"StopBitsZero",
	"StopBitsOne",
	"StopBitsOnePlusHalf",
	"StopBitsTwo"
};

/* UART serial bus flow control */

const char *acpi_gbl_fc_decode[] = {
	"FlowControlNone",
	"FlowControlHardware",
	"FlowControlXON",
	"/* UNKNOWN flow control keyword */"
};

/* UART serial bus parity type */

const char *acpi_gbl_pt_decode[] = {
	"ParityTypeNone",
	"ParityTypeEven",
	"ParityTypeOdd",
	"ParityTypeMark",
	"ParityTypeSpace",
	"/* UNKNOWN parity keyword */",
	"/* UNKNOWN parity keyword */",
	"/* UNKNOWN parity keyword */"
};

/* pin_config type */

const char *acpi_gbl_ptyp_decode[] = {
	"Default",
	"Bias Pull-up",
	"Bias Pull-down",
	"Bias Default",
	"Bias Disable",
	"Bias High Impedance",
	"Bias Bus Hold",
	"Drive Open Drain",
	"Drive Open Source",
	"Drive Push Pull",
	"Drive Strength",
	"Slew Rate",
	"Input Debounce",
	"Input Schmitt Trigger",
};

const char *acpi_gbl_clock_input_mode[] = {
	"Fixed",
	"Variable",
};

const char *acpi_gbl_clock_input_scale[] = {
	"Hz",
	"KHz",
	"MHz",
};

#endif
linux-master
drivers/acpi/acpica/utresdecode.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: dswscope - Scope stack manipulation
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dswscope")

/****************************************************************************
 *
 * FUNCTION:    acpi_ds_scope_stack_clear
 *
 * PARAMETERS:  walk_state      - Current state
 *
 * RETURN:      None
 *
 * DESCRIPTION: Pop (and free) everything on the scope stack except the
 *              root scope object (which remains at the stack top.)
 *
 ***************************************************************************/
void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
{
	union acpi_generic_state *scope_info;

	ACPI_FUNCTION_NAME(ds_scope_stack_clear);

	while (walk_state->scope_info) {

		/* Pop a scope off the stack */

		scope_info = walk_state->scope_info;
		walk_state->scope_info = scope_info->scope.next;

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Popped object type (%s)\n",
				  acpi_ut_get_type_name(scope_info->common.
							value)));

		acpi_ut_delete_generic_state(scope_info);
	}
}

/****************************************************************************
 *
 * FUNCTION:    acpi_ds_scope_stack_push
 *
 * PARAMETERS:  node            - Name to be made current
 *              type            - Type of frame being pushed
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Push the current scope on the scope stack, and make the
 *              passed Node current.
 *
 ***************************************************************************/
acpi_status
acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
			 acpi_object_type type,
			 struct acpi_walk_state *walk_state)
{
	union acpi_generic_state *scope_info;
	union acpi_generic_state *old_scope_info;

	ACPI_FUNCTION_TRACE(ds_scope_stack_push);

	if (!node) {

		/* Invalid scope */

		ACPI_ERROR((AE_INFO, "Null scope parameter"));
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Make sure object type is valid (warn only, push proceeds anyway) */

	if (!acpi_ut_valid_object_type(type)) {
		ACPI_WARNING((AE_INFO, "Invalid object type: 0x%X", type));
	}

	/* Allocate a new scope object */

	scope_info = acpi_ut_create_generic_state();
	if (!scope_info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Init new scope object */

	scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
	scope_info->scope.node = node;
	scope_info->common.value = (u16) type;

	walk_state->scope_depth++;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "[%.2d] Pushed scope ",
			  (u32) walk_state->scope_depth));

	/*
	 * The _RAW prints below continue the line started above; statement
	 * order is significant for readable debug output.
	 */
	old_scope_info = walk_state->scope_info;
	if (old_scope_info) {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
				      "[%4.4s] (%s)",
				      acpi_ut_get_node_name(old_scope_info->
							    scope.node),
				      acpi_ut_get_type_name(old_scope_info->
							    common.value)));
	} else {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, ACPI_NAMESPACE_ROOT));
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
			      ", New scope -> [%4.4s] (%s)\n",
			      acpi_ut_get_node_name(scope_info->scope.node),
			      acpi_ut_get_type_name(scope_info->common.value)));

	/* Push new scope object onto stack */

	acpi_ut_push_generic_state(&walk_state->scope_info, scope_info);
	return_ACPI_STATUS(AE_OK);
}

/****************************************************************************
 *
 * FUNCTION:    acpi_ds_scope_stack_pop
 *
 * PARAMETERS:  walk_state      - Current state
 *
 * RETURN:      Status (AE_STACK_UNDERFLOW if the stack is empty)
 *
 * DESCRIPTION: Pop the scope stack once.
 *
 ***************************************************************************/
acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
{
	union acpi_generic_state *scope_info;
	union acpi_generic_state *new_scope_info;

	ACPI_FUNCTION_TRACE(ds_scope_stack_pop);

	/*
	 * Pop scope info object off the stack.
	 */
	scope_info = acpi_ut_pop_generic_state(&walk_state->scope_info);
	if (!scope_info) {
		return_ACPI_STATUS(AE_STACK_UNDERFLOW);
	}

	walk_state->scope_depth--;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "[%.2d] Popped scope [%4.4s] (%s), New scope -> ",
			  (u32) walk_state->scope_depth,
			  acpi_ut_get_node_name(scope_info->scope.node),
			  acpi_ut_get_type_name(scope_info->common.value)));

	new_scope_info = walk_state->scope_info;
	if (new_scope_info) {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC,
				      "[%4.4s] (%s)\n",
				      acpi_ut_get_node_name(new_scope_info->
							    scope.node),
				      acpi_ut_get_type_name(new_scope_info->
							    common.value)));
	} else {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "%s\n",
				      ACPI_NAMESPACE_ROOT));
	}

	acpi_ut_delete_generic_state(scope_info);
	return_ACPI_STATUS(AE_OK);
}
linux-master
drivers/acpi/acpica/dswscope.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: tbprint - Table output utilities
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
#include "acutils.h"

#define _COMPONENT          ACPI_TABLES
ACPI_MODULE_NAME("tbprint")

/* Local prototypes */
static void acpi_tb_fix_string(char *string, acpi_size length);

static void
acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
			     struct acpi_table_header *header);

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_fix_string
 *
 * PARAMETERS:  string              - String to be repaired
 *              length              - Maximum length
 *
 * RETURN:      None
 *
 * DESCRIPTION: Replace every non-printable or non-ascii byte in the string
 *              with a question mark '?'. Stops at the first NUL or after
 *              'length' bytes, whichever comes first.
 *
 ******************************************************************************/

static void acpi_tb_fix_string(char *string, acpi_size length)
{

	while (length && *string) {
		/* Cast to u8 first so a negative char cannot reach isprint() */
		if (!isprint((int)(u8)*string)) {
			*string = '?';
		}

		string++;
		length--;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_cleanup_table_header
 *
 * PARAMETERS:  out_header          - Where the cleaned header is returned
 *              header              - Input ACPI table header
 *
 * RETURN:      Returns the cleaned header in out_header
 *
 * DESCRIPTION: Copy the table header and ensure that all "string" fields in
 *              the header consist of printable characters.
 *
 ******************************************************************************/

static void
acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
			     struct acpi_table_header *header)
{

	memcpy(out_header, header, sizeof(struct acpi_table_header));

	acpi_tb_fix_string(out_header->signature, ACPI_NAMESEG_SIZE);
	acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
	acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
	acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAMESEG_SIZE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_print_table_header
 *
 * PARAMETERS:  address             - Table physical address
 *              header              - Table header
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
 *
 ******************************************************************************/

void
acpi_tb_print_table_header(acpi_physical_address address,
			   struct acpi_table_header *header)
{
	struct acpi_table_header local_header;

	if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {

		/* FACS only has signature and length fields */

		ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
			   header->signature, ACPI_FORMAT_UINT64(address),
			   header->length));
	} else if (ACPI_VALIDATE_RSDP_SIG(ACPI_CAST_PTR(struct acpi_table_rsdp,
							header)->signature)) {

		/* RSDP has no common fields */

		memcpy(local_header.oem_id,
		       ACPI_CAST_PTR(struct acpi_table_rsdp, header)->oem_id,
		       ACPI_OEM_ID_SIZE);
		acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);

		/*
		 * A revision-0 RSDP has no length field; 20 is used as the
		 * fixed size in that case (presumably the original RSDP
		 * size - NOTE(review): per the RSDP definition, confirm).
		 */
		ACPI_INFO(("RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
			   ACPI_FORMAT_UINT64(address),
			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
			    revision >
			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
					       header)->length : 20,
			   ACPI_CAST_PTR(struct acpi_table_rsdp,
					 header)->revision,
			   local_header.oem_id));
	} else {
		/* Standard ACPI table with full common header */

		acpi_tb_cleanup_table_header(&local_header, header);

		ACPI_INFO(("%-4.4s 0x%8.8X%8.8X"
			   " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
			   local_header.signature, ACPI_FORMAT_UINT64(address),
			   local_header.length, local_header.revision,
			   local_header.oem_id, local_header.oem_table_id,
			   local_header.oem_revision,
			   local_header.asl_compiler_id,
			   local_header.asl_compiler_revision));
	}
}
linux-master
drivers/acpi/acpica/tbprint.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: dbdisply - debug display commands * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acdispat.h" #include "acnamesp.h" #include "acparser.h" #include "acinterp.h" #include "acevents.h" #include "acdebug.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbdisply") /* Local prototypes */ static void acpi_db_dump_parser_descriptor(union acpi_parse_object *op); static void *acpi_db_get_pointer(void *target); static acpi_status acpi_db_display_non_root_handlers(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); /* * System handler information. * Used for Handlers command, in acpi_db_display_handlers. */ #define ACPI_PREDEFINED_PREFIX "%25s (%.2X) : " #define ACPI_HANDLER_NAME_STRING "%30s : " #define ACPI_HANDLER_PRESENT_STRING "%-9s (%p)\n" #define ACPI_HANDLER_PRESENT_STRING2 "%-9s (%p)" #define ACPI_HANDLER_NOT_PRESENT_STRING "%-9s\n" /* All predefined Address Space IDs */ static acpi_adr_space_type acpi_gbl_space_id_list[] = { ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_ADR_SPACE_SYSTEM_IO, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_ADR_SPACE_EC, ACPI_ADR_SPACE_SMBUS, ACPI_ADR_SPACE_CMOS, ACPI_ADR_SPACE_PCI_BAR_TARGET, ACPI_ADR_SPACE_IPMI, ACPI_ADR_SPACE_GPIO, ACPI_ADR_SPACE_GSBUS, ACPI_ADR_SPACE_PLATFORM_COMM, ACPI_ADR_SPACE_PLATFORM_RT, ACPI_ADR_SPACE_DATA_TABLE, ACPI_ADR_SPACE_FIXED_HARDWARE }; /* Global handler information */ typedef struct acpi_handler_info { void *handler; char *name; } acpi_handler_info; static struct acpi_handler_info acpi_gbl_handler_list[] = { {&acpi_gbl_global_notify[0].handler, "System Notifications"}, {&acpi_gbl_global_notify[1].handler, "Device Notifications"}, {&acpi_gbl_table_handler, "ACPI Table Events"}, {&acpi_gbl_exception_handler, "Control Method Exceptions"}, 
{&acpi_gbl_interface_handler, "OSI Invocations"} }; /******************************************************************************* * * FUNCTION: acpi_db_get_pointer * * PARAMETERS: target - Pointer to string to be converted * * RETURN: Converted pointer * * DESCRIPTION: Convert an ascii pointer value to a real value * ******************************************************************************/ static void *acpi_db_get_pointer(void *target) { void *obj_ptr; acpi_size address; address = strtoul(target, NULL, 16); obj_ptr = ACPI_TO_POINTER(address); return (obj_ptr); } /******************************************************************************* * * FUNCTION: acpi_db_dump_parser_descriptor * * PARAMETERS: op - A parser Op descriptor * * RETURN: None * * DESCRIPTION: Display a formatted parser object * ******************************************************************************/ static void acpi_db_dump_parser_descriptor(union acpi_parse_object *op) { const struct acpi_opcode_info *info; info = acpi_ps_get_opcode_info(op->common.aml_opcode); acpi_os_printf("Parser Op Descriptor:\n"); acpi_os_printf("%20.20s : %4.4X\n", "Opcode", op->common.aml_opcode); ACPI_DEBUG_ONLY_MEMBERS(acpi_os_printf("%20.20s : %s\n", "Opcode Name", info->name)); acpi_os_printf("%20.20s : %p\n", "Value/ArgList", op->common.value.arg); acpi_os_printf("%20.20s : %p\n", "Parent", op->common.parent); acpi_os_printf("%20.20s : %p\n", "NextOp", op->common.next); } /******************************************************************************* * * FUNCTION: acpi_db_decode_and_display_object * * PARAMETERS: target - String with object to be displayed. Names * and hex pointers are supported. 
* output_type - Byte, Word, Dword, or Qword (B|W|D|Q) * * RETURN: None * * DESCRIPTION: Display a formatted ACPI object * ******************************************************************************/ void acpi_db_decode_and_display_object(char *target, char *output_type) { void *obj_ptr; struct acpi_namespace_node *node; union acpi_operand_object *obj_desc; u32 display = DB_BYTE_DISPLAY; char buffer[80]; struct acpi_buffer ret_buf; acpi_status status; u32 size; if (!target) { return; } /* Decode the output type */ if (output_type) { acpi_ut_strupr(output_type); if (output_type[0] == 'W') { display = DB_WORD_DISPLAY; } else if (output_type[0] == 'D') { display = DB_DWORD_DISPLAY; } else if (output_type[0] == 'Q') { display = DB_QWORD_DISPLAY; } } ret_buf.length = sizeof(buffer); ret_buf.pointer = buffer; /* Differentiate between a number and a name */ if ((target[0] >= 0x30) && (target[0] <= 0x39)) { obj_ptr = acpi_db_get_pointer(target); if (!acpi_os_readable(obj_ptr, 16)) { acpi_os_printf ("Address %p is invalid in this address space\n", obj_ptr); return; } /* Decode the object type */ switch (ACPI_GET_DESCRIPTOR_TYPE(obj_ptr)) { case ACPI_DESC_TYPE_NAMED: /* This is a namespace Node */ if (!acpi_os_readable (obj_ptr, sizeof(struct acpi_namespace_node))) { acpi_os_printf ("Cannot read entire Named object at address %p\n", obj_ptr); return; } node = obj_ptr; goto dump_node; case ACPI_DESC_TYPE_OPERAND: /* This is a ACPI OPERAND OBJECT */ if (!acpi_os_readable (obj_ptr, sizeof(union acpi_operand_object))) { acpi_os_printf ("Cannot read entire ACPI object at address %p\n", obj_ptr); return; } acpi_ut_debug_dump_buffer(obj_ptr, sizeof(union acpi_operand_object), display, ACPI_UINT32_MAX); acpi_ex_dump_object_descriptor(obj_ptr, 1); break; case ACPI_DESC_TYPE_PARSER: /* This is a Parser Op object */ if (!acpi_os_readable (obj_ptr, sizeof(union acpi_parse_object))) { acpi_os_printf ("Cannot read entire Parser object at address %p\n", obj_ptr); return; } 
acpi_ut_debug_dump_buffer(obj_ptr, sizeof(union acpi_parse_object), display, ACPI_UINT32_MAX); acpi_db_dump_parser_descriptor((union acpi_parse_object *)obj_ptr); break; default: /* Is not a recognizable object */ acpi_os_printf ("Not a known ACPI internal object, descriptor type %2.2X\n", ACPI_GET_DESCRIPTOR_TYPE(obj_ptr)); size = 16; if (acpi_os_readable(obj_ptr, 64)) { size = 64; } /* Just dump some memory */ acpi_ut_debug_dump_buffer(obj_ptr, size, display, ACPI_UINT32_MAX); break; } return; } /* The parameter is a name string that must be resolved to a Named obj */ node = acpi_db_local_ns_lookup(target); if (!node) { return; } dump_node: /* Now dump the NS node */ status = acpi_get_name(node, ACPI_FULL_PATHNAME_NO_TRAILING, &ret_buf); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not convert name to pathname\n"); } else { acpi_os_printf("Object %p: Namespace Node - Pathname: %s\n", node, (char *)ret_buf.pointer); } if (!acpi_os_readable(node, sizeof(struct acpi_namespace_node))) { acpi_os_printf("Invalid Named object at address %p\n", node); return; } acpi_ut_debug_dump_buffer((void *)node, sizeof(struct acpi_namespace_node), display, ACPI_UINT32_MAX); acpi_ex_dump_namespace_node(node, 1); obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { acpi_os_printf("\nAttached Object %p:", obj_desc); if (!acpi_os_readable (obj_desc, sizeof(union acpi_operand_object))) { acpi_os_printf ("Invalid internal ACPI Object at address %p\n", obj_desc); return; } if (ACPI_GET_DESCRIPTOR_TYPE(((struct acpi_namespace_node *) obj_desc)) == ACPI_DESC_TYPE_NAMED) { acpi_os_printf(" Namespace Node - "); status = acpi_get_name((struct acpi_namespace_node *) obj_desc, ACPI_FULL_PATHNAME_NO_TRAILING, &ret_buf); if (ACPI_FAILURE(status)) { acpi_os_printf ("Could not convert name to pathname\n"); } else { acpi_os_printf("Pathname: %s", (char *)ret_buf.pointer); } acpi_os_printf("\n"); acpi_ut_debug_dump_buffer((void *)obj_desc, sizeof(struct acpi_namespace_node), display, 
ACPI_UINT32_MAX); } else { acpi_os_printf("\n"); acpi_ut_debug_dump_buffer((void *)obj_desc, sizeof(union acpi_operand_object), display, ACPI_UINT32_MAX); } acpi_ex_dump_object_descriptor(obj_desc, 1); } } /******************************************************************************* * * FUNCTION: acpi_db_display_method_info * * PARAMETERS: start_op - Root of the control method parse tree * * RETURN: None * * DESCRIPTION: Display information about the current method * ******************************************************************************/ void acpi_db_display_method_info(union acpi_parse_object *start_op) { struct acpi_walk_state *walk_state; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; union acpi_parse_object *root_op; union acpi_parse_object *op; const struct acpi_opcode_info *op_info; u32 num_ops = 0; u32 num_operands = 0; u32 num_operators = 0; u32 num_remaining_ops = 0; u32 num_remaining_operands = 0; u32 num_remaining_operators = 0; u8 count_remaining = FALSE; walk_state = acpi_ds_get_current_walk_state(acpi_gbl_current_walk_list); if (!walk_state) { acpi_os_printf("There is no method currently executing\n"); return; } obj_desc = walk_state->method_desc; node = walk_state->method_node; acpi_os_printf("Currently executing control method is [%4.4s]\n", acpi_ut_get_node_name(node)); acpi_os_printf("%X Arguments, SyncLevel = %X\n", (u32)obj_desc->method.param_count, (u32)obj_desc->method.sync_level); root_op = start_op; while (root_op->common.parent) { root_op = root_op->common.parent; } op = root_op; while (op) { if (op == start_op) { count_remaining = TRUE; } num_ops++; if (count_remaining) { num_remaining_ops++; } /* Decode the opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); switch (op_info->class) { case AML_CLASS_ARGUMENT: if (count_remaining) { num_remaining_operands++; } num_operands++; break; case AML_CLASS_UNKNOWN: /* Bad opcode or ASCII character */ continue; default: if (count_remaining) { 
num_remaining_operators++; } num_operators++; break; } op = acpi_ps_get_depth_next(start_op, op); } acpi_os_printf ("Method contains: %X AML Opcodes - %X Operators, %X Operands\n", num_ops, num_operators, num_operands); acpi_os_printf ("Remaining to execute: %X AML Opcodes - %X Operators, %X Operands\n", num_remaining_ops, num_remaining_operators, num_remaining_operands); } /******************************************************************************* * * FUNCTION: acpi_db_display_locals * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display all locals for the currently running control method * ******************************************************************************/ void acpi_db_display_locals(void) { struct acpi_walk_state *walk_state; walk_state = acpi_ds_get_current_walk_state(acpi_gbl_current_walk_list); if (!walk_state) { acpi_os_printf("There is no method currently executing\n"); return; } acpi_db_decode_locals(walk_state); } /******************************************************************************* * * FUNCTION: acpi_db_display_arguments * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display all arguments for the currently running control method * ******************************************************************************/ void acpi_db_display_arguments(void) { struct acpi_walk_state *walk_state; walk_state = acpi_ds_get_current_walk_state(acpi_gbl_current_walk_list); if (!walk_state) { acpi_os_printf("There is no method currently executing\n"); return; } acpi_db_decode_arguments(walk_state); } /******************************************************************************* * * FUNCTION: acpi_db_display_results * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display current contents of a method result stack * ******************************************************************************/ void acpi_db_display_results(void) { u32 i; struct acpi_walk_state *walk_state; union acpi_operand_object *obj_desc; u32 
result_count = 0; struct acpi_namespace_node *node; union acpi_generic_state *frame; u32 index; /* Index onto current frame */ walk_state = acpi_ds_get_current_walk_state(acpi_gbl_current_walk_list); if (!walk_state) { acpi_os_printf("There is no method currently executing\n"); return; } node = walk_state->method_node; if (walk_state->results) { result_count = walk_state->result_count; } acpi_os_printf("Method [%4.4s] has %X stacked result objects\n", acpi_ut_get_node_name(node), result_count); /* From the top element of result stack */ frame = walk_state->results; index = (result_count - 1) % ACPI_RESULTS_FRAME_OBJ_NUM; for (i = 0; i < result_count; i++) { obj_desc = frame->results.obj_desc[index]; acpi_os_printf("Result%u: ", i); acpi_db_display_internal_object(obj_desc, walk_state); if (index == 0) { frame = frame->results.next; index = ACPI_RESULTS_FRAME_OBJ_NUM; } index--; } } /******************************************************************************* * * FUNCTION: acpi_db_display_calling_tree * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display current calling tree of nested control methods * ******************************************************************************/ void acpi_db_display_calling_tree(void) { struct acpi_walk_state *walk_state; struct acpi_namespace_node *node; walk_state = acpi_ds_get_current_walk_state(acpi_gbl_current_walk_list); if (!walk_state) { acpi_os_printf("There is no method currently executing\n"); return; } acpi_os_printf("Current Control Method Call Tree\n"); while (walk_state) { node = walk_state->method_node; acpi_os_printf(" [%4.4s]\n", acpi_ut_get_node_name(node)); walk_state = walk_state->next; } } /******************************************************************************* * * FUNCTION: acpi_db_display_object_type * * PARAMETERS: object_arg - User entered NS node handle * * RETURN: None * * DESCRIPTION: Display type of an arbitrary NS node * 
******************************************************************************/ void acpi_db_display_object_type(char *object_arg) { acpi_size arg; acpi_handle handle; struct acpi_device_info *info; acpi_status status; u32 i; arg = strtoul(object_arg, NULL, 16); handle = ACPI_TO_POINTER(arg); status = acpi_get_object_info(handle, &info); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not get object info, %s\n", acpi_format_exception(status)); return; } acpi_os_printf("ADR: %8.8X%8.8X, Flags: %X\n", ACPI_FORMAT_UINT64(info->address), info->flags); acpi_os_printf("S1D-%2.2X S2D-%2.2X S3D-%2.2X S4D-%2.2X\n", info->highest_dstates[0], info->highest_dstates[1], info->highest_dstates[2], info->highest_dstates[3]); acpi_os_printf("S0W-%2.2X S1W-%2.2X S2W-%2.2X S3W-%2.2X S4W-%2.2X\n", info->lowest_dstates[0], info->lowest_dstates[1], info->lowest_dstates[2], info->lowest_dstates[3], info->lowest_dstates[4]); if (info->valid & ACPI_VALID_HID) { acpi_os_printf("HID: %s\n", info->hardware_id.string); } if (info->valid & ACPI_VALID_UID) { acpi_os_printf("UID: %s\n", info->unique_id.string); } if (info->valid & ACPI_VALID_CID) { for (i = 0; i < info->compatible_id_list.count; i++) { acpi_os_printf("CID %u: %s\n", i, info->compatible_id_list.ids[i].string); } } ACPI_FREE(info); } /******************************************************************************* * * FUNCTION: acpi_db_display_result_object * * PARAMETERS: obj_desc - Object to be displayed * walk_state - Current walk state * * RETURN: None * * DESCRIPTION: Display the result of an AML opcode * * Note: Currently only displays the result object if we are single stepping. * However, this output may be useful in other contexts and could be enabled * to do so if needed. 
* ******************************************************************************/ void acpi_db_display_result_object(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { #ifndef ACPI_APPLICATION if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) { return; } #endif /* Only display if single stepping */ if (!acpi_gbl_cm_single_step) { return; } acpi_os_printf("ResultObj: "); acpi_db_display_internal_object(obj_desc, walk_state); acpi_os_printf("\n"); } /******************************************************************************* * * FUNCTION: acpi_db_display_argument_object * * PARAMETERS: obj_desc - Object to be displayed * walk_state - Current walk state * * RETURN: None * * DESCRIPTION: Display the result of an AML opcode * ******************************************************************************/ void acpi_db_display_argument_object(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { #ifndef ACPI_APPLICATION if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) { return; } #endif if (!acpi_gbl_cm_single_step) { return; } acpi_os_printf("ArgObj: "); acpi_db_display_internal_object(obj_desc, walk_state); } #if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_db_display_gpes * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display the current GPE structures * ******************************************************************************/ void acpi_db_display_gpes(void) { struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_xrupt_info *gpe_xrupt_info; struct acpi_gpe_event_info *gpe_event_info; struct acpi_gpe_register_info *gpe_register_info; char *gpe_type; struct acpi_gpe_notify_info *notify; u32 gpe_index; u32 block = 0; u32 i; u32 j; u32 count; char buffer[80]; struct acpi_buffer ret_buf; acpi_status status; ret_buf.length = sizeof(buffer); ret_buf.pointer = buffer; block = 0; /* Walk the GPE lists */ gpe_xrupt_info = 
acpi_gbl_gpe_xrupt_list_head; while (gpe_xrupt_info) { gpe_block = gpe_xrupt_info->gpe_block_list_head; while (gpe_block) { status = acpi_get_name(gpe_block->node, ACPI_FULL_PATHNAME_NO_TRAILING, &ret_buf); if (ACPI_FAILURE(status)) { acpi_os_printf ("Could not convert name to pathname\n"); } if (gpe_block->node == acpi_gbl_fadt_gpe_device) { gpe_type = "FADT-defined GPE block"; } else { gpe_type = "GPE Block Device"; } acpi_os_printf ("\nBlock %u - Info %p DeviceNode %p [%s] - %s\n", block, gpe_block, gpe_block->node, buffer, gpe_type); acpi_os_printf(" Registers: %u (%u GPEs)\n", gpe_block->register_count, gpe_block->gpe_count); acpi_os_printf (" GPE range: 0x%X to 0x%X on interrupt %u\n", gpe_block->block_base_number, gpe_block->block_base_number + (gpe_block->gpe_count - 1), gpe_xrupt_info->interrupt_number); acpi_os_printf (" RegisterInfo: %p Status %8.8X%8.8X Enable %8.8X%8.8X\n", gpe_block->register_info, ACPI_FORMAT_UINT64(gpe_block->register_info-> status_address.address), ACPI_FORMAT_UINT64(gpe_block->register_info-> enable_address.address)); acpi_os_printf(" EventInfo: %p\n", gpe_block->event_info); /* Examine each GPE Register within the block */ for (i = 0; i < gpe_block->register_count; i++) { gpe_register_info = &gpe_block->register_info[i]; acpi_os_printf(" Reg %u: (GPE %.2X-%.2X) " "RunEnable %2.2X WakeEnable %2.2X" " Status %8.8X%8.8X Enable %8.8X%8.8X\n", i, gpe_register_info-> base_gpe_number, gpe_register_info-> base_gpe_number + (ACPI_GPE_REGISTER_WIDTH - 1), gpe_register_info-> enable_for_run, gpe_register_info-> enable_for_wake, ACPI_FORMAT_UINT64 (gpe_register_info-> status_address.address), ACPI_FORMAT_UINT64 (gpe_register_info-> enable_address.address)); /* Now look at the individual GPEs in this byte register */ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; gpe_event_info = &gpe_block->event_info[gpe_index]; if (ACPI_GPE_DISPATCH_TYPE (gpe_event_info->flags) == ACPI_GPE_DISPATCH_NONE) { /* 
This GPE is not used (no method or handler), ignore it */ continue; } acpi_os_printf (" GPE %.2X: %p RunRefs %2.2X Flags %2.2X (", gpe_block->block_base_number + gpe_index, gpe_event_info, gpe_event_info->runtime_count, gpe_event_info->flags); /* Decode the flags byte */ if (gpe_event_info-> flags & ACPI_GPE_LEVEL_TRIGGERED) { acpi_os_printf("Level, "); } else { acpi_os_printf("Edge, "); } if (gpe_event_info-> flags & ACPI_GPE_CAN_WAKE) { acpi_os_printf("CanWake, "); } else { acpi_os_printf("RunOnly, "); } switch (ACPI_GPE_DISPATCH_TYPE (gpe_event_info->flags)) { case ACPI_GPE_DISPATCH_NONE: acpi_os_printf("NotUsed"); break; case ACPI_GPE_DISPATCH_METHOD: acpi_os_printf("Method"); break; case ACPI_GPE_DISPATCH_HANDLER: acpi_os_printf("Handler"); break; case ACPI_GPE_DISPATCH_NOTIFY: count = 0; notify = gpe_event_info->dispatch. notify_list; while (notify) { count++; notify = notify->next; } acpi_os_printf ("Implicit Notify on %u devices", count); break; case ACPI_GPE_DISPATCH_RAW_HANDLER: acpi_os_printf("RawHandler"); break; default: acpi_os_printf("UNKNOWN: %X", ACPI_GPE_DISPATCH_TYPE (gpe_event_info-> flags)); break; } acpi_os_printf(")\n"); } } block++; gpe_block = gpe_block->next; } gpe_xrupt_info = gpe_xrupt_info->next; } } #endif /* !ACPI_REDUCED_HARDWARE */ /******************************************************************************* * * FUNCTION: acpi_db_display_handlers * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display the currently installed global handlers * ******************************************************************************/ void acpi_db_display_handlers(void) { union acpi_operand_object *obj_desc; union acpi_operand_object *handler_obj; acpi_adr_space_type space_id; u32 i; /* Operation region handlers */ acpi_os_printf("\nOperation Region Handlers at the namespace root:\n"); obj_desc = acpi_ns_get_attached_object(acpi_gbl_root_node); if (obj_desc) { for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_gbl_space_id_list); i++) { space_id = 
acpi_gbl_space_id_list[i]; acpi_os_printf(ACPI_PREDEFINED_PREFIX, acpi_ut_get_region_name((u8)space_id), space_id); handler_obj = acpi_ev_find_region_handler(space_id, obj_desc->common_notify. handler); if (handler_obj) { acpi_os_printf(ACPI_HANDLER_PRESENT_STRING, (handler_obj->address_space. handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) ? "Default" : "User", handler_obj->address_space. handler); goto found_handler; } /* There is no handler for this space_id */ acpi_os_printf("None\n"); found_handler: ; } /* Find all handlers for user-defined space_IDs */ handler_obj = obj_desc->common_notify.handler; while (handler_obj) { if (handler_obj->address_space.space_id >= ACPI_USER_REGION_BEGIN) { acpi_os_printf(ACPI_PREDEFINED_PREFIX, "User-defined ID", handler_obj->address_space. space_id); acpi_os_printf(ACPI_HANDLER_PRESENT_STRING, (handler_obj->address_space. handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) ? "Default" : "User", handler_obj->address_space. handler); } handler_obj = handler_obj->address_space.next; } } #if (!ACPI_REDUCED_HARDWARE) /* Fixed event handlers */ acpi_os_printf("\nFixed Event Handlers:\n"); for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { acpi_os_printf(ACPI_PREDEFINED_PREFIX, acpi_ut_get_event_name(i), i); if (acpi_gbl_fixed_event_handlers[i].handler) { acpi_os_printf(ACPI_HANDLER_PRESENT_STRING, "User", acpi_gbl_fixed_event_handlers[i]. 
handler); } else { acpi_os_printf(ACPI_HANDLER_NOT_PRESENT_STRING, "None"); } } #endif /* !ACPI_REDUCED_HARDWARE */ /* Miscellaneous global handlers */ acpi_os_printf("\nMiscellaneous Global Handlers:\n"); for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_gbl_handler_list); i++) { acpi_os_printf(ACPI_HANDLER_NAME_STRING, acpi_gbl_handler_list[i].name); if (acpi_gbl_handler_list[i].handler) { acpi_os_printf(ACPI_HANDLER_PRESENT_STRING, "User", acpi_gbl_handler_list[i].handler); } else { acpi_os_printf(ACPI_HANDLER_NOT_PRESENT_STRING, "None"); } } /* Other handlers that are installed throughout the namespace */ acpi_os_printf("\nOperation Region Handlers for specific devices:\n"); (void)acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_db_display_non_root_handlers, NULL, NULL, NULL); } /******************************************************************************* * * FUNCTION: acpi_db_display_non_root_handlers * * PARAMETERS: acpi_walk_callback * * RETURN: Status * * DESCRIPTION: Display information about all handlers installed for a * device object. 
* ******************************************************************************/ static acpi_status acpi_db_display_non_root_handlers(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { struct acpi_namespace_node *node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle); union acpi_operand_object *obj_desc; union acpi_operand_object *handler_obj; char *pathname; obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { return (AE_OK); } pathname = acpi_ns_get_normalized_pathname(node, TRUE); if (!pathname) { return (AE_OK); } /* Display all handlers associated with this device */ handler_obj = obj_desc->common_notify.handler; while (handler_obj) { acpi_os_printf(ACPI_PREDEFINED_PREFIX, acpi_ut_get_region_name((u8)handler_obj-> address_space.space_id), handler_obj->address_space.space_id); acpi_os_printf(ACPI_HANDLER_PRESENT_STRING2, (handler_obj->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) ? "Default" : "User", handler_obj->address_space.handler); acpi_os_printf(" Device Name: %s (%p)\n", pathname, node); handler_obj = handler_obj->address_space.next; } ACPI_FREE(pathname); return (AE_OK); }
linux-master
drivers/acpi/acpica/dbdisply.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: exconcat - Concatenate-type AML operators * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlresrc.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exconcat") /* Local Prototypes */ static acpi_status acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc, union acpi_operand_object **result_desc); /******************************************************************************* * * FUNCTION: acpi_ex_do_concatenate * * PARAMETERS: operand0 - First source object * operand1 - Second source object * actual_return_desc - Where to place the return object * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Concatenate two objects with the ACPI-defined conversion * rules as necessary. * NOTE: * Per the ACPI spec (up to 6.1), Concatenate only supports Integer, * String, and Buffer objects. However, we support all objects here * as an extension. This improves the usefulness of both Concatenate * and the Printf/Fprintf macros. The extension returns a string * describing the object type for the other objects. * 02/2016. 
* ******************************************************************************/ acpi_status acpi_ex_do_concatenate(union acpi_operand_object *operand0, union acpi_operand_object *operand1, union acpi_operand_object **actual_return_desc, struct acpi_walk_state *walk_state) { union acpi_operand_object *local_operand0 = operand0; union acpi_operand_object *local_operand1 = operand1; union acpi_operand_object *temp_operand1 = NULL; union acpi_operand_object *return_desc; char *buffer; acpi_object_type operand0_type; acpi_object_type operand1_type; acpi_status status; ACPI_FUNCTION_TRACE(ex_do_concatenate); /* Operand 0 preprocessing */ switch (operand0->common.type) { case ACPI_TYPE_INTEGER: case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: operand0_type = operand0->common.type; break; default: /* For all other types, get the "object type" string */ status = acpi_ex_convert_to_object_type_string(operand0, &local_operand0); if (ACPI_FAILURE(status)) { goto cleanup; } operand0_type = ACPI_TYPE_STRING; break; } /* Operand 1 preprocessing */ switch (operand1->common.type) { case ACPI_TYPE_INTEGER: case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: operand1_type = operand1->common.type; break; default: /* For all other types, get the "object type" string */ status = acpi_ex_convert_to_object_type_string(operand1, &local_operand1); if (ACPI_FAILURE(status)) { goto cleanup; } operand1_type = ACPI_TYPE_STRING; break; } /* * Convert the second operand if necessary. The first operand (0) * determines the type of the second operand (1) (See the Data Types * section of the ACPI specification). Both object types are * guaranteed to be either Integer/String/Buffer by the operand * resolution mechanism. 
*/ switch (operand0_type) { case ACPI_TYPE_INTEGER: status = acpi_ex_convert_to_integer(local_operand1, &temp_operand1, ACPI_IMPLICIT_CONVERSION); break; case ACPI_TYPE_BUFFER: status = acpi_ex_convert_to_buffer(local_operand1, &temp_operand1); break; case ACPI_TYPE_STRING: switch (operand1_type) { case ACPI_TYPE_INTEGER: case ACPI_TYPE_STRING: case ACPI_TYPE_BUFFER: /* Other types have already been converted to string */ status = acpi_ex_convert_to_string(local_operand1, &temp_operand1, ACPI_IMPLICIT_CONVERT_HEX); break; default: status = AE_OK; break; } break; default: ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X", operand0->common.type)); status = AE_AML_INTERNAL; } if (ACPI_FAILURE(status)) { goto cleanup; } /* Take care with any newly created operand objects */ if ((local_operand1 != operand1) && (local_operand1 != temp_operand1)) { acpi_ut_remove_reference(local_operand1); } local_operand1 = temp_operand1; /* * Both operands are now known to be the same object type * (Both are Integer, String, or Buffer), and we can now perform * the concatenation. 
* * There are three cases to handle, as per the ACPI spec: * * 1) Two Integers concatenated to produce a new Buffer * 2) Two Strings concatenated to produce a new String * 3) Two Buffers concatenated to produce a new Buffer */ switch (operand0_type) { case ACPI_TYPE_INTEGER: /* Result of two Integers is a Buffer */ /* Need enough buffer space for two integers */ return_desc = acpi_ut_create_buffer_object((acpi_size) ACPI_MUL_2 (acpi_gbl_integer_byte_width)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } buffer = (char *)return_desc->buffer.pointer; /* Copy the first integer, LSB first */ memcpy(buffer, &operand0->integer.value, acpi_gbl_integer_byte_width); /* Copy the second integer (LSB first) after the first */ memcpy(buffer + acpi_gbl_integer_byte_width, &local_operand1->integer.value, acpi_gbl_integer_byte_width); break; case ACPI_TYPE_STRING: /* Result of two Strings is a String */ return_desc = acpi_ut_create_string_object(((acpi_size) local_operand0-> string.length + local_operand1-> string.length)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } buffer = return_desc->string.pointer; /* Concatenate the strings */ strcpy(buffer, local_operand0->string.pointer); strcat(buffer, local_operand1->string.pointer); break; case ACPI_TYPE_BUFFER: /* Result of two Buffers is a Buffer */ return_desc = acpi_ut_create_buffer_object(((acpi_size) operand0->buffer. 
length + local_operand1-> buffer.length)); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } buffer = (char *)return_desc->buffer.pointer; /* Concatenate the buffers */ memcpy(buffer, operand0->buffer.pointer, operand0->buffer.length); memcpy(buffer + operand0->buffer.length, local_operand1->buffer.pointer, local_operand1->buffer.length); break; default: /* Invalid object type, should not happen here */ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X", operand0->common.type)); status = AE_AML_INTERNAL; goto cleanup; } *actual_return_desc = return_desc; cleanup: if (local_operand0 != operand0) { acpi_ut_remove_reference(local_operand0); } if (local_operand1 != operand1) { acpi_ut_remove_reference(local_operand1); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_convert_to_object_type_string * * PARAMETERS: obj_desc - Object to be converted * return_desc - Where to place the return object * * RETURN: Status * * DESCRIPTION: Convert an object of arbitrary type to a string object that * contains the namestring for the object. Used for the * concatenate operator. 
* ******************************************************************************/ static acpi_status acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc, union acpi_operand_object **result_desc) { union acpi_operand_object *return_desc; const char *type_string; type_string = acpi_ut_get_type_name(obj_desc->common.type); return_desc = acpi_ut_create_string_object(((acpi_size)strlen(type_string) + 9)); /* 9 For "[ Object]" */ if (!return_desc) { return (AE_NO_MEMORY); } strcpy(return_desc->string.pointer, "["); strcat(return_desc->string.pointer, type_string); strcat(return_desc->string.pointer, " Object]"); *result_desc = return_desc; return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_concat_template * * PARAMETERS: operand0 - First source object * operand1 - Second source object * actual_return_desc - Where to place the return object * walk_state - Current walk state * * RETURN: Status * * DESCRIPTION: Concatenate two resource templates * ******************************************************************************/ acpi_status acpi_ex_concat_template(union acpi_operand_object *operand0, union acpi_operand_object *operand1, union acpi_operand_object **actual_return_desc, struct acpi_walk_state *walk_state) { acpi_status status; union acpi_operand_object *return_desc; u8 *new_buf; u8 *end_tag; acpi_size length0; acpi_size length1; acpi_size new_length; ACPI_FUNCTION_TRACE(ex_concat_template); /* * Find the end_tag descriptor in each resource template. * Note1: returned pointers point TO the end_tag, not past it. 
* Note2: zero-length buffers are allowed; treated like one end_tag */ /* Get the length of the first resource template */ status = acpi_ut_get_resource_end_tag(operand0, &end_tag); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer); /* Get the length of the second resource template */ status = acpi_ut_get_resource_end_tag(operand1, &end_tag); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer); /* Combine both lengths, minimum size will be 2 for end_tag */ new_length = length0 + length1 + sizeof(struct aml_resource_end_tag); /* Create a new buffer object for the result (with one end_tag) */ return_desc = acpi_ut_create_buffer_object(new_length); if (!return_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* * Copy the templates to the new buffer, 0 first, then 1 follows. One * end_tag descriptor is copied from Operand1. */ new_buf = return_desc->buffer.pointer; memcpy(new_buf, operand0->buffer.pointer, length0); memcpy(new_buf + length0, operand1->buffer.pointer, length1); /* Insert end_tag and set the checksum to zero, means "ignore checksum" */ new_buf[new_length - 1] = 0; new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1; /* Return the completed resource template */ *actual_return_desc = return_desc; return_ACPI_STATUS(AE_OK); }
linux-master
drivers/acpi/acpica/exconcat.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: nsalloc - Namespace allocation and deletion utilities
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_NAMESPACE
ACPI_MODULE_NAME("nsalloc")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_create_node
 *
 * PARAMETERS:  name            - Name of the new node (4 char ACPI name)
 *
 * RETURN:      New namespace node (Null on failure)
 *
 * DESCRIPTION: Create a namespace node. Nodes come from a dedicated object
 *              cache; the caller owns the returned node.
 *
 ******************************************************************************/
struct acpi_namespace_node *acpi_ns_create_node(u32 name)
{
	struct acpi_namespace_node *node;
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
	u32 temp;
#endif

	ACPI_FUNCTION_TRACE(ns_create_node);

	node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
	if (!node) {
		return_PTR(NULL);
	}

	ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);

#ifdef ACPI_DBG_TRACK_ALLOCATIONS
	/* Track the high-water mark of simultaneously allocated nodes */

	temp = acpi_gbl_ns_node_list->total_allocated -
	    acpi_gbl_ns_node_list->total_freed;
	if (temp > acpi_gbl_ns_node_list->max_occupied) {
		acpi_gbl_ns_node_list->max_occupied = temp;
	}
#endif

	node->name.integer = name;
	ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
	return_PTR(node);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_delete_node
 *
 * PARAMETERS:  node            - Node to be deleted
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete a namespace node. All node deletions must come through
 *              here. Detaches any attached objects, including any attached
 *              data. If a handler is associated with attached data, it is
 *              invoked before the node is deleted.
 *
 ******************************************************************************/
void acpi_ns_delete_node(struct acpi_namespace_node *node)
{
	union acpi_operand_object *obj_desc;
	union acpi_operand_object *next_desc;

	ACPI_FUNCTION_NAME(ns_delete_node);

	if (!node) {
		return_VOID;
	}

	/* Detach an object if there is one */

	acpi_ns_detach_object(node);

	/*
	 * Delete an attached data object list if present (objects that were
	 * attached via acpi_attach_data). Note: After any normal object is
	 * detached above, the only possible remaining object(s) are data
	 * objects, in a linked list.
	 */
	obj_desc = node->object;
	while (obj_desc && (obj_desc->common.type == ACPI_TYPE_LOCAL_DATA)) {

		/* Invoke the attached data deletion handler if present */

		if (obj_desc->data.handler) {
			obj_desc->data.handler(node, obj_desc->data.pointer);
		}

		next_desc = obj_desc->common.next_object;
		acpi_ut_remove_reference(obj_desc);
		obj_desc = next_desc;
	}

	/*
	 * Special case for the statically allocated root node: it does not
	 * come from the cache, so it must never be released to it.
	 */
	if (node == acpi_gbl_root_node) {
		return;
	}

	/* Now we can delete the node */

	(void)acpi_os_release_object(acpi_gbl_namespace_cache, node);

	ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
	ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Node %p, Remaining %X\n",
			  node, acpi_gbl_current_node_count));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_remove_node
 *
 * PARAMETERS:  node            - Node to be removed/deleted
 *
 * RETURN:      None
 *
 * DESCRIPTION: Remove (unlink) and delete a namespace node
 *
 ******************************************************************************/
void acpi_ns_remove_node(struct acpi_namespace_node *node)
{
	struct acpi_namespace_node *parent_node;
	struct acpi_namespace_node *prev_node;
	struct acpi_namespace_node *next_node;

	ACPI_FUNCTION_TRACE_PTR(ns_remove_node, node);

	parent_node = node->parent;

	prev_node = NULL;
	next_node = parent_node->child;

	/* Find the node that is the previous peer in the parent's child list */

	while (next_node != node) {
		prev_node = next_node;
		next_node = next_node->peer;
	}

	if (prev_node) {

		/* Node is not first child, unlink it */

		prev_node->peer = node->peer;
	} else {
		/*
		 * Node is first child (has no previous peer).
		 * Link peer list to parent
		 */
		parent_node->child = node->peer;
	}

	/* Delete the node and any attached objects */

	acpi_ns_delete_node(node);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_install_node
 *
 * PARAMETERS:  walk_state      - Current state of the walk
 *              parent_node     - The parent of the new Node
 *              node            - The new Node to install
 *              type            - ACPI object type of the new Node
 *
 * RETURN:      None
 *
 * DESCRIPTION: Initialize a new namespace node and install it amongst
 *              its peers.
 *
 *              Note: Current namespace lookup is linear search. This appears
 *              to be sufficient as namespace searches consume only a small
 *              fraction of the execution time of the ACPI subsystem.
 *
 ******************************************************************************/
void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namespace_node *parent_node,	/* Parent */
			  struct acpi_namespace_node *node,	/* New Child */
			  acpi_object_type type)
{
	acpi_owner_id owner_id = 0;
	struct acpi_namespace_node *child_node;

	ACPI_FUNCTION_TRACE(ns_install_node);

	if (walk_state) {
		/*
		 * Get the owner ID from the Walk state. The owner ID is used to
		 * track table deletion and deletion of objects created by methods.
		 */
		owner_id = walk_state->owner_id;

		if ((walk_state->method_desc) &&
		    (parent_node != walk_state->method_node)) {
			/*
			 * A method is creating a new node that is not a child of the
			 * method (it is non-local). Mark the executing method as having
			 * modified the namespace. This is used for cleanup when the
			 * method exits.
			 */
			walk_state->method_desc->method.info_flags |=
			    ACPI_METHOD_MODIFIED_NAMESPACE;
		}
	}

	/* Link the new entry into the parent and existing children */

	node->peer = NULL;
	node->parent = parent_node;
	child_node = parent_node->child;

	if (!child_node) {
		parent_node->child = node;
	} else {
		/* Add node to the end of the peer list */

		while (child_node->peer) {
			child_node = child_node->peer;
		}

		child_node->peer = node;
	}

	/* Init the new entry */

	node->owner_id = owner_id;
	node->type = (u8) type;

	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
			  "%4.4s (%s) [Node %p Owner %3.3X] added to %4.4s (%s) [Node %p]\n",
			  acpi_ut_get_node_name(node),
			  acpi_ut_get_type_name(node->type), node, owner_id,
			  acpi_ut_get_node_name(parent_node),
			  acpi_ut_get_type_name(parent_node->type),
			  parent_node));

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_delete_children
 *
 * PARAMETERS:  parent_node     - Delete this objects children
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Delete all children of the parent object. In other words,
 *              deletes a "scope". Assumes grandchildren have already been
 *              deleted (deletion is bottom-up); logs an error otherwise.
 *
 ******************************************************************************/
void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
{
	struct acpi_namespace_node *next_node;
	struct acpi_namespace_node *node_to_delete;

	ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);

	if (!parent_node) {
		return_VOID;
	}

	/* Deallocate all children at this level */

	next_node = parent_node->child;
	while (next_node) {

		/* Grandchildren should have all been deleted already */

		if (next_node->child) {
			ACPI_ERROR((AE_INFO, "Found a grandchild! P=%p C=%p",
				    parent_node, next_node));
		}

		/*
		 * Delete this child node and move on to the next child in the list.
		 * No need to unlink the node since we are deleting the entire branch.
		 */
		node_to_delete = next_node;
		next_node = next_node->peer;
		acpi_ns_delete_node(node_to_delete);
	}

	/* Clear the parent's child pointer */

	parent_node->child = NULL;
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_delete_namespace_subtree
 *
 * PARAMETERS:  parent_node     - Root of the subtree to be deleted
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Delete a subtree of the namespace. This includes all objects
 *              stored within the subtree. Acquires the namespace mutex for
 *              the duration of the walk.
 *
 ******************************************************************************/
void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
{
	struct acpi_namespace_node *child_node = NULL;
	u32 level = 1;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);

	if (!parent_node) {
		return_VOID;
	}

	/* Lock namespace for possible update */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Traverse the tree of objects until we bubble back up
	 * to where we started. Non-recursive walk: "level" tracks
	 * the current depth relative to the subtree root.
	 */
	while (level > 0) {

		/* Get the next node in this scope (NULL if none) */

		child_node = acpi_ns_get_next_node(parent_node, child_node);
		if (child_node) {

			/* Found a child node - detach any attached object */

			acpi_ns_detach_object(child_node);

			/* Check if this node has any children */

			if (child_node->child) {
				/*
				 * There is at least one child of this node,
				 * visit the node
				 */
				level++;
				parent_node = child_node;
				child_node = NULL;
			}
		} else {
			/*
			 * No more children of this parent node.
			 * Move up to the grandparent.
			 */
			level--;

			/*
			 * Now delete all of the children of this parent
			 * all at the same time.
			 */
			acpi_ns_delete_children(parent_node);

			/* New "last child" is this parent node */

			child_node = parent_node;

			/* Move up the tree to the grandparent */

			parent_node = parent_node->parent;
		}
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_delete_namespace_by_owner
 *
 * PARAMETERS:  owner_id    - All nodes with this owner will be deleted
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete entries within the namespace that are owned by a
 *              specific ID. Used to delete entire ACPI tables. All
 *              reference counts are updated.
 *
 * MUTEX:       Locks namespace during deletion walk.
 *
 ******************************************************************************/
void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
{
	struct acpi_namespace_node *child_node;
	struct acpi_namespace_node *deletion_node;
	struct acpi_namespace_node *parent_node;
	u32 level;
	acpi_status status;

	ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);

	if (owner_id == 0) {
		return_VOID;
	}

	/* Lock namespace for possible update */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	deletion_node = NULL;
	parent_node = acpi_gbl_root_node;
	child_node = NULL;
	level = 1;

	/*
	 * Traverse the tree of nodes until we bubble back up
	 * to where we started. Deletion of a matching node is deferred
	 * by one iteration (via deletion_node) so the walk can first
	 * advance past it.
	 */
	while (level > 0) {
		/*
		 * Get the next child of this parent node. When child_node is NULL,
		 * the first child of the parent is returned
		 */
		child_node = acpi_ns_get_next_node(parent_node, child_node);

		if (deletion_node) {
			acpi_ns_delete_children(deletion_node);
			acpi_ns_remove_node(deletion_node);
			deletion_node = NULL;
		}

		if (child_node) {
			if (child_node->owner_id == owner_id) {

				/* Found a matching child node - detach any attached object */

				acpi_ns_detach_object(child_node);
			}

			/* Check if this node has any children */

			if (child_node->child) {
				/*
				 * There is at least one child of this node,
				 * visit the node
				 */
				level++;
				parent_node = child_node;
				child_node = NULL;
			} else if (child_node->owner_id == owner_id) {
				deletion_node = child_node;
			}
		} else {
			/*
			 * No more children of this parent node.
			 * Move up to the grandparent.
			 */
			level--;
			if (level != 0) {
				if (parent_node->owner_id == owner_id) {
					deletion_node = parent_node;
				}
			}

			/* New "last child" is this parent node */

			child_node = parent_node;

			/* Move up the tree to the grandparent */

			parent_node = parent_node->parent;
		}
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_VOID;
}
/* linux-master */
/* drivers/acpi/acpica/nsalloc.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
 *                    extended FADT-V5 sleep registers.
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwesleep")

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_execute_sleep_method
 *
 * PARAMETERS:  method_pathname     - Pathname of method to execute
 *              integer_argument    - Argument to pass to the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Execute a sleep/wake related method with one integer argument
 *              and no return value.
 *
 ******************************************************************************/
void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
{
	struct acpi_object_list arg_list;
	union acpi_object arg;
	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_execute_sleep_method);

	/* One argument, integer_argument; No return value expected */

	arg_list.count = 1;
	arg_list.pointer = &arg;
	arg.type = ACPI_TYPE_INTEGER;
	arg.integer.value = (u64)integer_argument;

	status = acpi_evaluate_object(NULL, method_pathname, &arg_list, NULL);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		/* AE_NOT_FOUND is tolerated: these methods are optional */
		ACPI_EXCEPTION((AE_INFO, status, "While executing method %s",
				method_pathname));
	}

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_extended_sleep
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state via the extended FADT sleep
 *              registers (V5 FADT).
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status acpi_hw_extended_sleep(u8 sleep_state)
{
	acpi_status status;
	u8 sleep_control;
	u64 sleep_status;

	ACPI_FUNCTION_TRACE(hw_extended_sleep);

	/* Extended sleep registers must be valid */

	if (!acpi_gbl_FADT.sleep_control.address ||
	    !acpi_gbl_FADT.sleep_status.address) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* Clear wake status (WAK_STS) */

	status = acpi_write((u64)ACPI_X_WAKE_STATUS,
			    &acpi_gbl_FADT.sleep_status);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	acpi_gbl_system_awake_and_running = FALSE;

	/*
	 * Set the SLP_TYP and SLP_EN bits.
	 *
	 * Note: We only use the first value returned by the \_Sx method
	 * (acpi_gbl_sleep_type_a) - As per ACPI specification.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Entering sleep state [S%u]\n", sleep_state));

	sleep_control = ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
			 ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE;

	/* Flush caches, as per ACPI specification */

	if (sleep_state < ACPI_STATE_S4) {
		ACPI_FLUSH_CPU_CACHE();
	}

	/* Give the OS a chance to veto or defer the transition */

	status = acpi_os_enter_sleep(sleep_state, sleep_control, 0);
	if (status == AE_CTRL_TERMINATE) {
		return_ACPI_STATUS(AE_OK);
	}
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Writing SLP_TYP | SLP_EN initiates the actual sleep transition */

	status = acpi_write((u64)sleep_control, &acpi_gbl_FADT.sleep_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Wait for transition back to Working State */

	do {
		status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

	} while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS));

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_extended_wake_prep
 *
 * PARAMETERS:  sleep_state         - Which sleep state we just exited
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform first part of OS-independent ACPI cleanup after
 *              a sleep. Called with interrupts ENABLED.
 *
 ******************************************************************************/
acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
{
	u8 sleep_type_value;

	ACPI_FUNCTION_TRACE(hw_extended_wake_prep);

	/* Restore the S0 sleep type, if one was previously saved */

	if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) {
		sleep_type_value =
		    ((acpi_gbl_sleep_type_a_s0 << ACPI_X_SLEEP_TYPE_POSITION) &
		     ACPI_X_SLEEP_TYPE_MASK);

		(void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
				 &acpi_gbl_FADT.sleep_control);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_extended_wake
 *
 * PARAMETERS:  sleep_state         - Which sleep state we just exited
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
 *              Called with interrupts ENABLED.
 *
 ******************************************************************************/
acpi_status acpi_hw_extended_wake(u8 sleep_state)
{
	ACPI_FUNCTION_TRACE(hw_extended_wake);

	/* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */

	acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;

	/* Execute the wake methods */

	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
	acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);

	/*
	 * Some BIOS code assumes that WAK_STS will be cleared on resume
	 * and use it to determine whether the system is rebooting or
	 * resuming. Clear WAK_STS for compatibility.
	 */
	(void)acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
	acpi_gbl_system_awake_and_running = TRUE;

	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
	return_ACPI_STATUS(AE_OK);
}
/* linux-master */
/* drivers/acpi/acpica/hwesleep.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: utexcep - Exception code support * ******************************************************************************/ #define EXPORT_ACPI_INTERFACES #define ACPI_DEFINE_EXCEPTION_TABLE #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utexcep") /******************************************************************************* * * FUNCTION: acpi_format_exception * * PARAMETERS: status - The acpi_status code to be formatted * * RETURN: A string containing the exception text. A valid pointer is * always returned. * * DESCRIPTION: This function translates an ACPI exception into an ASCII * string. Returns "unknown status" string for invalid codes. * ******************************************************************************/ const char *acpi_format_exception(acpi_status status) { const struct acpi_exception_info *exception; ACPI_FUNCTION_ENTRY(); exception = acpi_ut_validate_exception(status); if (!exception) { /* Exception code was not recognized */ ACPI_ERROR((AE_INFO, "Unknown exception code: 0x%8.8X", status)); return ("UNKNOWN_STATUS_CODE"); } return (exception->name); } ACPI_EXPORT_SYMBOL(acpi_format_exception) /******************************************************************************* * * FUNCTION: acpi_ut_validate_exception * * PARAMETERS: status - The acpi_status code to be formatted * * RETURN: A string containing the exception text. NULL if exception is * not valid. * * DESCRIPTION: This function validates and translates an ACPI exception into * an ASCII string. 
* ******************************************************************************/ const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status status) { u32 sub_status; const struct acpi_exception_info *exception = NULL; ACPI_FUNCTION_ENTRY(); /* * Status is composed of two parts, a "type" and an actual code */ sub_status = (status & ~AE_CODE_MASK); switch (status & AE_CODE_MASK) { case AE_CODE_ENVIRONMENTAL: if (sub_status <= AE_CODE_ENV_MAX) { exception = &acpi_gbl_exception_names_env[sub_status]; } break; case AE_CODE_PROGRAMMER: if (sub_status <= AE_CODE_PGM_MAX) { exception = &acpi_gbl_exception_names_pgm[sub_status]; } break; case AE_CODE_ACPI_TABLES: if (sub_status <= AE_CODE_TBL_MAX) { exception = &acpi_gbl_exception_names_tbl[sub_status]; } break; case AE_CODE_AML: if (sub_status <= AE_CODE_AML_MAX) { exception = &acpi_gbl_exception_names_aml[sub_status]; } break; case AE_CODE_CONTROL: if (sub_status <= AE_CODE_CTRL_MAX) { exception = &acpi_gbl_exception_names_ctrl[sub_status]; } break; default: break; } if (!exception || !exception->name) { return (NULL); } return (exception); }
/* linux-master */
/* drivers/acpi/acpica/utexcep.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: dswexec - Dispatcher method execution callbacks; * dispatch to interpreter. * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" #include "acdebug.h" #ifdef ACPI_EXEC_APP #include "aecommon.h" #endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswexec") /* * Dispatch table for opcode classes */ static acpi_execute_op acpi_gbl_op_type_dispatch[] = { acpi_ex_opcode_0A_0T_1R, acpi_ex_opcode_1A_0T_0R, acpi_ex_opcode_1A_0T_1R, NULL, /* Was: acpi_ex_opcode_1A_0T_0R (Was for Load operator) */ acpi_ex_opcode_1A_1T_1R, acpi_ex_opcode_2A_0T_0R, acpi_ex_opcode_2A_0T_1R, acpi_ex_opcode_2A_1T_1R, acpi_ex_opcode_2A_2T_1R, acpi_ex_opcode_3A_0T_0R, acpi_ex_opcode_3A_1T_1R, acpi_ex_opcode_6A_0T_1R }; /***************************************************************************** * * FUNCTION: acpi_ds_get_predicate_value * * PARAMETERS: walk_state - Current state of the parse tree walk * result_obj - if non-zero, pop result from result stack * * RETURN: Status * * DESCRIPTION: Get the result of a predicate evaluation * ****************************************************************************/ acpi_status acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, union acpi_operand_object *result_obj) { acpi_status status = AE_OK; union acpi_operand_object *obj_desc; union acpi_operand_object *local_obj_desc = NULL; ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state); walk_state->control_state->common.state = 0; if (result_obj) { status = acpi_ds_result_pop(&obj_desc, walk_state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not get result from predicate evaluation")); 
return_ACPI_STATUS(status); } } else { status = acpi_ds_create_operand(walk_state, walk_state->op, 0); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_ex_resolve_to_value(&walk_state->operands[0], walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } obj_desc = walk_state->operands[0]; } if (!obj_desc) { ACPI_ERROR((AE_INFO, "No predicate ObjDesc=%p State=%p", obj_desc, walk_state)); return_ACPI_STATUS(AE_AML_NO_OPERAND); } /* * Result of predicate evaluation must be an Integer * object. Implicitly convert the argument if necessary. */ status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc, ACPI_IMPLICIT_CONVERSION); if (ACPI_FAILURE(status)) { goto cleanup; } if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X", obj_desc, walk_state, obj_desc->common.type)); status = AE_AML_OPERAND_TYPE; goto cleanup; } /* Truncate the predicate to 32-bits if necessary */ (void)acpi_ex_truncate_for32bit_table(local_obj_desc); /* * Save the result of the predicate evaluation on * the control stack */ if (local_obj_desc->integer.value) { walk_state->control_state->common.value = TRUE; } else { /* * Predicate is FALSE, we will just toss the * rest of the package */ walk_state->control_state->common.value = FALSE; status = AE_CTRL_FALSE; } /* Predicate can be used for an implicit return value */ (void)acpi_ds_do_implicit_return(local_obj_desc, walk_state, TRUE); cleanup: ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Completed a predicate eval=%X Op=%p\n", walk_state->control_state->common.value, walk_state->op)); /* Break to debugger to display result */ acpi_db_display_result_object(local_obj_desc, walk_state); /* * Delete the predicate result object (we know that * we don't need it anymore) */ if (local_obj_desc != obj_desc) { acpi_ut_remove_reference(local_obj_desc); } acpi_ut_remove_reference(obj_desc); walk_state->control_state->common.state = 
ACPI_CONTROL_NORMAL; return_ACPI_STATUS(status); } /***************************************************************************** * * FUNCTION: acpi_ds_exec_begin_op * * PARAMETERS: walk_state - Current state of the parse tree walk * out_op - Where to return op if a new one is created * * RETURN: Status * * DESCRIPTION: Descending callback used during the execution of control * methods. This is where most operators and operands are * dispatched to the interpreter. * ****************************************************************************/ acpi_status acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, union acpi_parse_object **out_op) { union acpi_parse_object *op; acpi_status status = AE_OK; u32 opcode_class; ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state); op = walk_state->op; if (!op) { status = acpi_ds_load2_begin_op(walk_state, out_op); if (ACPI_FAILURE(status)) { goto error_exit; } op = *out_op; walk_state->op = op; walk_state->opcode = op->common.aml_opcode; walk_state->op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (acpi_ns_opens_scope(walk_state->op_info->object_type)) { ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "(%s) Popping scope for Op %p\n", acpi_ut_get_type_name(walk_state-> op_info-> object_type), op)); status = acpi_ds_scope_stack_pop(walk_state); if (ACPI_FAILURE(status)) { goto error_exit; } } } if (op == walk_state->origin) { if (out_op) { *out_op = op; } return_ACPI_STATUS(AE_OK); } /* * If the previous opcode was a conditional, this opcode * must be the beginning of the associated predicate. 
* Save this knowledge in the current scope descriptor */ if ((walk_state->control_state) && (walk_state->control_state->common.state == ACPI_CONTROL_CONDITIONAL_EXECUTING)) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Exec predicate Op=%p State=%p\n", op, walk_state)); walk_state->control_state->common.state = ACPI_CONTROL_PREDICATE_EXECUTING; /* Save start of predicate */ walk_state->control_state->control.predicate_op = op; } opcode_class = walk_state->op_info->class; /* We want to send namepaths to the load code */ if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) { opcode_class = AML_CLASS_NAMED_OBJECT; } /* * Handle the opcode based upon the opcode type */ switch (opcode_class) { case AML_CLASS_CONTROL: status = acpi_ds_exec_begin_control_op(walk_state, op); break; case AML_CLASS_NAMED_OBJECT: if (walk_state->walk_type & ACPI_WALK_METHOD) { /* * Found a named object declaration during method execution; * we must enter this object into the namespace. The created * object is temporary and will be deleted upon completion of * the execution of this method. * * Note 10/2010: Except for the Scope() op. This opcode does * not actually create a new object, it refers to an existing * object. However, for Scope(), we want to indeed open a * new scope. 
*/ if (op->common.aml_opcode != AML_SCOPE_OP) { status = acpi_ds_load2_begin_op(walk_state, NULL); } else { status = acpi_ds_scope_stack_push(op->named.node, op->named.node-> type, walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } } break; case AML_CLASS_EXECUTE: case AML_CLASS_CREATE: break; default: break; } /* Nothing to do here during method execution */ return_ACPI_STATUS(status); error_exit: status = acpi_ds_method_error(status, walk_state); return_ACPI_STATUS(status); } /***************************************************************************** * * FUNCTION: acpi_ds_exec_end_op * * PARAMETERS: walk_state - Current state of the parse tree walk * * RETURN: Status * * DESCRIPTION: Ascending callback used during the execution of control * methods. The only thing we really need to do here is to * notice the beginning of IF, ELSE, and WHILE blocks. * ****************************************************************************/ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) { union acpi_parse_object *op; acpi_status status = AE_OK; u32 op_type; u32 op_class; union acpi_parse_object *next_op; union acpi_parse_object *first_arg; #ifdef ACPI_EXEC_APP char *namepath; union acpi_operand_object *obj_desc; #endif ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state); op = walk_state->op; op_type = walk_state->op_info->type; op_class = walk_state->op_info->class; if (op_class == AML_CLASS_UNKNOWN) { ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X", op->common.aml_opcode)); return_ACPI_STATUS(AE_NOT_IMPLEMENTED); } first_arg = op->common.value.arg; /* Init the walk state */ walk_state->num_operands = 0; walk_state->operand_index = 0; walk_state->return_desc = NULL; walk_state->result_obj = NULL; /* Call debugger for single step support (DEBUG build only) */ status = acpi_db_single_step(walk_state, op, op_class); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Decode the Opcode Class */ switch (op_class) { case 
AML_CLASS_ARGUMENT: /* Constants, literals, etc. */ if (walk_state->opcode == AML_INT_NAMEPATH_OP) { status = acpi_ds_evaluate_name_path(walk_state); if (ACPI_FAILURE(status)) { goto cleanup; } } break; case AML_CLASS_EXECUTE: /* Most operators with arguments */ /* Build resolved operand stack */ status = acpi_ds_create_operands(walk_state, first_arg); if (ACPI_FAILURE(status)) { goto cleanup; } /* * All opcodes require operand resolution, with the only exceptions * being the object_type and size_of operators as well as opcodes that * take no arguments. */ if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE) && (walk_state->op_info->flags & AML_HAS_ARGS)) { /* Resolve all operands */ status = acpi_ex_resolve_operands(walk_state->opcode, &(walk_state-> operands [walk_state-> num_operands - 1]), walk_state); } if (ACPI_SUCCESS(status)) { /* * Dispatch the request to the appropriate interpreter handler * routine. There is one routine per opcode "type" based upon the * number of opcode arguments and return type. */ status = acpi_gbl_op_type_dispatch[op_type] (walk_state); } else { /* * Treat constructs of the form "Store(LocalX,LocalX)" as noops when the * Local is uninitialized. 
*/ if ((status == AE_AML_UNINITIALIZED_LOCAL) && (walk_state->opcode == AML_STORE_OP) && (walk_state->operands[0]->common.type == ACPI_TYPE_LOCAL_REFERENCE) && (walk_state->operands[1]->common.type == ACPI_TYPE_LOCAL_REFERENCE) && (walk_state->operands[0]->reference.class == walk_state->operands[1]->reference.class) && (walk_state->operands[0]->reference.value == walk_state->operands[1]->reference.value)) { status = AE_OK; } else { ACPI_EXCEPTION((AE_INFO, status, "While resolving operands for [%s]", acpi_ps_get_opcode_name (walk_state->opcode))); } } /* Always delete the argument objects and clear the operand stack */ acpi_ds_clear_operands(walk_state); /* * If a result object was returned from above, push it on the * current result stack */ if (ACPI_SUCCESS(status) && walk_state->result_obj) { status = acpi_ds_result_push(walk_state->result_obj, walk_state); } break; default: switch (op_type) { case AML_TYPE_CONTROL: /* Type 1 opcode, IF/ELSE/WHILE/NOOP */ /* 1 Operand, 0 external_result, 0 internal_result */ status = acpi_ds_exec_end_control_op(walk_state, op); break; case AML_TYPE_METHOD_CALL: /* * If the method is referenced from within a package * declaration, it is not a invocation of the method, just * a reference to it. */ if ((op->asl.parent) && ((op->asl.parent->asl.aml_opcode == AML_PACKAGE_OP) || (op->asl.parent->asl.aml_opcode == AML_VARIABLE_PACKAGE_OP))) { ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Method Reference in a Package, Op=%p\n", op)); op->common.node = (struct acpi_namespace_node *) op->asl.value.arg->asl.node; acpi_ut_add_reference(op->asl.value.arg->asl. 
node->object); return_ACPI_STATUS(AE_OK); } ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Method invocation, Op=%p\n", op)); /* * (AML_METHODCALL) Op->Asl.Value.Arg->Asl.Node contains * the method Node pointer */ /* next_op points to the op that holds the method name */ next_op = first_arg; /* next_op points to first argument op */ next_op = next_op->common.next; /* * Get the method's arguments and put them on the operand stack */ status = acpi_ds_create_operands(walk_state, next_op); if (ACPI_FAILURE(status)) { break; } /* * Since the operands will be passed to another control method, * we must resolve all local references here (Local variables, * arguments to *this* method, etc.) */ status = acpi_ds_resolve_operands(walk_state); if (ACPI_FAILURE(status)) { /* On error, clear all resolved operands */ acpi_ds_clear_operands(walk_state); break; } /* * Tell the walk loop to preempt this running method and * execute the new method */ status = AE_CTRL_TRANSFER; /* * Return now; we don't want to disturb anything, * especially the operand count! */ return_ACPI_STATUS(status); case AML_TYPE_CREATE_FIELD: ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Executing CreateField Buffer/Index Op=%p\n", op)); status = acpi_ds_load2_end_op(walk_state); if (ACPI_FAILURE(status)) { break; } status = acpi_ds_eval_buffer_field_operands(walk_state, op); if (ACPI_FAILURE(status)) { break; } #ifdef ACPI_EXEC_APP /* * acpi_exec support for namespace initialization file (initialize * buffer_fields in this code.) */ namepath = acpi_ns_get_external_pathname(op->common.node); status = ae_lookup_init_file_entry(namepath, &obj_desc); if (ACPI_SUCCESS(status)) { status = acpi_ex_write_data_to_field(obj_desc, op->common. 
node->object, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While writing to buffer field")); } } ACPI_FREE(namepath); status = AE_OK; #endif break; case AML_TYPE_CREATE_OBJECT: ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Executing CreateObject (Buffer/Package) Op=%p Child=%p ParentOpcode=%4.4X\n", op, op->named.value.arg, op->common.parent->common. aml_opcode)); switch (op->common.parent->common.aml_opcode) { case AML_NAME_OP: /* * Put the Node on the object stack (Contains the ACPI Name * of this object) */ walk_state->operands[0] = (void *) op->common.parent->common.node; walk_state->num_operands = 1; status = acpi_ds_create_node(walk_state, op->common.parent-> common.node, op->common.parent); if (ACPI_FAILURE(status)) { break; } ACPI_FALLTHROUGH; case AML_INT_EVAL_SUBTREE_OP: status = acpi_ds_eval_data_object_operands (walk_state, op, acpi_ns_get_attached_object(op->common. parent->common. node)); break; default: status = acpi_ds_eval_data_object_operands (walk_state, op, NULL); break; } /* * If a result object was returned from above, push it on the * current result stack */ if (walk_state->result_obj) { status = acpi_ds_result_push(walk_state->result_obj, walk_state); } break; case AML_TYPE_NAMED_FIELD: case AML_TYPE_NAMED_COMPLEX: case AML_TYPE_NAMED_SIMPLE: case AML_TYPE_NAMED_NO_OBJ: status = acpi_ds_load2_end_op(walk_state); if (ACPI_FAILURE(status)) { break; } if (op->common.aml_opcode == AML_REGION_OP) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Executing OpRegion Address/Length Op=%p\n", op)); status = acpi_ds_eval_region_operands(walk_state, op); if (ACPI_FAILURE(status)) { break; } } else if (op->common.aml_opcode == AML_DATA_REGION_OP) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Executing DataTableRegion Strings Op=%p\n", op)); status = acpi_ds_eval_table_region_operands (walk_state, op); if (ACPI_FAILURE(status)) { break; } } else if (op->common.aml_opcode == AML_BANK_FIELD_OP) { ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Executing BankField Op=%p\n", op)); status 
= acpi_ds_eval_bank_field_operands(walk_state, op); if (ACPI_FAILURE(status)) { break; } } break; case AML_TYPE_UNDEFINED: ACPI_ERROR((AE_INFO, "Undefined opcode type Op=%p", op)); return_ACPI_STATUS(AE_NOT_IMPLEMENTED); case AML_TYPE_BOGUS: ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Internal opcode=%X type Op=%p\n", walk_state->opcode, op)); break; default: ACPI_ERROR((AE_INFO, "Unimplemented opcode, class=0x%X " "type=0x%X Opcode=0x%X Op=%p", op_class, op_type, op->common.aml_opcode, op)); status = AE_NOT_IMPLEMENTED; break; } } /* * ACPI 2.0 support for 64-bit integers: Truncate numeric * result value if we are executing from a 32-bit ACPI table */ (void)acpi_ex_truncate_for32bit_table(walk_state->result_obj); /* * Check if we just completed the evaluation of a * conditional predicate */ if ((ACPI_SUCCESS(status)) && (walk_state->control_state) && (walk_state->control_state->common.state == ACPI_CONTROL_PREDICATE_EXECUTING) && (walk_state->control_state->control.predicate_op == op)) { status = acpi_ds_get_predicate_value(walk_state, walk_state->result_obj); walk_state->result_obj = NULL; } cleanup: if (walk_state->result_obj) { /* Break to debugger to display result */ acpi_db_display_result_object(walk_state->result_obj, walk_state); /* * Delete the result op if and only if: * Parent will not use the result -- such as any * non-nested type2 op in a method (parent will be method) */ acpi_ds_delete_result_if_not_used(op, walk_state->result_obj, walk_state); } #ifdef _UNDER_DEVELOPMENT if (walk_state->parser_state.aml == walk_state->parser_state.aml_end) { acpi_db_method_end(walk_state); } #endif /* Invoke exception handler on error */ if (ACPI_FAILURE(status)) { status = acpi_ds_method_error(status, walk_state); } /* Always clear the object stack */ walk_state->num_operands = 0; return_ACPI_STATUS(status); }
/* End of fragment — source: linux-master drivers/acpi/acpica/dswexec.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: rsdump - AML debugger support for resource structures. * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rsdump") /* * All functions in this module are used by the AML Debugger only */ /* Local prototypes */ static void acpi_rs_out_string(const char *title, const char *value); static void acpi_rs_out_integer8(const char *title, u8 value); static void acpi_rs_out_integer16(const char *title, u16 value); static void acpi_rs_out_integer32(const char *title, u32 value); static void acpi_rs_out_integer64(const char *title, u64 value); static void acpi_rs_out_title(const char *title); static void acpi_rs_dump_byte_list(u16 length, u8 *data); static void acpi_rs_dump_word_list(u16 length, u16 *data); static void acpi_rs_dump_dword_list(u8 length, u32 *data); static void acpi_rs_dump_short_byte_list(u8 length, u8 *data); static void acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source); static void acpi_rs_dump_resource_label(char *title, struct acpi_resource_label *resource_label); static void acpi_rs_dump_address_common(union acpi_resource_data *resource); static void acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table); /******************************************************************************* * * FUNCTION: acpi_rs_dump_resource_list * * PARAMETERS: resource_list - Pointer to a resource descriptor list * * RETURN: None * * DESCRIPTION: Dispatches the structure to the correct dump routine. 
* ******************************************************************************/ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list) { u32 count = 0; u32 type; ACPI_FUNCTION_ENTRY(); /* Check if debug output enabled */ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) { return; } /* Walk list and dump all resource descriptors (END_TAG terminates) */ do { acpi_os_printf("\n[%02X] ", count); count++; /* Validate Type before dispatch */ type = resource_list->type; if (type > ACPI_RESOURCE_TYPE_MAX) { acpi_os_printf ("Invalid descriptor type (%X) in resource list\n", resource_list->type); return; } else if (!resource_list->type) { ACPI_ERROR((AE_INFO, "Invalid Zero Resource Type")); return; } /* Sanity check the length. It must not be zero, or we loop forever */ if (!resource_list->length) { acpi_os_printf ("Invalid zero length descriptor in resource list\n"); return; } /* Dump the resource descriptor */ if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { acpi_rs_dump_descriptor(&resource_list->data, acpi_gbl_dump_serial_bus_dispatch [resource_list->data. common_serial_bus.type]); } else { acpi_rs_dump_descriptor(&resource_list->data, acpi_gbl_dump_resource_dispatch [type]); } /* Point to the next resource structure */ resource_list = ACPI_NEXT_RESOURCE(resource_list); /* Exit when END_TAG descriptor is reached */ } while (type != ACPI_RESOURCE_TYPE_END_TAG); } /******************************************************************************* * * FUNCTION: acpi_rs_dump_irq_list * * PARAMETERS: route_table - Pointer to the routing table to dump. 
* * RETURN: None * * DESCRIPTION: Print IRQ routing table * ******************************************************************************/ void acpi_rs_dump_irq_list(u8 *route_table) { struct acpi_pci_routing_table *prt_element; u8 count; ACPI_FUNCTION_ENTRY(); /* Check if debug output enabled */ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_RESOURCES, _COMPONENT)) { return; } prt_element = ACPI_CAST_PTR(struct acpi_pci_routing_table, route_table); /* Dump all table elements, Exit on zero length element */ for (count = 0; prt_element->length; count++) { acpi_os_printf("\n[%02X] PCI IRQ Routing Table Package\n", count); acpi_rs_dump_descriptor(prt_element, acpi_rs_dump_prt); prt_element = ACPI_ADD_PTR(struct acpi_pci_routing_table, prt_element, prt_element->length); } } /******************************************************************************* * * FUNCTION: acpi_rs_dump_descriptor * * PARAMETERS: resource - Buffer containing the resource * table - Table entry to decode the resource * * RETURN: None * * DESCRIPTION: Dump a resource descriptor based on a dump table entry. 
 *
 ******************************************************************************/
static void
acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
{
	u8 *target = NULL;
	u8 *previous_target;
	const char *name;
	u8 count;

	/* First table entry must contain the table length (# of table entries) */

	count = table->offset;

	while (count) {
		/*
		 * Remember the previous field: the *LIST opcodes below read
		 * their element count from the field decoded just before them,
		 * so the table order is significant.
		 */
		previous_target = target;
		target = ACPI_ADD_PTR(u8, resource, table->offset);
		name = table->name;

		switch (table->opcode) {
		case ACPI_RSD_TITLE:
			/*
			 * Optional resource title
			 */
			if (table->name) {
				acpi_os_printf("%s Resource\n", name);
			}
			break;

			/* Strings */

		case ACPI_RSD_LITERAL:
			acpi_rs_out_string(name,
					   ACPI_CAST_PTR(char, table->pointer));
			break;

		case ACPI_RSD_STRING:
			acpi_rs_out_string(name, ACPI_CAST_PTR(char, target));
			break;

			/* Data items, 8/16/32/64 bit */

		case ACPI_RSD_UINT8:
			/* A non-NULL table pointer is a name lookup table for the value */
			if (table->pointer) {
				acpi_rs_out_string(name,
						   table->pointer[*target]);
			} else {
				acpi_rs_out_integer8(name, ACPI_GET8(target));
			}
			break;

		case ACPI_RSD_UINT16:
			acpi_rs_out_integer16(name, ACPI_GET16(target));
			break;

		case ACPI_RSD_UINT32:
			acpi_rs_out_integer32(name, ACPI_GET32(target));
			break;

		case ACPI_RSD_UINT64:
			acpi_rs_out_integer64(name, ACPI_GET64(target));
			break;

			/* Flags: 1-bit and 2-bit flags supported */

		case ACPI_RSD_1BITFLAG:
			acpi_rs_out_string(name,
					   table->pointer[*target & 0x01]);
			break;

		case ACPI_RSD_2BITFLAG:
			acpi_rs_out_string(name,
					   table->pointer[*target & 0x03]);
			break;

		case ACPI_RSD_3BITFLAG:
			acpi_rs_out_string(name,
					   table->pointer[*target & 0x07]);
			break;

		case ACPI_RSD_6BITFLAG:
			acpi_rs_out_integer8(name, (ACPI_GET8(target) & 0x3F));
			break;

		case ACPI_RSD_SHORTLIST:
			/*
			 * Short byte list (single line output) for DMA and IRQ resources
			 * Note: The list length is obtained from the previous table entry
			 */
			if (previous_target) {
				acpi_rs_out_title(name);
				acpi_rs_dump_short_byte_list(*previous_target,
							     target);
			}
			break;

		case ACPI_RSD_SHORTLISTX:
			/*
			 * Short byte list (single line output) for GPIO vendor data
			 * Note: The list length is obtained from the previous table entry
			 */
			if (previous_target) {
				acpi_rs_out_title(name);
				acpi_rs_dump_short_byte_list(*previous_target,
							     *
							     (ACPI_CAST_INDIRECT_PTR
							      (u8, target)));
			}
			break;

		case ACPI_RSD_LONGLIST:
			/*
			 * Long byte list for Vendor resource data
			 * Note: The list length is obtained from the previous table entry
			 */
			if (previous_target) {
				acpi_rs_dump_byte_list(ACPI_GET16
						       (previous_target),
						       target);
			}
			break;

		case ACPI_RSD_DWORDLIST:
			/*
			 * Dword list for Extended Interrupt resources
			 * Note: The list length is obtained from the previous table entry
			 */
			if (previous_target) {
				acpi_rs_dump_dword_list(*previous_target,
							ACPI_CAST_PTR(u32,
								      target));
			}
			break;

		case ACPI_RSD_WORDLIST:
			/*
			 * Word list for GPIO Pin Table
			 * Note: The list length is obtained from the previous table entry
			 */
			if (previous_target) {
				acpi_rs_dump_word_list(*previous_target,
						       *(ACPI_CAST_INDIRECT_PTR
							 (u16, target)));
			}
			break;

		case ACPI_RSD_ADDRESS:
			/*
			 * Common flags for all Address resources
			 */
			acpi_rs_dump_address_common(ACPI_CAST_PTR
						    (union acpi_resource_data,
						     target));
			break;

		case ACPI_RSD_SOURCE:
			/*
			 * Optional resource_source for Address resources
			 */
			acpi_rs_dump_resource_source(ACPI_CAST_PTR
						     (struct
						      acpi_resource_source,
						      target));
			break;

		case ACPI_RSD_LABEL:
			/*
			 * resource_label
			 */
			acpi_rs_dump_resource_label("Resource Label",
						    ACPI_CAST_PTR(struct
								  acpi_resource_label,
								  target));
			break;

		case ACPI_RSD_SOURCE_LABEL:
			/*
			 * resource_source_label
			 */
			acpi_rs_dump_resource_label("Resource Source Label",
						    ACPI_CAST_PTR(struct
								  acpi_resource_label,
								  target));
			break;

		default:
			/* Unknown opcode: report it and abandon this descriptor */
			acpi_os_printf("**** Invalid table opcode [%X] ****\n",
				       table->opcode);
			return;
		}

		table++;
		count--;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_dump_resource_source
 *
 * PARAMETERS:  resource_source     - Pointer to a Resource Source struct
 *
 * RETURN:      None
 *
 * DESCRIPTION: Common routine for dumping the optional resource_source and the
 *              corresponding resource_source_index.
* ******************************************************************************/ static void acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source) { ACPI_FUNCTION_ENTRY(); if (resource_source->index == 0xFF) { return; } acpi_rs_out_integer8("Resource Source Index", resource_source->index); acpi_rs_out_string("Resource Source", resource_source->string_ptr ? resource_source->string_ptr : "[Not Specified]"); } /******************************************************************************* * * FUNCTION: acpi_rs_dump_resource_label * * PARAMETERS: title - Title of the dumped resource field * resource_label - Pointer to a Resource Label struct * * RETURN: None * * DESCRIPTION: Common routine for dumping the resource_label * ******************************************************************************/ static void acpi_rs_dump_resource_label(char *title, struct acpi_resource_label *resource_label) { ACPI_FUNCTION_ENTRY(); acpi_rs_out_string(title, resource_label->string_ptr ? 
resource_label->string_ptr : "[Not Specified]"); } /******************************************************************************* * * FUNCTION: acpi_rs_dump_address_common * * PARAMETERS: resource - Pointer to an internal resource descriptor * * RETURN: None * * DESCRIPTION: Dump the fields that are common to all Address resource * descriptors * ******************************************************************************/ static void acpi_rs_dump_address_common(union acpi_resource_data *resource) { ACPI_FUNCTION_ENTRY(); /* Decode the type-specific flags */ switch (resource->address.resource_type) { case ACPI_MEMORY_RANGE: acpi_rs_dump_descriptor(resource, acpi_rs_dump_memory_flags); break; case ACPI_IO_RANGE: acpi_rs_dump_descriptor(resource, acpi_rs_dump_io_flags); break; case ACPI_BUS_NUMBER_RANGE: acpi_rs_out_string("Resource Type", "Bus Number Range"); break; default: acpi_rs_out_integer8("Resource Type", (u8) resource->address.resource_type); break; } /* Decode the general flags */ acpi_rs_dump_descriptor(resource, acpi_rs_dump_general_flags); } /******************************************************************************* * * FUNCTION: acpi_rs_out* * * PARAMETERS: title - Name of the resource field * value - Value of the resource field * * RETURN: None * * DESCRIPTION: Miscellaneous helper functions to consistently format the * output of the resource dump routines * ******************************************************************************/ static void acpi_rs_out_string(const char *title, const char *value) { acpi_os_printf("%27s : %s", title, value); if (!*value) { acpi_os_printf("[NULL NAMESTRING]"); } acpi_os_printf("\n"); } static void acpi_rs_out_integer8(const char *title, u8 value) { acpi_os_printf("%27s : %2.2X\n", title, value); } static void acpi_rs_out_integer16(const char *title, u16 value) { acpi_os_printf("%27s : %4.4X\n", title, value); } static void acpi_rs_out_integer32(const char *title, u32 value) { acpi_os_printf("%27s : 
%8.8X\n", title, value); } static void acpi_rs_out_integer64(const char *title, u64 value) { acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value)); } static void acpi_rs_out_title(const char *title) { acpi_os_printf("%27s : ", title); } /******************************************************************************* * * FUNCTION: acpi_rs_dump*List * * PARAMETERS: length - Number of elements in the list * data - Start of the list * * RETURN: None * * DESCRIPTION: Miscellaneous functions to dump lists of raw data * ******************************************************************************/ static void acpi_rs_dump_byte_list(u16 length, u8 * data) { u16 i; for (i = 0; i < length; i++) { acpi_os_printf("%25s%2.2X : %2.2X\n", "Byte", i, data[i]); } } static void acpi_rs_dump_short_byte_list(u8 length, u8 * data) { u8 i; for (i = 0; i < length; i++) { acpi_os_printf("%X ", data[i]); } acpi_os_printf("\n"); } static void acpi_rs_dump_dword_list(u8 length, u32 * data) { u8 i; for (i = 0; i < length; i++) { acpi_os_printf("%25s%2.2X : %8.8X\n", "Dword", i, data[i]); } } static void acpi_rs_dump_word_list(u16 length, u16 *data) { u16 i; for (i = 0; i < length; i++) { acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]); } }
/* End of fragment — source: linux-master drivers/acpi/acpica/rsdump.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: utmath - Integer math support routines
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utmath")

/* Structures used only for 64-bit divide */
/* Low/high halves of a 64-bit value, overlaid on the full value below */
typedef struct uint64_struct {
	u32 lo;
	u32 hi;
} uint64_struct;

typedef union uint64_overlay {
	u64 full;
	struct uint64_struct part;
} uint64_overlay;

/*
 * Optional support for 64-bit double-precision integer multiply and shift.
 * This code is configurable and is implemented in order to support 32-bit
 * kernel environments where a 64-bit double-precision math library is not
 * available.
 */
#ifndef ACPI_USE_NATIVE_MATH64

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_multiply
 *
 * PARAMETERS:  multiplicand        - 64-bit multiplicand
 *              multiplier          - 32-bit multiplier
 *              out_product         - Pointer to where the product is returned
 *                                    (may be NULL if the result is not needed)
 *
 * DESCRIPTION: Perform a short multiply.
 *
 ******************************************************************************/
acpi_status
acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
{
	union uint64_overlay multiplicand_ovl;
	union uint64_overlay product;
	u32 carry32;

	ACPI_FUNCTION_TRACE(ut_short_multiply);

	multiplicand_ovl.full = multiplicand;

	/*
	 * The Product is 64 bits, the carry is always 32 bits,
	 * and is generated by the second multiply.
	 */
	ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier,
			  product.part.hi, carry32);

	ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier,
			  product.part.lo, carry32);

	/* Fold the low-half carry into the high half of the product */

	product.part.hi += carry32;

	/* Return only what was requested */

	if (out_product) {
		*out_product = product.full;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_left
 *
 * PARAMETERS:  operand             - 64-bit shift operand
 *              count               - 32-bit shift count
 *              out_result          - Pointer to where the result is returned
 *
 * DESCRIPTION: Perform a short left shift.
 *
 ******************************************************************************/
acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
{
	union uint64_overlay operand_ovl;

	ACPI_FUNCTION_TRACE(ut_short_shift_left);

	operand_ovl.full = operand;

	/*
	 * Shift counts of 32..63 are handled by moving the low half into the
	 * high half first, then shifting by the remainder. Counts are taken
	 * modulo 64 (count & 63).
	 */
	if ((count & 63) >= 32) {
		operand_ovl.part.hi = operand_ovl.part.lo;
		operand_ovl.part.lo = 0;
		count = (count & 63) - 32;
	}
	ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
				 operand_ovl.part.lo, count);

	/* Return only what was requested */

	if (out_result) {
		*out_result = operand_ovl.full;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_right
 *
 * PARAMETERS:  operand             - 64-bit shift operand
 *              count               - 32-bit shift count
 *              out_result          - Pointer to where the result is returned
 *
 * DESCRIPTION: Perform a short right shift.
 *
 ******************************************************************************/
acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
{
	union uint64_overlay operand_ovl;

	ACPI_FUNCTION_TRACE(ut_short_shift_right);

	operand_ovl.full = operand;

	/* Mirror of short_shift_left: move high half down for counts >= 32 */

	if ((count & 63) >= 32) {
		operand_ovl.part.lo = operand_ovl.part.hi;
		operand_ovl.part.hi = 0;
		count = (count & 63) - 32;
	}
	ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
				  operand_ovl.part.lo, count);

	/* Return only what was requested */

	if (out_result) {
		*out_result = operand_ovl.full;
	}

	return_ACPI_STATUS(AE_OK);
}
#else

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_multiply
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native version of the ut_short_multiply function.
 *
 ******************************************************************************/
acpi_status
acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
{

	ACPI_FUNCTION_TRACE(ut_short_multiply);

	/* Return only what was requested */

	if (out_product) {
		*out_product = multiplicand * multiplier;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_left
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native version of the ut_short_shift_left function.
 *
 ******************************************************************************/
acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
{

	ACPI_FUNCTION_TRACE(ut_short_shift_left);

	/*
	 * Return only what was requested. NOTE(review): unlike the non-native
	 * path, the count is not masked to 63 here; a count >= 64 is undefined
	 * behavior in C — presumably callers never pass one. Verify if touched.
	 */
	if (out_result) {
		*out_result = operand << count;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_shift_right
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native version of the ut_short_shift_right function.
 *
 ******************************************************************************/
acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
{

	ACPI_FUNCTION_TRACE(ut_short_shift_right);

	/* Return only what was requested (same shift-count caveat as above) */

	if (out_result) {
		*out_result = operand >> count;
	}

	return_ACPI_STATUS(AE_OK);
}
#endif

/*
 * Optional support for 64-bit double-precision integer divide. This code
 * is configurable and is implemented in order to support 32-bit kernel
 * environments where a 64-bit double-precision math library is not available.
 *
 * Support for a more normal 64-bit divide/modulo (with check for a divide-
 * by-zero) appears after this optional section of code.
 */
#ifndef ACPI_USE_NATIVE_DIVIDE

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_divide
 *
 * PARAMETERS:  dividend            - 64-bit dividend
 *              divisor             - 32-bit divisor
 *              out_quotient        - Pointer to where the quotient is returned
 *              out_remainder       - Pointer to where the remainder is returned
 *
 * RETURN:      Status (Checks for divide-by-zero)
 *
 * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
 *              divide and modulo. The result is a 64-bit quotient and a
 *              32-bit remainder.
 *
 ******************************************************************************/
acpi_status
acpi_ut_short_divide(u64 dividend,
		     u32 divisor, u64 *out_quotient, u32 *out_remainder)
{
	union uint64_overlay dividend_ovl;
	union uint64_overlay quotient;
	u32 remainder32;

	ACPI_FUNCTION_TRACE(ut_short_divide);

	/* Always check for a zero divisor */

	if (divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	dividend_ovl.full = dividend;

	/*
	 * The quotient is 64 bits, the remainder is always 32 bits,
	 * and is generated by the second divide. The remainder of the first
	 * divide feeds the second as its high word (classic long division).
	 */
	ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor,
			  quotient.part.hi, remainder32);

	ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor,
			  quotient.part.lo, remainder32);

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = quotient.full;
	}
	if (out_remainder) {
		*out_remainder = remainder32;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_divide
 *
 * PARAMETERS:  in_dividend         - Dividend
 *              in_divisor          - Divisor
 *              out_quotient        - Pointer to where the quotient is returned
 *              out_remainder       - Pointer to where the remainder is returned
 *
 * RETURN:      Status (Checks for divide-by-zero)
 *
 * DESCRIPTION: Perform a divide and modulo.
 *
 ******************************************************************************/
acpi_status
acpi_ut_divide(u64 in_dividend,
	       u64 in_divisor, u64 *out_quotient, u64 *out_remainder)
{
	union uint64_overlay dividend;
	union uint64_overlay divisor;
	union uint64_overlay quotient;
	union uint64_overlay remainder;
	union uint64_overlay normalized_dividend;
	union uint64_overlay normalized_divisor;
	u32 partial1;
	union uint64_overlay partial2;
	union uint64_overlay partial3;

	ACPI_FUNCTION_TRACE(ut_divide);

	/* Always check for a zero divisor */

	if (in_divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	divisor.full = in_divisor;
	dividend.full = in_dividend;
	if (divisor.part.hi == 0) {
		/*
		 * 1) Simplest case is where the divisor is 32 bits, we can
		 * just do two divides
		 */
		remainder.part.hi = 0;

		/*
		 * The quotient is 64 bits, the remainder is always 32 bits,
		 * and is generated by the second divide.
		 */
		ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo,
				  quotient.part.hi, partial1);

		ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo,
				  quotient.part.lo, remainder.part.lo);
	}

	else {
		/*
		 * 2) The general case where the divisor is a full 64 bits
		 * is more difficult
		 */
		quotient.part.hi = 0;
		normalized_dividend = dividend;
		normalized_divisor = divisor;

		/* Normalize the operands (shift until the divisor is < 32 bits) */

		do {
			ACPI_SHIFT_RIGHT_64(normalized_divisor.part.hi,
					    normalized_divisor.part.lo);
			ACPI_SHIFT_RIGHT_64(normalized_dividend.part.hi,
					    normalized_dividend.part.lo);

		} while (normalized_divisor.part.hi != 0);

		/* Partial divide */

		ACPI_DIV_64_BY_32(normalized_dividend.part.hi,
				  normalized_dividend.part.lo,
				  normalized_divisor.part.lo,
				  quotient.part.lo, partial1);

		/*
		 * The quotient is always 32 bits, and simply requires
		 * adjustment. The 64-bit remainder must be generated.
		 */
		partial1 = quotient.part.lo * divisor.part.hi;
		partial2.full = (u64) quotient.part.lo * divisor.part.lo;
		partial3.full = (u64) partial2.part.hi + partial1;

		remainder.part.hi = partial3.part.lo;
		remainder.part.lo = partial2.part.lo;

		/*
		 * NOTE(review): the adjustment below corrects the estimated
		 * quotient by at most one and reconstructs the 64-bit
		 * remainder via two's-complement negation with manual borrow.
		 * The statement order is load-bearing; verify against the
		 * upstream ACPICA utmath.c before modifying.
		 */
		if (partial3.part.hi == 0) {
			if (partial3.part.lo >= dividend.part.hi) {
				if (partial3.part.lo == dividend.part.hi) {
					if (partial2.part.lo >
					    dividend.part.lo) {
						quotient.part.lo--;
						remainder.full -= divisor.full;
					}
				} else {
					quotient.part.lo--;
					remainder.full -= divisor.full;
				}
			}

			remainder.full = remainder.full - dividend.full;
			remainder.part.hi = (u32)-((s32)remainder.part.hi);
			remainder.part.lo = (u32)-((s32)remainder.part.lo);

			if (remainder.part.lo) {
				remainder.part.hi--;
			}
		}
	}

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = quotient.full;
	}
	if (out_remainder) {
		*out_remainder = remainder.full;
	}

	return_ACPI_STATUS(AE_OK);
}

#else

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_short_divide, acpi_ut_divide
 *
 * PARAMETERS:  See function headers above
 *
 * DESCRIPTION: Native versions of the ut_divide functions. Use these if either
 *              1) The target is a 64-bit platform and therefore 64-bit
 *                 integer math is supported directly by the machine.
 *              2) The target is a 32-bit or 16-bit platform, and the
 *                 double-precision integer math library is available to
 *                 perform the divide.
 *
 ******************************************************************************/
acpi_status
acpi_ut_short_divide(u64 in_dividend,
		     u32 divisor, u64 *out_quotient, u32 *out_remainder)
{

	ACPI_FUNCTION_TRACE(ut_short_divide);

	/* Always check for a zero divisor */

	if (divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = in_dividend / divisor;
	}
	if (out_remainder) {
		*out_remainder = (u32) (in_dividend % divisor);
	}

	return_ACPI_STATUS(AE_OK);
}

acpi_status
acpi_ut_divide(u64 in_dividend,
	       u64 in_divisor, u64 *out_quotient, u64 *out_remainder)
{

	ACPI_FUNCTION_TRACE(ut_divide);

	/* Always check for a zero divisor */

	if (in_divisor == 0) {
		ACPI_ERROR((AE_INFO, "Divide by zero"));
		return_ACPI_STATUS(AE_AML_DIVIDE_BY_ZERO);
	}

	/* Return only what was requested */

	if (out_quotient) {
		*out_quotient = in_dividend / in_divisor;
	}
	if (out_remainder) {
		*out_remainder = in_dividend % in_divisor;
	}

	return_ACPI_STATUS(AE_OK);
}

#endif
/* End of fragment — source: linux-master drivers/acpi/acpica/utmath.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: dspkginit - Completion of deferred package initialization * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "amlcode.h" #include "acdispat.h" #include "acinterp.h" #include "acparser.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("dspkginit") /* Local prototypes */ static void acpi_ds_resolve_package_element(union acpi_operand_object **element); /******************************************************************************* * * FUNCTION: acpi_ds_build_internal_package_obj * * PARAMETERS: walk_state - Current walk state * op - Parser object to be translated * element_count - Number of elements in the package - this is * the num_elements argument to Package() * obj_desc_ptr - Where the ACPI internal object is returned * * RETURN: Status * * DESCRIPTION: Translate a parser Op package object to the equivalent * namespace object * * NOTE: The number of elements in the package will be always be the num_elements * count, regardless of the number of elements in the package list. If * num_elements is smaller, only that many package list elements are used. * if num_elements is larger, the Package object is padded out with * objects of type Uninitialized (as per ACPI spec.) * * Even though the ASL compilers do not allow num_elements to be smaller * than the Package list length (for the fixed length package opcode), some * BIOS code modifies the AML on the fly to adjust the num_elements, and * this code compensates for that. This also provides compatibility with * other AML interpreters. 
 *
 ******************************************************************************/

acpi_status
acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
				   union acpi_parse_object *op,
				   u32 element_count,
				   union acpi_operand_object **obj_desc_ptr)
{
	union acpi_parse_object *arg;
	union acpi_parse_object *parent;
	union acpi_operand_object *obj_desc = NULL;
	acpi_status status = AE_OK;
	u8 module_level_code = FALSE;
	u16 reference_count;
	u32 index;
	u32 i;

	ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);

	/* Check if we are executing module level code */

	if (walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL) {
		module_level_code = TRUE;
	}

	/* Find the parent of a possibly nested package */

	parent = op->common.parent;
	while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
	       (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
		parent = parent->common.parent;
	}

	/*
	 * If we are evaluating a Named package object of the form:
	 *      Name (xxxx, Package)
	 * the package object already exists, otherwise it must be created.
	 */
	obj_desc = *obj_desc_ptr;
	if (!obj_desc) {
		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
		*obj_desc_ptr = obj_desc;
		if (!obj_desc) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		obj_desc->package.node = parent->common.node;
	}

	if (obj_desc->package.flags & AOPOBJ_DATA_VALID) {	/* Just in case */
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Allocate the element array (array of pointers to the individual
	 * objects) if necessary. the count is based on the num_elements
	 * parameter. Add an extra pointer slot so that the list is always
	 * null terminated.
	 */
	if (!obj_desc->package.elements) {
		obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
								   element_count
								   + 1) *
								  sizeof(void
									 *));

		if (!obj_desc->package.elements) {
			acpi_ut_delete_object_desc(obj_desc);
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		obj_desc->package.count = element_count;
	}

	/* First arg is element count. Second arg begins the initializer list */

	arg = op->common.value.arg;
	arg = arg->common.next;

	/*
	 * If we are executing module-level code, we will defer the
	 * full resolution of the package elements in order to support
	 * forward references from the elements. This provides
	 * compatibility with other ACPI implementations.
	 */
	if (module_level_code) {
		obj_desc->package.aml_start = walk_state->aml;
		obj_desc->package.aml_length = 0;

		ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
				      "%s: Deferring resolution of Package elements\n",
				      ACPI_GET_FUNCTION_NAME));
	}

	/*
	 * Initialize the elements of the package, up to the num_elements count.
	 * Package is automatically padded with uninitialized (NULL) elements
	 * if num_elements is greater than the package list length. Likewise,
	 * Package is truncated if num_elements is less than the list length.
	 */
	for (i = 0; arg && (i < element_count); i++) {
		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
			if (!arg->common.node) {
				/*
				 * This is the case where an expression has returned a value.
				 * The use of expressions (term_args) within individual
				 * package elements is not supported by the AML interpreter,
				 * even though the ASL grammar supports it. Example:
				 *
				 *      Name (INT1, 0x1234)
				 *
				 *      Name (PKG3, Package () {
				 *          Add (INT1, 0xAAAA0000)
				 *      })
				 *
				 *  1) No known AML interpreter supports this type of construct
				 *  2) This fixes a fault if the construct is encountered
				 */
				ACPI_EXCEPTION((AE_INFO, AE_SUPPORT,
						"Expressions within package elements are not supported"));

				/* Cleanup the return object, it is not needed */

				acpi_ut_remove_reference(walk_state->results->
							 results.obj_desc[0]);
				return_ACPI_STATUS(AE_SUPPORT);
			}

			if (arg->common.node->type == ACPI_TYPE_METHOD) {
				/*
				 * A method reference "looks" to the parser to be a method
				 * invocation, so we special case it here
				 */
				arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
				status =
				    acpi_ds_build_internal_object(walk_state,
								  arg,
								  &obj_desc->
								  package.
								  elements[i]);
			} else {
				/* This package element is already built, just get it */

				obj_desc->package.elements[i] =
				    ACPI_CAST_PTR(union acpi_operand_object,
						  arg->common.node);
			}
		} else {
			status =
			    acpi_ds_build_internal_object(walk_state, arg,
							  &obj_desc->package.
							  elements[i]);
			if (status == AE_NOT_FOUND) {
				ACPI_ERROR((AE_INFO, "%-48s",
					    "****DS namepath not found"));
			}

			if (!module_level_code) {
				/*
				 * Initialize this package element. This function handles the
				 * resolution of named references within the package.
				 * Forward references from module-level code are deferred
				 * until all ACPI tables are loaded.
				 */
				acpi_ds_init_package_element(0,
							     obj_desc->package.
							     elements[i], NULL,
							     &obj_desc->package.
							     elements[i]);
			}
		}

		if (*obj_desc_ptr) {

			/* Existing package, get existing reference count */

			reference_count =
			    (*obj_desc_ptr)->common.reference_count;
			if (reference_count > 1) {

				/* Make new element ref count match original ref count */
				/* TBD: Probably need an acpi_ut_add_references function */

				for (index = 0;
				     index < ((u32)reference_count - 1);
				     index++) {
					acpi_ut_add_reference((obj_desc->
							       package.
							       elements[i]));
				}
			}
		}

		arg = arg->common.next;
	}

	/* Check for match between num_elements and actual length of package_list */

	if (arg) {
		/*
		 * num_elements was exhausted, but there are remaining elements in
		 * the package_list. Truncate the package to num_elements.
		 *
		 * Note: technically, this is an error, from ACPI spec: "It is an
		 * error for NumElements to be less than the number of elements in
		 * the PackageList". However, we just print a message and no
		 * exception is returned. This provides compatibility with other
		 * ACPI implementations. Some firmware implementations will alter
		 * the num_elements on the fly, possibly creating this type of
		 * ill-formed package object.
		 */
		while (arg) {
			/*
			 * We must delete any package elements that were created earlier
			 * and are not going to be used because of the package truncation.
			 */
			if (arg->common.node) {
				acpi_ut_remove_reference(ACPI_CAST_PTR
							 (union
							  acpi_operand_object,
							  arg->common.node));
				arg->common.node = NULL;
			}

			/* Find out how many elements there really are */

			i++;
			arg = arg->common.next;
		}

		ACPI_INFO(("Actual Package length (%u) is larger than "
			   "NumElements field (%u), truncated",
			   i, element_count));
	} else if (i < element_count) {
		/*
		 * Arg list (elements) was exhausted, but we did not reach
		 * num_elements count.
		 *
		 * Note: this is not an error, the package is padded out
		 * with NULLs as per the ACPI specification.
		 */
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
				      "%s: Package List length (%u) smaller than NumElements "
				      "count (%u), padded with null elements\n",
				      ACPI_GET_FUNCTION_NAME, i,
				      element_count));
	}

	/* Module-level packages will be resolved later */

	if (!module_level_code) {
		obj_desc->package.flags |= AOPOBJ_DATA_VALID;
	}

	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_init_package_element
 *
 * PARAMETERS:  acpi_pkg_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Resolve a named reference element within a package object
 *
 ******************************************************************************/

acpi_status
acpi_ds_init_package_element(u8 object_type,
			     union acpi_operand_object *source_object,
			     union acpi_generic_state *state, void *context)
{
	union acpi_operand_object **element_ptr;

	ACPI_FUNCTION_TRACE(ds_init_package_element);

	/* NULL elements (package padding) need no resolution */

	if (!source_object) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * The following code is a bit of a hack to workaround a (current)
	 * limitation of the acpi_pkg_callback interface. We need a pointer
	 * to the location within the element array because a new object
	 * may be created and stored there.
	 */
	if (context) {

		/* A direct call was made to this function */

		element_ptr = (union acpi_operand_object **)context;
	} else {
		/* Call came from acpi_ut_walk_package_tree */

		element_ptr = state->pkg.this_target_obj;
	}

	/* We are only interested in reference objects/elements */

	if (source_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {

		/* Attempt to resolve the (named) reference to a namespace node */

		acpi_ds_resolve_package_element(element_ptr);
	} else if (source_object->common.type == ACPI_TYPE_PACKAGE) {
		source_object->package.flags |= AOPOBJ_DATA_VALID;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_resolve_package_element
 *
 * PARAMETERS:  element_ptr         - Pointer to a reference object
 *
 * RETURN:      Possible new element is stored to the indirect element_ptr
 *
 * DESCRIPTION: Resolve a package element that is a reference to a named
 *              object.
 *
 ******************************************************************************/

static void
acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
{
	acpi_status status;
	acpi_status status2;
	union acpi_generic_state scope_info;
	union acpi_operand_object *element = *element_ptr;
	struct acpi_namespace_node *resolved_node;
	struct acpi_namespace_node *original_node;
	char *external_path = "";
	acpi_object_type type;

	ACPI_FUNCTION_TRACE(ds_resolve_package_element);

	/* Check if reference element is already resolved */

	if (element->reference.resolved) {
		ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
				      "%s: Package element is already resolved\n",
				      ACPI_GET_FUNCTION_NAME));

		return_VOID;
	}

	/* Element must be a reference object of correct type */

	scope_info.scope.node = element->reference.node;	/* Prefix node */

	status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml,
				ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
				ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
				NULL, &resolved_node);
	if (ACPI_FAILURE(status)) {
		if ((status == AE_NOT_FOUND)
		    && acpi_gbl_ignore_package_resolution_errors) {
			/*
			 * Optionally be silent about the NOT_FOUND case for the referenced
			 * name. Although this is potentially a serious problem,
			 * it can generate a lot of noise/errors on platforms whose
			 * firmware carries around a bunch of unused Package objects.
			 * To disable these errors, set this global to TRUE:
			 *     acpi_gbl_ignore_package_resolution_errors
			 *
			 * If the AML actually tries to use such a package, the unresolved
			 * element(s) will be replaced with NULL elements.
			 */

			/* Referenced name not found, set the element to NULL */

			acpi_ut_remove_reference(*element_ptr);
			*element_ptr = NULL;
			return_VOID;
		}

		status2 = acpi_ns_externalize_name(ACPI_UINT32_MAX,
						   (char *)element->reference.
						   aml, NULL, &external_path);

		ACPI_EXCEPTION((AE_INFO, status,
				"While resolving a named reference package element - %s",
				external_path));

		/* Free the path only if externalization actually allocated it */

		if (ACPI_SUCCESS(status2)) {
			ACPI_FREE(external_path);
		}

		/* Could not resolve name, set the element to NULL */

		acpi_ut_remove_reference(*element_ptr);
		*element_ptr = NULL;
		return_VOID;
	} else if (resolved_node->type == ACPI_TYPE_ANY) {

		/* Named reference not resolved, return a NULL package element */

		ACPI_ERROR((AE_INFO,
			    "Could not resolve named package element [%4.4s] in [%4.4s]",
			    resolved_node->name.ascii,
			    scope_info.scope.node->name.ascii));
		*element_ptr = NULL;
		return_VOID;
	}

	/*
	 * Special handling for Alias objects. We need resolved_node to point
	 * to the Alias target. This effectively "resolves" the alias.
	 */
	if (resolved_node->type == ACPI_TYPE_LOCAL_ALIAS) {
		resolved_node = ACPI_CAST_PTR(struct acpi_namespace_node,
					      resolved_node->object);
	}

	/* Update the reference object */

	element->reference.resolved = TRUE;
	element->reference.node = resolved_node;
	type = element->reference.node->type;

	/*
	 * Attempt to resolve the node to a value before we insert it into
	 * the package. If this is a reference to a common data type,
	 * resolve it immediately. According to the ACPI spec, package
	 * elements can only be "data objects" or method references.
	 * Attempt to resolve to an Integer, Buffer, String or Package.
	 * If cannot, return the named reference (for things like Devices,
	 * Methods, etc.) Buffer Fields and Fields will resolve to simple
	 * objects (int/buf/str/pkg).
	 *
	 * NOTE: References to things like Devices, Methods, Mutexes, etc.
	 * will remain as named references. This behavior is not described
	 * in the ACPI spec, but it appears to be an oversight.
	 */
	original_node = resolved_node;
	status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	switch (type) {
		/*
		 * These object types are a result of named references, so we will
		 * leave them as reference objects. In other words, these types
		 * have no intrinsic "value".
		 */
	case ACPI_TYPE_DEVICE:
	case ACPI_TYPE_THERMAL:
	case ACPI_TYPE_METHOD:
		break;

	case ACPI_TYPE_MUTEX:
	case ACPI_TYPE_POWER:
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_EVENT:
	case ACPI_TYPE_REGION:

		/* acpi_ex_resolve_node_to_value gave these an extra reference */

		acpi_ut_remove_reference(original_node->object);
		break;

	default:
		/*
		 * For all other types - the node was resolved to an actual
		 * operand object with a value, return the object. Remove
		 * a reference on the existing object.
		 */
		acpi_ut_remove_reference(element);
		*element_ptr = (union acpi_operand_object *)resolved_node;
		break;
	}

	return_VOID;
}
linux-master
drivers/acpi/acpica/dspkginit.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: dbxface - AML Debugger external interfaces
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acdebug.h"
#include "acinterp.h"
#include "acparser.h"

#define _COMPONENT          ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbxface")

/* Local prototypes */
static acpi_status
acpi_db_start_command(struct acpi_walk_state *walk_state,
		      union acpi_parse_object *op);

#ifdef ACPI_OBSOLETE_FUNCTIONS
void acpi_db_method_end(struct acpi_walk_state *walk_state);
#endif

#ifdef ACPI_DISASSEMBLER
static union acpi_parse_object *acpi_db_get_display_op(struct acpi_walk_state
						       *walk_state,
						       union acpi_parse_object
						       *op);
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_start_command
 *
 * PARAMETERS:  walk_state      - Current walk
 *              op              - Current executing Op, from AML interpreter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter debugger command loop
 *
 ******************************************************************************/

static acpi_status
acpi_db_start_command(struct acpi_walk_state *walk_state,
		      union acpi_parse_object *op)
{
	acpi_status status;

	/* TBD: [Investigate] are there namespace locking issues here? */

	/* acpi_ut_release_mutex (ACPI_MTX_NAMESPACE); */

	/* Go into the command loop and await next user command */

	acpi_gbl_method_executing = TRUE;
	status = AE_CTRL_TRUE;

	/* AE_CTRL_TRUE from the dispatcher means: stay in the command loop */

	while (status == AE_CTRL_TRUE) {

		/* Notify the completion of the command */

		status = acpi_os_notify_command_complete();
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Wait the readiness of the command */

		status = acpi_os_wait_command_ready();
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		status =
		    acpi_db_command_dispatch(acpi_gbl_db_line_buf, walk_state,
					     op);
	}

	/* acpi_ut_acquire_mutex (ACPI_MTX_NAMESPACE); */

error_exit:
	if (ACPI_FAILURE(status) && status != AE_CTRL_TERMINATE) {
		ACPI_EXCEPTION((AE_INFO, status,
				"While parsing/handling command line"));
	}
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_signal_break_point
 *
 * PARAMETERS:  walk_state      - Current walk
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called for AML_BREAKPOINT_OP
 *
 ******************************************************************************/

void acpi_db_signal_break_point(struct acpi_walk_state *walk_state)
{

#ifndef ACPI_APPLICATION
	/* Only the registered debugger thread may signal a breakpoint */

	if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) {
		return;
	}
#endif

	/*
	 * Set the single-step flag. This will cause the debugger (if present)
	 * to break to the console within the AML debugger at the start of the
	 * next AML instruction.
	 */
	acpi_gbl_cm_single_step = TRUE;
	acpi_os_printf("**break** Executed AML BreakPoint opcode\n");
}

#ifdef ACPI_DISASSEMBLER
/*******************************************************************************
 *
 * FUNCTION:    acpi_db_get_display_op
 *
 * PARAMETERS:  walk_state      - Current walk
 *              op              - Current executing op (from aml interpreter)
 *
 * RETURN:      Opcode to display
 *
 * DESCRIPTION: Find the opcode to display during single stepping
 *
 ******************************************************************************/

static union acpi_parse_object *acpi_db_get_display_op(struct acpi_walk_state
						       *walk_state,
						       union acpi_parse_object
						       *op)
{
	union acpi_parse_object *display_op;
	union acpi_parse_object *parent_op;

	display_op = op;
	parent_op = op->common.parent;
	if (parent_op) {
		if ((walk_state->control_state) &&
		    (walk_state->control_state->common.state ==
		     ACPI_CONTROL_PREDICATE_EXECUTING)) {
			/*
			 * We are executing the predicate of an IF or WHILE statement
			 * Search upwards for the containing IF or WHILE so that the
			 * entire predicate can be displayed.
			 */
			while (parent_op) {
				if ((parent_op->common.aml_opcode == AML_IF_OP)
				    || (parent_op->common.aml_opcode ==
					AML_WHILE_OP)) {
					display_op = parent_op;
					break;
				}
				parent_op = parent_op->common.parent;
			}
		} else {
			/*
			 * Not in a predicate: walk up to (but not including) the
			 * nearest enclosing control/scope op, displaying the
			 * outermost op below it.
			 */
			while (parent_op) {
				if ((parent_op->common.aml_opcode == AML_IF_OP)
				    || (parent_op->common.aml_opcode ==
					AML_ELSE_OP)
				    || (parent_op->common.aml_opcode ==
					AML_SCOPE_OP)
				    || (parent_op->common.aml_opcode ==
					AML_METHOD_OP)
				    || (parent_op->common.aml_opcode ==
					AML_WHILE_OP)) {
					break;
				}
				display_op = parent_op;
				parent_op = parent_op->common.parent;
			}
		}
	}
	return display_op;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_single_step
 *
 * PARAMETERS:  walk_state      - Current walk
 *              op              - Current executing op (from aml interpreter)
 *              opcode_class    - Class of the current AML Opcode
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called just before execution of an AML opcode.
 *
 ******************************************************************************/

acpi_status
acpi_db_single_step(struct acpi_walk_state *walk_state,
		    union acpi_parse_object *op, u32 opcode_class)
{
	union acpi_parse_object *next;
	acpi_status status = AE_OK;
	u32 original_debug_level;
	u32 aml_offset;

	ACPI_FUNCTION_ENTRY();

#ifndef ACPI_APPLICATION
	/* Only the registered debugger thread participates in single-step */

	if (acpi_gbl_db_thread_id != acpi_os_get_thread_id()) {
		return (AE_OK);
	}
#endif

	/* Check the abort flag */

	if (acpi_gbl_abort_method) {
		acpi_gbl_abort_method = FALSE;
		return (AE_ABORT_METHOD);
	}

	/* Offset of this Op from the start of the parse stream */

	aml_offset = (u32)ACPI_PTR_DIFF(op->common.aml,
					walk_state->parser_state.aml_start);

	/* Check for single-step breakpoint */

	if (walk_state->method_breakpoint &&
	    (walk_state->method_breakpoint <= aml_offset)) {

		/* Check if the breakpoint has been reached or passed */
		/* Hit the breakpoint, resume single step, reset breakpoint */

		acpi_os_printf("***Break*** at AML offset %X\n", aml_offset);
		acpi_gbl_cm_single_step = TRUE;
		acpi_gbl_step_to_next_call = FALSE;
		walk_state->method_breakpoint = 0;
	}

	/* Check for user breakpoint (Must be on exact Aml offset) */

	else if (walk_state->user_breakpoint &&
		 (walk_state->user_breakpoint == aml_offset)) {
		acpi_os_printf("***UserBreakpoint*** at AML offset %X\n",
			       aml_offset);
		acpi_gbl_cm_single_step = TRUE;
		acpi_gbl_step_to_next_call = FALSE;
		walk_state->method_breakpoint = 0;
	}

	/*
	 * Check if this is an opcode that we are interested in --
	 * namely, opcodes that have arguments
	 */
	if (op->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
		return (AE_OK);
	}

	switch (opcode_class) {
	case AML_CLASS_UNKNOWN:
	case AML_CLASS_ARGUMENT:	/* constants, literals, etc. do nothing */

		return (AE_OK);

	default:

		/* All other opcodes -- continue */
		break;
	}

	/*
	 * Under certain debug conditions, display this opcode and its operands
	 */
	if ((acpi_gbl_db_output_to_file) ||
	    (acpi_gbl_cm_single_step) || (acpi_dbg_level & ACPI_LV_PARSE)) {
		if ((acpi_gbl_db_output_to_file) ||
		    (acpi_dbg_level & ACPI_LV_PARSE)) {
			acpi_os_printf
			    ("\nAML Debug: Next AML Opcode to execute:\n");
		}

		/*
		 * Display this op (and only this op - zero out the NEXT field
		 * temporarily, and disable parser trace output for the duration of
		 * the display because we don't want the extraneous debug output)
		 */
		original_debug_level = acpi_dbg_level;
		acpi_dbg_level &= ~(ACPI_LV_PARSE | ACPI_LV_FUNCTIONS);
		next = op->common.next;
		op->common.next = NULL;

		/* Now we can disassemble and display it */

#ifdef ACPI_DISASSEMBLER
		acpi_dm_disassemble(walk_state,
				    acpi_db_get_display_op(walk_state, op),
				    ACPI_UINT32_MAX);
#else
		/*
		 * The AML Disassembler is not configured - at least we can
		 * display the opcode value and name
		 */
		acpi_os_printf("AML Opcode: %4.4X %s\n", op->common.aml_opcode,
			       acpi_ps_get_opcode_name(op->common.aml_opcode));
#endif

		if ((op->common.aml_opcode == AML_IF_OP) ||
		    (op->common.aml_opcode == AML_WHILE_OP)) {
			if (walk_state->control_state->common.value) {
				acpi_os_printf
				    ("Predicate = [True], IF block was executed\n");
			} else {
				acpi_os_printf
				    ("Predicate = [False], Skipping IF block\n");
			}
		} else if (op->common.aml_opcode == AML_ELSE_OP) {
			acpi_os_printf
			    ("Predicate = [False], ELSE block was executed\n");
		}

		/* Restore everything */

		op->common.next = next;
		acpi_os_printf("\n");
		if ((acpi_gbl_db_output_to_file) ||
		    (acpi_dbg_level & ACPI_LV_PARSE)) {
			acpi_os_printf("\n");
		}
		acpi_dbg_level = original_debug_level;
	}

	/* If we are not single stepping, just continue executing the method */

	if (!acpi_gbl_cm_single_step) {
		return (AE_OK);
	}

	/*
	 * If we are executing a step-to-call command,
	 * Check if this is a method call.
	 */
	if (acpi_gbl_step_to_next_call) {
		if (op->common.aml_opcode != AML_INT_METHODCALL_OP) {

			/* Not a method call, just keep executing */

			return (AE_OK);
		}

		/* Found a method call, stop executing */

		acpi_gbl_step_to_next_call = FALSE;
	}

	/*
	 * If the next opcode is a method call, we will "step over" it
	 * by default.
	 */
	if (op->common.aml_opcode == AML_INT_METHODCALL_OP) {

		/* Force no more single stepping while executing called method */

		acpi_gbl_cm_single_step = FALSE;

		/*
		 * Set the breakpoint on/before the call, it will stop execution
		 * as soon as we return
		 */
		walk_state->method_breakpoint = 1;	/* Must be non-zero! */
	}

	/* Release the interpreter while the user interacts with the debugger */

	acpi_ex_exit_interpreter();
	status = acpi_db_start_command(walk_state, op);
	acpi_ex_enter_interpreter();

	/* User commands complete, continue execution of the interrupted method */

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_initialize_debugger
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Init and start debugger
 *
 ******************************************************************************/

acpi_status acpi_initialize_debugger(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_initialize_debugger);

	/* Init globals */

	acpi_gbl_db_buffer = NULL;
	acpi_gbl_db_filename = NULL;
	acpi_gbl_db_output_to_file = FALSE;
	acpi_gbl_db_debug_level = ACPI_LV_VERBOSITY2;
	acpi_gbl_db_console_debug_level = ACPI_NORMAL_DEFAULT | ACPI_LV_TABLES;
	acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;

	acpi_gbl_db_opt_no_ini_methods = FALSE;
	acpi_gbl_db_opt_no_region_support = FALSE;

	acpi_gbl_db_buffer = acpi_os_allocate(ACPI_DEBUG_BUFFER_SIZE);
	if (!acpi_gbl_db_buffer) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}
	memset(acpi_gbl_db_buffer, 0, ACPI_DEBUG_BUFFER_SIZE);

	/* Initial scope is the root */

	acpi_gbl_db_scope_buf[0] = AML_ROOT_PREFIX;
	acpi_gbl_db_scope_buf[1] = 0;
	acpi_gbl_db_scope_node = acpi_gbl_root_node;

	/* Initialize user commands loop */

	acpi_gbl_db_terminate_loop = FALSE;

	/*
	 * If configured for multi-thread support, the debug executor runs in
	 * a separate thread so that the front end can be in another address
	 * space, environment, or even another machine.
	 */
	if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) {

		/* These were created with one unit, grab it */

		status = acpi_os_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			acpi_os_printf("Could not get debugger mutex\n");
			return_ACPI_STATUS(status);
		}

		/* Create the debug execution thread to execute commands */

		acpi_gbl_db_threads_terminated = FALSE;
		status = acpi_os_execute(OSL_DEBUGGER_MAIN_THREAD,
					 acpi_db_execute_thread, NULL);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not start debugger thread"));
			acpi_gbl_db_threads_terminated = TRUE;
			return_ACPI_STATUS(status);
		}
	} else {
		/* Single-threaded: debugger runs on the current thread */

		acpi_gbl_db_thread_id = acpi_os_get_thread_id();
	}

	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_initialize_debugger)

/*******************************************************************************
 *
 * FUNCTION:    acpi_terminate_debugger
 *
 * PARAMETERS:  None
 *
 * RETURN:      None
 *
 * DESCRIPTION: Stop debugger
 *
 ******************************************************************************/
void acpi_terminate_debugger(void)
{

	/* Terminate the AML Debugger */

	acpi_gbl_db_terminate_loop = TRUE;

	if (acpi_gbl_debugger_configuration & DEBUGGER_MULTI_THREADED) {

		/* Wait the AML Debugger threads */

		while (!acpi_gbl_db_threads_terminated) {
			acpi_os_sleep(100);
		}

		acpi_os_terminate_debugger();
	}

	if (acpi_gbl_db_buffer) {
		acpi_os_free(acpi_gbl_db_buffer);
		acpi_gbl_db_buffer = NULL;
	}

	/* Ensure that debug output is now disabled */

	acpi_gbl_db_output_flags = ACPI_DB_DISABLE_OUTPUT;
}

ACPI_EXPORT_SYMBOL(acpi_terminate_debugger)

/*******************************************************************************
 *
 * FUNCTION:    acpi_set_debugger_thread_id
 *
 * PARAMETERS:  thread_id       - Debugger thread ID
 *
 * RETURN:      None
 *
 * DESCRIPTION: Set debugger thread ID
 *
 ******************************************************************************/
void acpi_set_debugger_thread_id(acpi_thread_id thread_id)
{
	acpi_gbl_db_thread_id = thread_id;
}

ACPI_EXPORT_SYMBOL(acpi_set_debugger_thread_id)
linux-master
drivers/acpi/acpica/dbxface.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: nsnames - Name manipulation and search * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acnamesp.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname * * PARAMETERS: node - Namespace node whose pathname is needed * * RETURN: Pointer to storage containing the fully qualified name of * the node, In external format (name segments separated by path * separators.) * * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually * for error and debug statements. * ******************************************************************************/ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { char *name_buffer; ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node); name_buffer = acpi_ns_get_normalized_pathname(node, FALSE); return_PTR(name_buffer); } /******************************************************************************* * * FUNCTION: acpi_ns_get_pathname_length * * PARAMETERS: node - Namespace node * * RETURN: Length of path, including prefix * * DESCRIPTION: Get the length of the pathname string for this node * ******************************************************************************/ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) { acpi_size size; /* Validate the Node */ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) { ACPI_ERROR((AE_INFO, "Invalid/cached reference target node: %p, descriptor type %d", node, ACPI_GET_DESCRIPTOR_TYPE(node))); return (0); } size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE); return (size); } 
/******************************************************************************* * * FUNCTION: acpi_ns_handle_to_name * * PARAMETERS: target_handle - Handle of named object whose name is * to be found * buffer - Where the name is returned * * RETURN: Status, Buffer is filled with name if status is AE_OK * * DESCRIPTION: Build and return a full namespace name * ******************************************************************************/ acpi_status acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer) { acpi_status status; struct acpi_namespace_node *node; const char *node_name; ACPI_FUNCTION_TRACE_PTR(ns_handle_to_name, target_handle); node = acpi_ns_validate_handle(target_handle); if (!node) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Just copy the ACPI name from the Node and zero terminate it */ node_name = acpi_ut_get_node_name(node); ACPI_COPY_NAMESEG(buffer->pointer, node_name); ((char *)buffer->pointer)[ACPI_NAMESEG_SIZE] = 0; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_handle_to_pathname * * PARAMETERS: target_handle - Handle of named object whose name is * to be found * buffer - Where the pathname is returned * no_trailing - Remove trailing '_' for each name * segment * * RETURN: Status, Buffer is filled with pathname if status is AE_OK * * DESCRIPTION: Build and return a full namespace pathname * ******************************************************************************/ acpi_status acpi_ns_handle_to_pathname(acpi_handle target_handle, struct acpi_buffer *buffer, u8 no_trailing) { acpi_status status; struct acpi_namespace_node *node; acpi_size required_size; 
ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle); node = acpi_ns_validate_handle(target_handle); if (!node) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Determine size required for the caller buffer */ required_size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing); if (!required_size) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(buffer, required_size); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Build the path in the caller buffer */ (void)acpi_ns_build_normalized_path(node, buffer->pointer, (u32)required_size, no_trailing); ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", (char *)buffer->pointer, (u32) required_size)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_build_normalized_path * * PARAMETERS: node - Namespace node * full_path - Where the path name is returned * path_size - Size of returned path name buffer * no_trailing - Remove trailing '_' from each name segment * * RETURN: Return 1 if the AML path is empty, otherwise returning (length * of pathname + 1) which means the 'FullPath' contains a trailing * null. * * DESCRIPTION: Build and return a full namespace pathname. * Note that if the size of 'FullPath' isn't large enough to * contain the namespace node's path name, the actual required * buffer length is returned, and it should be greater than * 'PathSize'. So callers are able to check the returning value * to determine the buffer size of 'FullPath'. 
* ******************************************************************************/ u32 acpi_ns_build_normalized_path(struct acpi_namespace_node *node, char *full_path, u32 path_size, u8 no_trailing) { u32 length = 0, i; char name[ACPI_NAMESEG_SIZE]; u8 do_no_trailing; char c, *left, *right; struct acpi_namespace_node *next_node; ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node); #define ACPI_PATH_PUT8(path, size, byte, length) \ do { \ if ((length) < (size)) \ { \ (path)[(length)] = (byte); \ } \ (length)++; \ } while (0) /* * Make sure the path_size is correct, so that we don't need to * validate both full_path and path_size. */ if (!full_path) { path_size = 0; } if (!node) { goto build_trailing_null; } next_node = node; while (next_node && next_node != acpi_gbl_root_node) { if (next_node != node) { ACPI_PATH_PUT8(full_path, path_size, AML_DUAL_NAME_PREFIX, length); } ACPI_MOVE_32_TO_32(name, &next_node->name); do_no_trailing = no_trailing; for (i = 0; i < 4; i++) { c = name[4 - i - 1]; if (do_no_trailing && c != '_') { do_no_trailing = FALSE; } if (!do_no_trailing) { ACPI_PATH_PUT8(full_path, path_size, c, length); } } next_node = next_node->parent; } ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length); /* Reverse the path string */ if (length <= path_size) { left = full_path; right = full_path + length - 1; while (left < right) { c = *left; *left++ = *right; *right-- = c; } } /* Append the trailing null */ build_trailing_null: ACPI_PATH_PUT8(full_path, path_size, '\0', length); #undef ACPI_PATH_PUT8 return_UINT32(length); } /******************************************************************************* * * FUNCTION: acpi_ns_get_normalized_pathname * * PARAMETERS: node - Namespace node whose pathname is needed * no_trailing - Remove trailing '_' from each name segment * * RETURN: Pointer to storage containing the fully qualified name of * the node, In external format (name segments separated by path * separators.) 
* * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually * for error and debug statements. All trailing '_' will be * removed from the full pathname if 'NoTrailing' is specified.. * ******************************************************************************/ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, u8 no_trailing) { char *name_buffer; acpi_size size; ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node); /* Calculate required buffer size based on depth below root */ size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing); if (!size) { return_PTR(NULL); } /* Allocate a buffer to be returned to caller */ name_buffer = ACPI_ALLOCATE_ZEROED(size); if (!name_buffer) { ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size)); return_PTR(NULL); } /* Build the path in the allocated buffer */ (void)acpi_ns_build_normalized_path(node, name_buffer, (u32)size, no_trailing); ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES, "%s: Path \"%s\"\n", ACPI_GET_FUNCTION_NAME, name_buffer)); return_PTR(name_buffer); } /******************************************************************************* * * FUNCTION: acpi_ns_build_prefixed_pathname * * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path * internal_path - Name or path of the namespace node * * RETURN: None * * DESCRIPTION: Construct a fully qualified pathname from a concatenation of: * 1) Path associated with the prefix_scope namespace node * 2) External path representation of the Internal path * ******************************************************************************/ char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope, const char *internal_path) { acpi_status status; char *full_path = NULL; char *external_path = NULL; char *prefix_path = NULL; acpi_size prefix_path_length = 0; /* If there is a prefix, get the pathname to it */ if (prefix_scope && prefix_scope->scope.node) { prefix_path = 
acpi_ns_get_normalized_pathname(prefix_scope->scope.node, TRUE); if (prefix_path) { prefix_path_length = strlen(prefix_path); } } status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path, NULL, &external_path); if (ACPI_FAILURE(status)) { goto cleanup; } /* Merge the prefix path and the path. 2 is for one dot and trailing null */ full_path = ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) + 2); if (!full_path) { goto cleanup; } /* Don't merge if the External path is already fully qualified */ if (prefix_path && (*external_path != '\\') && (*external_path != '^')) { strcat(full_path, prefix_path); if (prefix_path[1]) { strcat(full_path, "."); } } acpi_ns_normalize_pathname(external_path); strcat(full_path, external_path); cleanup: if (prefix_path) { ACPI_FREE(prefix_path); } if (external_path) { ACPI_FREE(external_path); } return (full_path); } /******************************************************************************* * * FUNCTION: acpi_ns_normalize_pathname * * PARAMETERS: original_path - Path to be normalized, in External format * * RETURN: The original path is processed in-place * * DESCRIPTION: Remove trailing underscores from each element of a path. 
* * For example: \A___.B___.C___ becomes \A.B.C * ******************************************************************************/ void acpi_ns_normalize_pathname(char *original_path) { char *input_path = original_path; char *new_path_buffer; char *new_path; u32 i; /* Allocate a temp buffer in which to construct the new path */ new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1); new_path = new_path_buffer; if (!new_path_buffer) { return; } /* Special characters may appear at the beginning of the path */ if (*input_path == '\\') { *new_path = *input_path; new_path++; input_path++; } while (*input_path == '^') { *new_path = *input_path; new_path++; input_path++; } /* Remainder of the path */ while (*input_path) { /* Do one nameseg at a time */ for (i = 0; (i < ACPI_NAMESEG_SIZE) && *input_path; i++) { if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */ *new_path = *input_path; new_path++; } input_path++; } /* Dot means that there are more namesegs to come */ if (*input_path == '.') { *new_path = *input_path; new_path++; input_path++; } } *new_path = 0; strcpy(original_path, new_path_buffer); ACPI_FREE(new_path_buffer); }
linux-master
drivers/acpi/acpica/nsnames.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: rscreate - Create resource lists/tables * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #include "acnamesp.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rscreate") /******************************************************************************* * * FUNCTION: acpi_buffer_to_resource * * PARAMETERS: aml_buffer - Pointer to the resource byte stream * aml_buffer_length - Length of the aml_buffer * resource_ptr - Where the converted resource is returned * * RETURN: Status * * DESCRIPTION: Convert a raw AML buffer to a resource list * ******************************************************************************/ acpi_status acpi_buffer_to_resource(u8 *aml_buffer, u16 aml_buffer_length, struct acpi_resource **resource_ptr) { acpi_status status; acpi_size list_size_needed; void *resource; void *current_resource_ptr; ACPI_FUNCTION_TRACE(acpi_buffer_to_resource); /* * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag * is not required here. 
*/ /* Get the required length for the converted resource */ status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length, &list_size_needed); if (status == AE_AML_NO_RESOURCE_END_TAG) { status = AE_OK; } if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Allocate a buffer for the converted resource */ resource = ACPI_ALLOCATE_ZEROED(list_size_needed); current_resource_ptr = resource; if (!resource) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Perform the AML-to-Resource conversion */ status = acpi_ut_walk_aml_resources(NULL, aml_buffer, aml_buffer_length, acpi_rs_convert_aml_to_resources, &current_resource_ptr); if (status == AE_AML_NO_RESOURCE_END_TAG) { status = AE_OK; } if (ACPI_FAILURE(status)) { ACPI_FREE(resource); } else { *resource_ptr = resource; } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_buffer_to_resource) /******************************************************************************* * * FUNCTION: acpi_rs_create_resource_list * * PARAMETERS: aml_buffer - Pointer to the resource byte stream * output_buffer - Pointer to the user's buffer * * RETURN: Status: AE_OK if okay, else a valid acpi_status code * If output_buffer is not large enough, output_buffer_length * indicates how large output_buffer should be, else it * indicates how may u8 elements of output_buffer are valid. * * DESCRIPTION: Takes the byte stream returned from a _CRS, _PRS control method * execution and parses the stream to create a linked list * of device resources. 
* ******************************************************************************/ acpi_status acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer, struct acpi_buffer *output_buffer) { acpi_status status; u8 *aml_start; acpi_size list_size_needed = 0; u32 aml_buffer_length; void *resource; ACPI_FUNCTION_TRACE(rs_create_resource_list); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer)); /* Params already validated, so we don't re-validate here */ aml_buffer_length = aml_buffer->buffer.length; aml_start = aml_buffer->buffer.pointer; /* * Pass the aml_buffer into a module that can calculate * the buffer size needed for the linked list */ status = acpi_rs_get_list_length(aml_start, aml_buffer_length, &list_size_needed); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n", status, (u32) list_size_needed)); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(output_buffer, list_size_needed); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Do the conversion */ resource = output_buffer->pointer; status = acpi_ut_walk_aml_resources(NULL, aml_start, aml_buffer_length, acpi_rs_convert_aml_to_resources, &resource); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n", output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_rs_create_pci_routing_table * * PARAMETERS: package_object - Pointer to a package containing one * of more ACPI_OPERAND_OBJECTs * output_buffer - Pointer to the user's buffer * * RETURN: Status AE_OK if okay, else a valid acpi_status code. * If the output_buffer is too small, the error will be * AE_BUFFER_OVERFLOW and output_buffer->Length will point * to the size buffer needed. 
* * DESCRIPTION: Takes the union acpi_operand_object package and creates a * linked list of PCI interrupt descriptions * * NOTE: It is the caller's responsibility to ensure that the start of the * output buffer is aligned properly (if necessary). * ******************************************************************************/ acpi_status acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, struct acpi_buffer *output_buffer) { u8 *buffer; union acpi_operand_object **top_object_list; union acpi_operand_object **sub_object_list; union acpi_operand_object *obj_desc; acpi_size buffer_size_needed = 0; u32 number_of_elements; u32 index; struct acpi_pci_routing_table *user_prt; struct acpi_namespace_node *node; acpi_status status; struct acpi_buffer path_buffer; ACPI_FUNCTION_TRACE(rs_create_pci_routing_table); /* Params already validated, so we don't re-validate here */ /* Get the required buffer length */ status = acpi_rs_get_pci_routing_table_length(package_object, &buffer_size_needed); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n", (u32) buffer_size_needed)); /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(output_buffer, buffer_size_needed); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Loop through the ACPI_INTERNAL_OBJECTS - Each object should be a * package that in turn contains an u64 Address, a u8 Pin, * a Name, and a u8 source_index. 
*/ top_object_list = package_object->package.elements; number_of_elements = package_object->package.count; buffer = output_buffer->pointer; user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer); for (index = 0; index < number_of_elements; index++) { /* * Point user_prt past this current structure * * NOTE: On the first iteration, user_prt->Length will * be zero because we cleared the return buffer earlier */ buffer += user_prt->length; user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer); /* * Fill in the Length field with the information we have at this * point. The minus four is to subtract the size of the u8 * Source[4] member because it is added below. */ user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4); /* Each subpackage must be of length 4 */ if ((*top_object_list)->package.count != 4) { ACPI_ERROR((AE_INFO, "(PRT[%u]) Need package of length 4, found length %u", index, (*top_object_list)->package.count)); return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT); } /* * Dereference the subpackage. 
* The sub_object_list will now point to an array of the four IRQ * elements: [Address, Pin, Source, source_index] */ sub_object_list = (*top_object_list)->package.elements; /* 1) First subobject: Dereference the PRT.Address */ obj_desc = sub_object_list[0]; if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%u].Address) Need Integer, found %s", index, acpi_ut_get_object_type_name(obj_desc))); return_ACPI_STATUS(AE_BAD_DATA); } user_prt->address = obj_desc->integer.value; /* 2) Second subobject: Dereference the PRT.Pin */ obj_desc = sub_object_list[1]; if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%u].Pin) Need Integer, found %s", index, acpi_ut_get_object_type_name(obj_desc))); return_ACPI_STATUS(AE_BAD_DATA); } user_prt->pin = (u32) obj_desc->integer.value; /* * 3) Third subobject: Dereference the PRT.source_name * The name may be unresolved (slack mode), so allow a null object */ obj_desc = sub_object_list[2]; if (obj_desc) { switch (obj_desc->common.type) { case ACPI_TYPE_LOCAL_REFERENCE: if (obj_desc->reference.class != ACPI_REFCLASS_NAME) { ACPI_ERROR((AE_INFO, "(PRT[%u].Source) Need name, found Reference Class 0x%X", index, obj_desc->reference.class)); return_ACPI_STATUS(AE_BAD_DATA); } node = obj_desc->reference.node; /* Use *remaining* length of the buffer as max for pathname */ path_buffer.length = output_buffer->length - (u32) ((u8 *) user_prt->source - (u8 *) output_buffer->pointer); path_buffer.pointer = user_prt->source; status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* +1 to include null terminator */ user_prt->length += (u32)strlen(user_prt->source) + 1; break; case ACPI_TYPE_STRING: strcpy(user_prt->source, obj_desc->string.pointer); /* * Add to the Length field the length of the string * (add 1 for terminator) */ user_prt->length += obj_desc->string.length + 1; break; case 
ACPI_TYPE_INTEGER: /* * If this is a number, then the Source Name is NULL, since * the entire buffer was zeroed out, we can leave this alone. * * Add to the Length field the length of the u32 NULL */ user_prt->length += sizeof(u32); break; default: ACPI_ERROR((AE_INFO, "(PRT[%u].Source) Need Ref/String/Integer, found %s", index, acpi_ut_get_object_type_name (obj_desc))); return_ACPI_STATUS(AE_BAD_DATA); } } /* Now align the current length */ user_prt->length = (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length); /* 4) Fourth subobject: Dereference the PRT.source_index */ obj_desc = sub_object_list[3]; if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) { ACPI_ERROR((AE_INFO, "(PRT[%u].SourceIndex) Need Integer, found %s", index, acpi_ut_get_object_type_name(obj_desc))); return_ACPI_STATUS(AE_BAD_DATA); } user_prt->source_index = (u32) obj_desc->integer.value; /* Point to the next union acpi_operand_object in the top level package */ top_object_list++; } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n", output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_rs_create_aml_resources * * PARAMETERS: resource_list - Pointer to the resource list buffer * output_buffer - Where the AML buffer is returned * * RETURN: Status AE_OK if okay, else a valid acpi_status code. * If the output_buffer is too small, the error will be * AE_BUFFER_OVERFLOW and output_buffer->Length will point * to the size buffer needed. * * DESCRIPTION: Converts a list of device resources to an AML bytestream * to be used as input for the _SRS control method. 
* ******************************************************************************/ acpi_status acpi_rs_create_aml_resources(struct acpi_buffer *resource_list, struct acpi_buffer *output_buffer) { acpi_status status; acpi_size aml_size_needed = 0; ACPI_FUNCTION_TRACE(rs_create_aml_resources); /* Params already validated, no need to re-validate here */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ResourceList Buffer = %p\n", resource_list->pointer)); /* Get the buffer size needed for the AML byte stream */ status = acpi_rs_get_aml_length(resource_list->pointer, resource_list->length, &aml_size_needed); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n", (u32)aml_size_needed, acpi_format_exception(status))); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Validate/Allocate/Clear caller buffer */ status = acpi_ut_initialize_buffer(output_buffer, aml_size_needed); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Do the conversion */ status = acpi_rs_convert_resources_to_aml(resource_list->pointer, aml_size_needed, output_buffer->pointer); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n", output_buffer->pointer, (u32) output_buffer->length)); return_ACPI_STATUS(AE_OK); }
linux-master
drivers/acpi/acpica/rscreate.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: hwxface - Public ACPICA hardware interfaces * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #define EXPORT_ACPI_INTERFACES #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwxface") /****************************************************************************** * * FUNCTION: acpi_reset * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Set reset register in memory or IO space. Note: Does not * support reset register in PCI config space, this must be * handled separately. * ******************************************************************************/ acpi_status acpi_reset(void) { struct acpi_generic_address *reset_reg; acpi_status status; ACPI_FUNCTION_TRACE(acpi_reset); reset_reg = &acpi_gbl_FADT.reset_register; /* Check if the reset register is supported */ if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || !reset_reg->address) { return_ACPI_STATUS(AE_NOT_EXIST); } if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { /* * For I/O space, write directly to the OSL. This bypasses the port * validation mechanism, which may block a valid write to the reset * register. * * NOTE: * The ACPI spec requires the reset register width to be 8, so we * hardcode it here and ignore the FADT value. This maintains * compatibility with other ACPI implementations that have allowed * BIOS code with bad register width values to go unnoticed. 
*/ status = acpi_os_write_port((acpi_io_address)reset_reg->address, acpi_gbl_FADT.reset_value, ACPI_RESET_REGISTER_WIDTH); } else { /* Write the reset value to the reset register */ status = acpi_hw_write(acpi_gbl_FADT.reset_value, reset_reg); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_reset) /****************************************************************************** * * FUNCTION: acpi_read * * PARAMETERS: value - Where the value is returned * reg - GAS register structure * * RETURN: Status * * DESCRIPTION: Read from either memory or IO space. * * LIMITATIONS: <These limitations also apply to acpi_write> * bit_width must be exactly 8, 16, 32, or 64. * space_ID must be system_memory or system_IO. * bit_offset and access_width are currently ignored, as there has * not been a need to implement these. * ******************************************************************************/ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg) { acpi_status status; ACPI_FUNCTION_NAME(acpi_read); status = acpi_hw_read(return_value, reg); return (status); } ACPI_EXPORT_SYMBOL(acpi_read) /****************************************************************************** * * FUNCTION: acpi_write * * PARAMETERS: value - Value to be written * reg - GAS register structure * * RETURN: Status * * DESCRIPTION: Write to either memory or IO space. * ******************************************************************************/ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg) { acpi_status status; ACPI_FUNCTION_NAME(acpi_write); status = acpi_hw_write(value, reg); return (status); } ACPI_EXPORT_SYMBOL(acpi_write) #if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: acpi_read_bit_register * * PARAMETERS: register_id - ID of ACPI Bit Register to access * return_value - Value that was read from the register, * normalized to bit position zero. 
* * RETURN: Status and the value read from the specified Register. Value * returned is normalized to bit0 (is shifted all the way right) * * DESCRIPTION: ACPI bit_register read function. Does not acquire the HW lock. * * SUPPORTS: Bit fields in PM1 Status, PM1 Enable, PM1 Control, and * PM2 Control. * * Note: The hardware lock is not required when reading the ACPI bit registers * since almost all of them are single bit and it does not matter that * the parent hardware register can be split across two physical * registers. The only multi-bit field is SLP_TYP in the PM1 control * register, but this field does not cross an 8-bit boundary (nor does * it make much sense to actually read this field.) * ******************************************************************************/ acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value) { struct acpi_bit_register_info *bit_reg_info; u32 register_value; u32 value; acpi_status status; ACPI_FUNCTION_TRACE_U32(acpi_read_bit_register, register_id); /* Get the info structure corresponding to the requested ACPI Register */ bit_reg_info = acpi_hw_get_bit_register_info(register_id); if (!bit_reg_info) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Read the entire parent register */ status = acpi_hw_register_read(bit_reg_info->parent_register, &register_value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Normalize the value that was read, mask off other bits */ value = ((register_value & bit_reg_info->access_bit_mask) >> bit_reg_info->bit_position); ACPI_DEBUG_PRINT((ACPI_DB_IO, "BitReg %X, ParentReg %X, Actual %8.8X, ReturnValue %8.8X\n", register_id, bit_reg_info->parent_register, register_value, value)); *return_value = value; return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_read_bit_register) /******************************************************************************* * * FUNCTION: acpi_write_bit_register * * PARAMETERS: register_id - ID of ACPI Bit Register to access * value - Value to 
write to the register, in bit position zero. The
 *                          bit is automatically shifted to the correct
 *                          position.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: ACPI Bit Register write function. Acquires the hardware lock
 *              since most operations require a read/modify/write sequence.
 *
 * SUPPORTS:    Bit fields in PM1 Status, PM1 Enable, PM1 Control, and
 *              PM2 Control.
 *
 * Note that at this level, the fact that there may be actually two
 * hardware registers (A and B - and B may not exist) is abstracted.
 *
 ******************************************************************************/
acpi_status acpi_write_bit_register(u32 register_id, u32 value)
{
	struct acpi_bit_register_info *bit_reg_info;
	acpi_cpu_flags lock_flags;
	u32 register_value;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_U32(acpi_write_bit_register, register_id);

	/* Get the info structure corresponding to the requested ACPI Register */

	bit_reg_info = acpi_hw_get_bit_register_info(register_id);
	if (!bit_reg_info) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Raw lock: the whole read/modify/write sequence must be atomic */

	lock_flags = acpi_os_acquire_raw_lock(acpi_gbl_hardware_lock);

	/*
	 * At this point, we know that the parent register is one of the
	 * following: PM1 Status, PM1 Enable, PM1 Control, or PM2 Control
	 */
	if (bit_reg_info->parent_register != ACPI_REGISTER_PM1_STATUS) {
		/*
		 * 1) Case for PM1 Enable, PM1 Control, and PM2 Control
		 *
		 * Perform a register read to preserve the bits that we are not
		 * interested in
		 */
		status = acpi_hw_register_read(bit_reg_info->parent_register,
					       &register_value);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}

		/*
		 * Insert the input bit into the value that was just read
		 * and write the register
		 */
		ACPI_REGISTER_INSERT_VALUE(register_value,
					   bit_reg_info->bit_position,
					   bit_reg_info->access_bit_mask,
					   value);

		status = acpi_hw_register_write(bit_reg_info->parent_register,
						register_value);
	} else {
		/*
		 * 2) Case for PM1 Status
		 *
		 * The Status register is different from the rest. Clear an event
		 * by writing 1, writing 0 has no effect. So, the only relevant
		 * information is the single bit we're interested in, all others
		 * should be written as 0 so they will be left unchanged.
		 */
		register_value = ACPI_REGISTER_PREPARE_BITS(value,
							    bit_reg_info->
							    bit_position,
							    bit_reg_info->
							    access_bit_mask);

		/* No need to write the register if value is all zeros */

		if (register_value) {
			status =
			    acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
						   register_value);
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_IO,
			  "BitReg %X, ParentReg %X, Value %8.8X, Actual %8.8X\n",
			  register_id, bit_reg_info->parent_register, value,
			  register_value));

unlock_and_exit:

	acpi_os_release_raw_lock(acpi_gbl_hardware_lock, lock_flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
#endif				/* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
 *
 * FUNCTION:    acpi_get_sleep_type_data
 *
 * PARAMETERS:  sleep_state         - Numeric sleep state
 *              *sleep_type_a       - Where SLP_TYPa is returned
 *              *sleep_type_b       - Where SLP_TYPb is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested
 *              sleep state via the appropriate \_Sx object.
 *
 *  The sleep state package returned from the corresponding \_Sx_ object
 *  must contain at least one integer.
 *
 *  March 2005:
 *  Added support for a package that contains two integers. This
 *  goes against the ACPI specification which defines this object as a
 *  package with one encoded DWORD integer. However, existing practice
 *  by many BIOS vendors is to return a package with 2 or more integer
 *  elements, at least one per sleep type (A/B).
 *
 *  January 2013:
 *  Therefore, we must be prepared to accept a package with either a
 *  single integer or multiple integers.
 *
 *  The single integer DWORD format is as follows:
 *      BYTE 0 - Value for the PM1A SLP_TYP register
 *      BYTE 1 - Value for the PM1B SLP_TYP register
 *      BYTE 2-3 - Reserved
 *
 *  The dual integer format is as follows:
 *      Integer 0 - Value for the PM1A SLP_TYP register
 *      Integer 1 - Value for the PM1B SLP_TYP register
 *      (the code below assigns elements[1] to *sleep_type_b)
 *
 ******************************************************************************/
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
{
	acpi_status status;
	struct acpi_evaluate_info *info;
	union acpi_operand_object **elements;

	ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);

	/* Validate parameters */

	if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Allocate the evaluation information block */

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Evaluate the \_Sx namespace object containing the register values
	 * for this state
	 */
	info->relative_pathname = acpi_gbl_sleep_state_names[sleep_state];

	status = acpi_ns_evaluate(info);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {

			/* The _Sx states are optional, ignore NOT_FOUND */

			goto final_cleanup;
		}

		goto warning_cleanup;
	}

	/* Must have a return object */

	if (!info->return_object) {
		ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
			    info->relative_pathname));
		status = AE_AML_NO_RETURN_VALUE;
		goto warning_cleanup;
	}

	/* Return object must be of type Package */

	if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
		ACPI_ERROR((AE_INFO,
			    "Sleep State return object is not a Package"));
		status = AE_AML_OPERAND_TYPE;
		goto return_value_cleanup;
	}

	/*
	 * Any warnings about the package length or the object types have
	 * already been issued by the predefined name module -- there is no
	 * need to repeat them here.
	 */
	elements = info->return_object->package.elements;
	switch (info->return_object->package.count) {
	case 0:

		status = AE_AML_PACKAGE_LIMIT;
		break;

	case 1:

		if (elements[0]->common.type != ACPI_TYPE_INTEGER) {
			status = AE_AML_OPERAND_TYPE;
			break;
		}

		/* A valid _Sx_ package with one integer */

		*sleep_type_a = (u8)elements[0]->integer.value;
		*sleep_type_b = (u8)(elements[0]->integer.value >> 8);
		break;

	case 2:
	default:

		if ((elements[0]->common.type != ACPI_TYPE_INTEGER) ||
		    (elements[1]->common.type != ACPI_TYPE_INTEGER)) {
			status = AE_AML_OPERAND_TYPE;
			break;
		}

		/* A valid _Sx_ package with two integers */

		*sleep_type_a = (u8)elements[0]->integer.value;
		*sleep_type_b = (u8)elements[1]->integer.value;
		break;
	}

return_value_cleanup:
	acpi_ut_remove_reference(info->return_object);

warning_cleanup:
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"While evaluating Sleep State [%s]",
				info->relative_pathname));
	}

final_cleanup:
	ACPI_FREE(info);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
linux-master
drivers/acpi/acpica/hwxface.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: dbfileio - Debugger file I/O commands. These can't usually * be used when running the debugger in Ring 0 (Kernel mode) * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acdebug.h" #include "actables.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbfileio") #ifdef ACPI_APPLICATION #include "acapps.h" #ifdef ACPI_DEBUGGER /******************************************************************************* * * FUNCTION: acpi_db_close_debug_file * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: If open, close the current debug output file * ******************************************************************************/ void acpi_db_close_debug_file(void) { if (acpi_gbl_debug_file) { fclose(acpi_gbl_debug_file); acpi_gbl_debug_file = NULL; acpi_gbl_db_output_to_file = FALSE; acpi_os_printf("Debug output file %s closed\n", acpi_gbl_db_debug_filename); } } /******************************************************************************* * * FUNCTION: acpi_db_open_debug_file * * PARAMETERS: name - Filename to open * * RETURN: None * * DESCRIPTION: Open a file where debug output will be directed. 
* ******************************************************************************/ void acpi_db_open_debug_file(char *name) { acpi_db_close_debug_file(); acpi_gbl_debug_file = fopen(name, "w+"); if (!acpi_gbl_debug_file) { acpi_os_printf("Could not open debug file %s\n", name); return; } acpi_os_printf("Debug output file %s opened\n", name); acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name, sizeof(acpi_gbl_db_debug_filename)); acpi_gbl_db_output_to_file = TRUE; } #endif /******************************************************************************* * * FUNCTION: acpi_db_load_tables * * PARAMETERS: list_head - List of ACPI tables to load * * RETURN: Status * * DESCRIPTION: Load ACPI tables from a previously constructed table list. * ******************************************************************************/ acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head) { acpi_status status; struct acpi_new_table_desc *table_list_head; struct acpi_table_header *table; /* Load all ACPI tables in the list */ table_list_head = list_head; while (table_list_head) { table = table_list_head->table; status = acpi_load_table(table, NULL); if (ACPI_FAILURE(status)) { if (status == AE_ALREADY_EXISTS) { acpi_os_printf ("Table %4.4s is already installed\n", table->signature); } else { acpi_os_printf("Could not install table, %s\n", acpi_format_exception(status)); } return (status); } acpi_os_printf ("Acpi table [%4.4s] successfully installed and loaded\n", table->signature); table_list_head = table_list_head->next; } return (AE_OK); } #endif
linux-master
drivers/acpi/acpica/dbfileio.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: nswalk - Functions for walking the ACPI namespace
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_NAMESPACE
ACPI_MODULE_NAME("nswalk")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_next_node
 *
 * PARAMETERS:  parent_node         - Parent node whose children we are
 *                                    getting
 *              child_node          - Previous child that was found.
 *                                    The NEXT child will be returned
 *
 * RETURN:      struct acpi_namespace_node - Pointer to the NEXT child or NULL if
 *                                    none is found.
 *
 * DESCRIPTION: Return the next peer node within the namespace. If Handle
 *              is valid, Scope is ignored. Otherwise, the first node
 *              within Scope is returned.
 *
 ******************************************************************************/
struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
						  *parent_node,
						  struct acpi_namespace_node
						  *child_node)
{
	ACPI_FUNCTION_ENTRY();

	if (!child_node) {

		/* It's really the parent's _scope_ that we want */

		return (parent_node->child);
	}

	/* Otherwise just return the next peer */

	return (child_node->peer);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_get_next_node_typed
 *
 * PARAMETERS:  type                - Type of node to be searched for
 *              parent_node         - Parent node whose children we are
 *                                    getting
 *              child_node          - Previous child that was found.
 *                                    The NEXT child will be returned
 *
 * RETURN:      struct acpi_namespace_node - Pointer to the NEXT child or NULL if
 *                                    none is found.
 *
 * DESCRIPTION: Return the next peer node within the namespace. If Handle
 *              is valid, Scope is ignored. Otherwise, the first node
 *              within Scope is returned.
 *
 ******************************************************************************/
struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
							struct
							acpi_namespace_node
							*parent_node,
							struct
							acpi_namespace_node
							*child_node)
{
	struct acpi_namespace_node *next_node = NULL;

	ACPI_FUNCTION_ENTRY();

	next_node = acpi_ns_get_next_node(parent_node, child_node);

	/* If any type is OK, we are done */

	if (type == ACPI_TYPE_ANY) {

		/* next_node is NULL if we are at the end-of-list */

		return (next_node);
	}

	/* Must search for the node -- but within this scope only */

	while (next_node) {

		/* If type matches, we are done */

		if (next_node->type == type) {
			return (next_node);
		}

		/* Otherwise, move on to the next peer node */

		next_node = next_node->peer;
	}

	/* Not found */

	return (NULL);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_walk_namespace
 *
 * PARAMETERS:  type                - acpi_object_type to search for
 *              start_node          - Handle in namespace where search begins
 *              max_depth           - Depth to which search is to reach
 *              flags               - Whether to unlock the NS before invoking
 *                                    the callback routine
 *              descending_callback - Called during tree descent
 *                                    when an object of "Type" is found
 *              ascending_callback  - Called during tree ascent
 *                                    when an object of "Type" is found
 *              context             - Passed to user function(s) above
 *              return_value        - from the user_function if terminated
 *                                    early. Otherwise, returns NULL.
 * RETURNS:     Status
 *
 * DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
 *              starting (and ending) at the node specified by start_handle.
 *              The callback function is called whenever a node that matches
 *              the type parameter is found. If the callback function returns
 *              a non-zero value, the search is terminated immediately and
 *              this value is returned to the caller.
 *
 *              The point of this procedure is to provide a generic namespace
 *              walk routine that can be called from multiple places to
 *              provide multiple services; the callback function(s) can be
 *              tailored to each task, whether it is a print function,
 *              a compare function, etc.
 *
 ******************************************************************************/
acpi_status
acpi_ns_walk_namespace(acpi_object_type type,
		       acpi_handle start_node,
		       u32 max_depth,
		       u32 flags,
		       acpi_walk_callback descending_callback,
		       acpi_walk_callback ascending_callback,
		       void *context, void **return_value)
{
	acpi_status status;
	acpi_status mutex_status;
	struct acpi_namespace_node *child_node;
	struct acpi_namespace_node *parent_node;
	acpi_object_type child_type;
	u32 level;
	/* Each node is visited twice: descending (FALSE) then ascending (TRUE) */
	u8 node_previously_visited = FALSE;

	ACPI_FUNCTION_TRACE(ns_walk_namespace);

	/* Special case for the namespace Root Node */

	if (start_node == ACPI_ROOT_OBJECT) {
		start_node = acpi_gbl_root_node;
		if (!start_node) {
			return_ACPI_STATUS(AE_NO_NAMESPACE);
		}
	}

	/* Null child means "get first node" */

	parent_node = start_node;
	child_node = acpi_ns_get_next_node(parent_node, NULL);
	child_type = ACPI_TYPE_ANY;
	level = 1;

	/*
	 * Traverse the tree of nodes until we bubble back up to where we
	 * started. When Level is zero, the loop is done because we have
	 * bubbled up to (and passed) the original parent handle (start_entry)
	 */
	while (level > 0 && child_node) {
		status = AE_OK;

		/* Found next child, get the type if we are not searching for ANY */

		if (type != ACPI_TYPE_ANY) {
			child_type = child_node->type;
		}

		/*
		 * Ignore all temporary namespace nodes (created during control
		 * method execution) unless told otherwise. These temporary nodes
		 * can cause a race condition because they can be deleted during
		 * the execution of the user function (if the namespace is
		 * unlocked before invocation of the user function.) Only the
		 * debugger namespace dump will examine the temporary nodes.
		 */
		if ((child_node->flags & ANOBJ_TEMPORARY) &&
		    !(flags & ACPI_NS_WALK_TEMP_NODES)) {
			/* AE_CTRL_DEPTH prevents descent into this subtree below */
			status = AE_CTRL_DEPTH;
		}

		/* Type must match requested type */

		else if (child_type == type) {
			/*
			 * Found a matching node, invoke the user callback function.
			 * Unlock the namespace if flag is set.
			 */
			if (flags & ACPI_NS_WALK_UNLOCK) {
				mutex_status =
				    acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
				if (ACPI_FAILURE(mutex_status)) {
					return_ACPI_STATUS(mutex_status);
				}
			}

			/*
			 * Invoke the user function, either descending, ascending,
			 * or both.
			 */
			if (!node_previously_visited) {
				if (descending_callback) {
					status =
					    descending_callback(child_node,
								level, context,
								return_value);
				}
			} else {
				if (ascending_callback) {
					status =
					    ascending_callback(child_node,
							       level, context,
							       return_value);
				}
			}

			if (flags & ACPI_NS_WALK_UNLOCK) {
				mutex_status =
				    acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
				if (ACPI_FAILURE(mutex_status)) {
					return_ACPI_STATUS(mutex_status);
				}
			}

			switch (status) {
			case AE_OK:
			case AE_CTRL_DEPTH:

				/* Just keep going */
				break;

			case AE_CTRL_TERMINATE:

				/* Exit now, with OK status */

				return_ACPI_STATUS(AE_OK);

			default:

				/* All others are valid exceptions */

				return_ACPI_STATUS(status);
			}
		}

		/*
		 * Depth first search: Attempt to go down another level in the
		 * namespace if we are allowed to. Don't go any further if we have
		 * reached the caller specified maximum depth or if the user
		 * function has specified that the maximum depth has been reached.
		 */
		if (!node_previously_visited &&
		    (level < max_depth) && (status != AE_CTRL_DEPTH)) {
			if (child_node->child) {

				/* There is at least one child of this node, visit it */

				level++;
				parent_node = child_node;
				child_node =
				    acpi_ns_get_next_node(parent_node, NULL);
				continue;
			}
		}

		/* No more children, re-visit this node */

		if (!node_previously_visited) {
			node_previously_visited = TRUE;
			continue;
		}

		/* No more children, visit peers */

		child_node = acpi_ns_get_next_node(parent_node, child_node);
		if (child_node) {
			node_previously_visited = FALSE;
		}

		/* No peers, re-visit parent */

		else {
			/*
			 * No more children of this node (acpi_ns_get_next_node failed), go
			 * back upwards in the namespace tree to the node's parent.
			 */
			level--;
			child_node = parent_node;
			parent_node = parent_node->parent;

			node_previously_visited = TRUE;
		}
	}

	/* Complete walk, not terminated by user function */

	return_ACPI_STATUS(AE_OK);
}
linux-master
drivers/acpi/acpica/nswalk.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: utstate - state object support procedures * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utstate") /******************************************************************************* * * FUNCTION: acpi_ut_push_generic_state * * PARAMETERS: list_head - Head of the state stack * state - State object to push * * RETURN: None * * DESCRIPTION: Push a state object onto a state stack * ******************************************************************************/ void acpi_ut_push_generic_state(union acpi_generic_state **list_head, union acpi_generic_state *state) { ACPI_FUNCTION_ENTRY(); /* Push the state object onto the front of the list (stack) */ state->common.next = *list_head; *list_head = state; return; } /******************************************************************************* * * FUNCTION: acpi_ut_pop_generic_state * * PARAMETERS: list_head - Head of the state stack * * RETURN: The popped state object * * DESCRIPTION: Pop a state object from a state stack * ******************************************************************************/ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state **list_head) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); /* Remove the state object at the head of the list (stack) */ state = *list_head; if (state) { /* Update the list head */ *list_head = state->common.next; } return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_generic_state * * PARAMETERS: None * * RETURN: The new state object. NULL on failure. * * DESCRIPTION: Create a generic state object. Attempt to obtain one from * the global state cache; If none available, create a new one. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_generic_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); state = acpi_os_acquire_object(acpi_gbl_state_cache); if (state) { /* Initialize */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE; } return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_thread_state * * PARAMETERS: None * * RETURN: New Thread State. NULL on failure * * DESCRIPTION: Create a "Thread State" - a flavor of the generic state used * to track per-thread info during method execution * ******************************************************************************/ struct acpi_thread_state *acpi_ut_create_thread_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return (NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD; state->thread.thread_id = acpi_os_get_thread_id(); /* Check for invalid thread ID - zero is very bad, it will break things */ if (!state->thread.thread_id) { ACPI_ERROR((AE_INFO, "Invalid zero ID from AcpiOsGetThreadId")); state->thread.thread_id = (acpi_thread_id) 1; } return ((struct acpi_thread_state *)state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_update_state * * PARAMETERS: object - Initial Object to be installed in the state * action - Update action to be performed * * RETURN: New state object, null on failure * * DESCRIPTION: Create an "Update State" - a flavor of the generic state used * to update reference counts and delete complex objects such * as packages. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object *object, u16 action) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return (NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE; state->update.object = object; state->update.value = action; return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_pkg_state * * PARAMETERS: object - Initial Object to be installed in the state * action - Update action to be performed * * RETURN: New state object, null on failure * * DESCRIPTION: Create a "Package State" * ******************************************************************************/ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object, void *external_object, u32 index) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return (NULL); } /* Init fields specific to the update struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE; state->pkg.source_object = (union acpi_operand_object *)internal_object; state->pkg.dest_object = external_object; state->pkg.index = index; state->pkg.num_packages = 1; return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_create_control_state * * PARAMETERS: None * * RETURN: New state object, null on failure * * DESCRIPTION: Create a "Control State" - a flavor of the generic state used * to support nested IF/WHILE constructs in the AML. 
* ******************************************************************************/ union acpi_generic_state *acpi_ut_create_control_state(void) { union acpi_generic_state *state; ACPI_FUNCTION_ENTRY(); /* Create the generic state object */ state = acpi_ut_create_generic_state(); if (!state) { return (NULL); } /* Init fields specific to the control struct */ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL; state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING; return (state); } /******************************************************************************* * * FUNCTION: acpi_ut_delete_generic_state * * PARAMETERS: state - The state object to be deleted * * RETURN: None * * DESCRIPTION: Release a state object to the state cache. NULL state objects * are ignored. * ******************************************************************************/ void acpi_ut_delete_generic_state(union acpi_generic_state *state) { ACPI_FUNCTION_ENTRY(); /* Ignore null state */ if (state) { (void)acpi_os_release_object(acpi_gbl_state_cache, state); } return; }
linux-master
drivers/acpi/acpica/utstate.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exoparg1 - AML execution - opcodes with 1 argument
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exoparg1")

/*!
 * Naming convention for AML interpreter execution routines.
 *
 * The routines that begin execution of AML opcodes are named with a common
 * convention based upon the number of arguments, the number of target operands,
 * and whether or not a value is returned:
 *
 *      AcpiExOpcode_xA_yT_zR
 *
 * Where:
 *
 * xA - ARGUMENTS:    The number of arguments (input operands) that are
 *                    required for this opcode type (0 through 6 args).
 * yT - TARGETS:      The number of targets (output operands) that are required
 *                    for this opcode type (0, 1, or 2 targets).
 * zR - RETURN VALUE: Indicates whether this opcode type returns a value
 *                    as the function return (0 or 1).
 *
 * The AcpiExOpcode* functions are called via the Dispatcher component with
 * fully resolved operands.
!*/

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_opcode_0A_0T_1R
 *
 * PARAMETERS:  walk_state          - Current state (contains AML opcode)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute operator with no operands, one return value
 *
 ******************************************************************************/
acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *return_desc = NULL;

	ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
				acpi_ps_get_opcode_name(walk_state->opcode));

	/* Examine the AML opcode */

	switch (walk_state->opcode) {
	case AML_TIMER_OP:	/*  Timer () */

		/* Create a return object of type Integer */

		return_desc =
		    acpi_ut_create_integer_object(acpi_os_get_timer());
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}
		break;

	default:		/*  Unknown opcode  */

		ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
			    walk_state->opcode));
		status = AE_AML_BAD_OPCODE;
		break;
	}

cleanup:

	/* Delete return object on error */

	if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
		acpi_ut_remove_reference(return_desc);
		walk_state->result_obj = NULL;
	} else {
		/* Save the return value */

		walk_state->result_obj = return_desc;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_opcode_1A_0T_0R
 *
 * PARAMETERS:  walk_state          - Current state (contains AML opcode)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute Type 1 monadic operator with numeric operand on
 *              object stack
 *
 ******************************************************************************/
acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
{
	union acpi_operand_object **operand = &walk_state->operands[0];
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
				acpi_ps_get_opcode_name(walk_state->opcode));

	/* Examine the AML opcode */

	switch (walk_state->opcode) {
	case AML_RELEASE_OP:	/*  Release (mutex_object) */

		status = acpi_ex_release_mutex(operand[0], walk_state);
		break;

	case AML_RESET_OP:	/*  Reset (event_object) */

		status = acpi_ex_system_reset_event(operand[0]);
		break;

	case AML_SIGNAL_OP:	/*  Signal (event_object) */

		status = acpi_ex_system_signal_event(operand[0]);
		break;

	case AML_SLEEP_OP:	/*  Sleep (msec_time) */

		status = acpi_ex_system_do_sleep(operand[0]->integer.value);
		break;

	case AML_STALL_OP:	/*  Stall (usec_time) */

		status =
		    acpi_ex_system_do_stall((u32) operand[0]->integer.value);
		break;

	case AML_UNLOAD_OP:	/*  Unload (Handle) */

		status = acpi_ex_unload_table(operand[0]);
		break;

	default:		/*  Unknown opcode  */

		ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
			    walk_state->opcode));
		status = AE_AML_BAD_OPCODE;
		break;
	}

	return_ACPI_STATUS(status);
}

#ifdef _OBSOLETE_CODE		/* Was originally used for Load() operator */
/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_opcode_1A_1T_0R
 *
 * PARAMETERS:  walk_state          - Current state (contains AML opcode)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute opcode with one argument, one target, and no
 *              return value.
 *
 ******************************************************************************/
acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	union acpi_operand_object **operand = &walk_state->operands[0];

	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
				acpi_ps_get_opcode_name(walk_state->opcode));

	/* Examine the AML opcode */

	switch (walk_state->opcode) {
#ifdef _OBSOLETE_CODE
	case AML_LOAD_OP:

		status = acpi_ex_load_op(operand[0], operand[1], walk_state);
		break;
#endif

	default:		/* Unknown opcode */

		ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
			    walk_state->opcode));
		status = AE_AML_BAD_OPCODE;
		goto cleanup;
	}

cleanup:

	return_ACPI_STATUS(status);
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_opcode_1A_1T_1R
 *
 * PARAMETERS:  walk_state          - Current state (contains AML opcode)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute opcode with one argument, one target, and a
 *              return value.
 *              January 2022: Added Load operator, with new ACPI 6.4
 *              semantics.
 *
 ******************************************************************************/
acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	union acpi_operand_object **operand = &walk_state->operands[0];
	union acpi_operand_object *return_desc = NULL;
	union acpi_operand_object *return_desc2 = NULL;
	u32 temp32;
	u32 i;
	u64 power_of_ten;
	u64 digit;

	ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
				acpi_ps_get_opcode_name(walk_state->opcode));

	/* Examine the AML opcode */

	switch (walk_state->opcode) {
	case AML_BIT_NOT_OP:
	case AML_FIND_SET_LEFT_BIT_OP:
	case AML_FIND_SET_RIGHT_BIT_OP:
	case AML_FROM_BCD_OP:
	case AML_LOAD_OP:
	case AML_TO_BCD_OP:
	case AML_CONDITIONAL_REF_OF_OP:

		/* Create a return object of type Integer for these opcodes */

		return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
		if (!return_desc) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		switch (walk_state->opcode) {
		case AML_BIT_NOT_OP:	/* Not (Operand, Result)  */

			return_desc->integer.value = ~operand[0]->integer.value;
			break;

		case AML_FIND_SET_LEFT_BIT_OP:	/* find_set_left_bit (Operand, Result) */

			return_desc->integer.value = operand[0]->integer.value;

			/*
			 * Acpi specification describes Integer type as a little
			 * endian unsigned value, so this boundary condition is valid.
			 */
			for (temp32 = 0; return_desc->integer.value &&
			     temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
				return_desc->integer.value >>= 1;
			}

			/* Result is the one-based position of the highest set bit */

			return_desc->integer.value = temp32;
			break;

		case AML_FIND_SET_RIGHT_BIT_OP:	/* find_set_right_bit (Operand, Result) */

			return_desc->integer.value = operand[0]->integer.value;

			/*
			 * The Acpi specification describes Integer type as a little
			 * endian unsigned value, so this boundary condition is valid.
			 */
			for (temp32 = 0; return_desc->integer.value &&
			     temp32 < ACPI_INTEGER_BIT_SIZE; ++temp32) {
				return_desc->integer.value <<= 1;
			}

			/* Since the bit position is one-based, subtract from 33 (65) */

			return_desc->integer.value =
			    temp32 ==
			    0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
			break;

		case AML_FROM_BCD_OP:	/* from_bcd (BCDValue, Result)  */
			/*
			 * The 64-bit ACPI integer can hold 16 4-bit BCD characters
			 * (if table is 32-bit, integer can hold 8 BCD characters)
			 * Convert each 4-bit BCD value
			 */
			power_of_ten = 1;
			return_desc->integer.value = 0;
			digit = operand[0]->integer.value;

			/* Convert each BCD digit (each is one nybble wide) */

			for (i = 0;
			     (i < acpi_gbl_integer_nybble_width) && (digit > 0);
			     i++) {

				/* Get the least significant 4-bit BCD digit */

				temp32 = ((u32) digit) & 0xF;

				/* Check the range of the digit */

				if (temp32 > 9) {
					ACPI_ERROR((AE_INFO,
						    "BCD digit too large (not decimal): 0x%X",
						    temp32));

					status = AE_AML_NUMERIC_OVERFLOW;
					goto cleanup;
				}

				/* Sum the digit into the result with the current power of 10 */

				return_desc->integer.value +=
				    (((u64) temp32) * power_of_ten);

				/* Shift to next BCD digit */

				digit >>= 4;

				/* Next power of 10 */

				power_of_ten *= 10;
			}
			break;

		case AML_LOAD_OP:	/* Result1 = Load (Operand[0], Result1) */

			return_desc->integer.value = 0;
			status =
			    acpi_ex_load_op(operand[0], return_desc,
					    walk_state);
			if (ACPI_SUCCESS(status)) {

				/* Return -1 (non-zero) indicates success */

				return_desc->integer.value = 0xFFFFFFFFFFFFFFFF;
			}
			break;

		case AML_TO_BCD_OP:	/* to_bcd (Operand, Result)  */

			return_desc->integer.value = 0;
			digit = operand[0]->integer.value;

			/* Each BCD digit is one nybble wide */

			for (i = 0;
			     (i < acpi_gbl_integer_nybble_width) && (digit > 0);
			     i++) {
				(void)acpi_ut_short_divide(digit, 10, &digit,
							   &temp32);

				/*
				 * Insert the BCD digit that resides in the
				 * remainder from above
				 */
				return_desc->integer.value |=
				    (((u64) temp32) << ACPI_MUL_4(i));
			}

			/* Overflow if there is any data left in Digit */

			if (digit > 0) {
				ACPI_ERROR((AE_INFO,
					    "Integer too large to convert to BCD: 0x%8.8X%8.8X",
					    ACPI_FORMAT_UINT64(operand[0]->
							       integer.value)));
				status = AE_AML_NUMERIC_OVERFLOW;
				goto cleanup;
			}
			break;

		case AML_CONDITIONAL_REF_OF_OP:	/* cond_ref_of (source_object, Result)  */
			/*
			 * This op is a little strange because the internal return value is
			 * different than the return value stored in the result descriptor
			 * (There are really two return values)
			 */
			if ((struct acpi_namespace_node *)operand[0] ==
			    acpi_gbl_root_node) {
				/*
				 * This means that the object does not exist in the namespace,
				 * return FALSE
				 */
				return_desc->integer.value = 0;
				goto cleanup;
			}

			/* Get the object reference, store it, and remove our reference */

			status = acpi_ex_get_object_reference(operand[0],
							      &return_desc2,
							      walk_state);
			if (ACPI_FAILURE(status)) {
				goto cleanup;
			}

			status =
			    acpi_ex_store(return_desc2, operand[1], walk_state);
			acpi_ut_remove_reference(return_desc2);

			/* The object exists in the namespace, return TRUE */

			return_desc->integer.value = ACPI_UINT64_MAX;
			goto cleanup;

		default:

			/* No other opcodes get here */

			break;
		}
		break;

	case AML_STORE_OP:	/* Store (Source, Target) */
		/*
		 * A store operand is typically a number, string, buffer or lvalue
		 * Be careful about deleting the source object,
		 * since the object itself may have been stored.
		 */
		status = acpi_ex_store(operand[0], operand[1], walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* It is possible that the Store already produced a return object */

		if (!walk_state->result_obj) {
			/*
			 * Normally, we would remove a reference on the Operand[0]
			 * parameter; But since it is being used as the internal return
			 * object (meaning we would normally increment it), the two
			 * cancel out, and we simply don't do anything.
			 */
			walk_state->result_obj = operand[0];
			walk_state->operands[0] = NULL;	/* Prevent deletion */
		}
		return_ACPI_STATUS(status);

		/*
		 * ACPI 2.0 Opcodes
		 */
	case AML_COPY_OBJECT_OP:	/* copy_object (Source, Target) */

		status =
		    acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc,
						    walk_state);
		break;

	case AML_TO_DECIMAL_STRING_OP:	/* to_decimal_string (Data, Result) */

		status =
		    acpi_ex_convert_to_string(operand[0], &return_desc,
					      ACPI_EXPLICIT_CONVERT_DECIMAL);
		if (return_desc == operand[0]) {

			/* No conversion performed, add ref to handle return value */

			acpi_ut_add_reference(return_desc);
		}
		break;

	case AML_TO_HEX_STRING_OP:	/* to_hex_string (Data, Result) */

		status =
		    acpi_ex_convert_to_string(operand[0], &return_desc,
					      ACPI_EXPLICIT_CONVERT_HEX);
		if (return_desc == operand[0]) {

			/* No conversion performed, add ref to handle return value */

			acpi_ut_add_reference(return_desc);
		}
		break;

	case AML_TO_BUFFER_OP:	/* to_buffer (Data, Result) */

		status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
		if (return_desc == operand[0]) {

			/* No conversion performed, add ref to handle return value */

			acpi_ut_add_reference(return_desc);
		}
		break;

	case AML_TO_INTEGER_OP:	/* to_integer (Data, Result) */

		/* Perform "explicit" conversion */

		status = acpi_ex_convert_to_integer(operand[0], &return_desc, 0);
		if (return_desc == operand[0]) {

			/* No conversion performed, add ref to handle return value */

			acpi_ut_add_reference(return_desc);
		}
		break;

	case AML_SHIFT_LEFT_BIT_OP:	/* shift_left_bit (Source, bit_num) */
	case AML_SHIFT_RIGHT_BIT_OP:	/* shift_right_bit (Source, bit_num) */

		/* These are two obsolete opcodes */

		ACPI_ERROR((AE_INFO,
			    "%s is obsolete and not implemented",
			    acpi_ps_get_opcode_name(walk_state->opcode)));
		status = AE_SUPPORT;
		goto cleanup;

	default:		/* Unknown opcode */

		ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
			    walk_state->opcode));
		status = AE_AML_BAD_OPCODE;
		goto cleanup;
	}

	if (ACPI_SUCCESS(status)) {

		/* Store the return value computed above into the target object */
status = acpi_ex_store(return_desc, operand[1], walk_state); } cleanup: /* Delete return object on error */ if (ACPI_FAILURE(status)) { acpi_ut_remove_reference(return_desc); } /* Save return object on success */ else if (!walk_state->result_obj) { walk_state->result_obj = return_desc; } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_opcode_1A_0T_1R * * PARAMETERS: walk_state - Current state (contains AML opcode) * * RETURN: Status * * DESCRIPTION: Execute opcode with one argument, no target, and a return value * ******************************************************************************/ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) { union acpi_operand_object **operand = &walk_state->operands[0]; union acpi_operand_object *temp_desc; union acpi_operand_object *return_desc = NULL; acpi_status status = AE_OK; u32 type; u64 value; ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R, acpi_ps_get_opcode_name(walk_state->opcode)); /* Examine the AML opcode */ switch (walk_state->opcode) { case AML_LOGICAL_NOT_OP: /* LNot (Operand) */ return_desc = acpi_ut_create_integer_object((u64) 0); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } /* * Set result to ONES (TRUE) if Value == 0. Note: * return_desc->Integer.Value is initially == 0 (FALSE) from above. */ if (!operand[0]->integer.value) { return_desc->integer.value = ACPI_UINT64_MAX; } break; case AML_DECREMENT_OP: /* Decrement (Operand) */ case AML_INCREMENT_OP: /* Increment (Operand) */ /* * Create a new integer. Can't just get the base integer and * increment it because it may be an Arg or Field. */ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } /* * Since we are expecting a Reference operand, it can be either a * NS Node or an internal object. 
*/ temp_desc = operand[0]; if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) == ACPI_DESC_TYPE_OPERAND) { /* Internal reference object - prevent deletion */ acpi_ut_add_reference(temp_desc); } /* * Convert the Reference operand to an Integer (This removes a * reference on the Operand[0] object) * * NOTE: We use LNOT_OP here in order to force resolution of the * reference operand to an actual integer. */ status = acpi_ex_resolve_operands(AML_LOGICAL_NOT_OP, &temp_desc, walk_state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While resolving operands for [%s]", acpi_ps_get_opcode_name(walk_state-> opcode))); goto cleanup; } /* * temp_desc is now guaranteed to be an Integer object -- * Perform the actual increment or decrement */ if (walk_state->opcode == AML_INCREMENT_OP) { return_desc->integer.value = temp_desc->integer.value + 1; } else { return_desc->integer.value = temp_desc->integer.value - 1; } /* Finished with this Integer object */ acpi_ut_remove_reference(temp_desc); /* * Store the result back (indirectly) through the original * Reference object */ status = acpi_ex_store(return_desc, operand[0], walk_state); break; case AML_OBJECT_TYPE_OP: /* object_type (source_object) */ /* * Note: The operand is not resolved at this point because we want to * get the associated object, not its value. For example, we don't * want to resolve a field_unit to its value, we want the actual * field_unit object. */ /* Get the type of the base object */ status = acpi_ex_resolve_multiple(walk_state, operand[0], &type, NULL); if (ACPI_FAILURE(status)) { goto cleanup; } /* Allocate a descriptor to hold the type. */ return_desc = acpi_ut_create_integer_object((u64) type); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } break; case AML_SIZE_OF_OP: /* size_of (source_object) */ /* * Note: The operand is not resolved at this point because we want to * get the associated object, not its value. 
*/ /* Get the base object */ status = acpi_ex_resolve_multiple(walk_state, operand[0], &type, &temp_desc); if (ACPI_FAILURE(status)) { goto cleanup; } /* * The type of the base object must be integer, buffer, string, or * package. All others are not supported. * * NOTE: Integer is not specifically supported by the ACPI spec, * but is supported implicitly via implicit operand conversion. * rather than bother with conversion, we just use the byte width * global (4 or 8 bytes). */ switch (type) { case ACPI_TYPE_INTEGER: value = acpi_gbl_integer_byte_width; break; case ACPI_TYPE_STRING: value = temp_desc->string.length; break; case ACPI_TYPE_BUFFER: /* Buffer arguments may not be evaluated at this point */ status = acpi_ds_get_buffer_arguments(temp_desc); value = temp_desc->buffer.length; break; case ACPI_TYPE_PACKAGE: /* Package arguments may not be evaluated at this point */ status = acpi_ds_get_package_arguments(temp_desc); value = temp_desc->package.count; break; default: ACPI_ERROR((AE_INFO, "Operand must be Buffer/Integer/String/Package" " - found type %s", acpi_ut_get_type_name(type))); status = AE_AML_OPERAND_TYPE; goto cleanup; } if (ACPI_FAILURE(status)) { goto cleanup; } /* * Now that we have the size of the object, create a result * object to hold the value */ return_desc = acpi_ut_create_integer_object(value); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } break; case AML_REF_OF_OP: /* ref_of (source_object) */ status = acpi_ex_get_object_reference(operand[0], &return_desc, walk_state); if (ACPI_FAILURE(status)) { goto cleanup; } break; case AML_DEREF_OF_OP: /* deref_of (obj_reference | String) */ /* Check for a method local or argument, or standalone String */ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) == ACPI_DESC_TYPE_NAMED) { temp_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) operand[0]); if (temp_desc && ((temp_desc->common.type == ACPI_TYPE_STRING) || (temp_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE))) { operand[0] = 
temp_desc; acpi_ut_add_reference(temp_desc); } else { status = AE_AML_OPERAND_TYPE; goto cleanup; } } else { switch ((operand[0])->common.type) { case ACPI_TYPE_LOCAL_REFERENCE: /* * This is a deref_of (local_x | arg_x) * * Must resolve/dereference the local/arg reference first */ switch (operand[0]->reference.class) { case ACPI_REFCLASS_LOCAL: case ACPI_REFCLASS_ARG: /* Set Operand[0] to the value of the local/arg */ status = acpi_ds_method_data_get_value (operand[0]->reference.class, operand[0]->reference.value, walk_state, &temp_desc); if (ACPI_FAILURE(status)) { goto cleanup; } /* * Delete our reference to the input object and * point to the object just retrieved */ acpi_ut_remove_reference(operand[0]); operand[0] = temp_desc; break; case ACPI_REFCLASS_REFOF: /* Get the object to which the reference refers */ temp_desc = operand[0]->reference.object; acpi_ut_remove_reference(operand[0]); operand[0] = temp_desc; break; default: /* Must be an Index op - handled below */ break; } break; case ACPI_TYPE_STRING: break; default: status = AE_AML_OPERAND_TYPE; goto cleanup; } } if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) != ACPI_DESC_TYPE_NAMED) { if ((operand[0])->common.type == ACPI_TYPE_STRING) { /* * This is a deref_of (String). The string is a reference * to a named ACPI object. * * 1) Find the owning Node * 2) Dereference the node to an actual object. Could be a * Field, so we need to resolve the node to a value. */ status = acpi_ns_get_node_unlocked(walk_state-> scope_info->scope. 
node, operand[0]-> string.pointer, ACPI_NS_SEARCH_PARENT, ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, &return_desc)); if (ACPI_FAILURE(status)) { goto cleanup; } status = acpi_ex_resolve_node_to_value (ACPI_CAST_INDIRECT_PTR (struct acpi_namespace_node, &return_desc), walk_state); goto cleanup; } } /* Operand[0] may have changed from the code above */ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) == ACPI_DESC_TYPE_NAMED) { /* * This is a deref_of (object_reference) * Get the actual object from the Node (This is the dereference). * This case may only happen when a local_x or arg_x is * dereferenced above, or for references to device and * thermal objects. */ switch (((struct acpi_namespace_node *)operand[0])-> type) { case ACPI_TYPE_DEVICE: case ACPI_TYPE_THERMAL: /* These types have no node subobject, return the NS node */ return_desc = operand[0]; break; default: /* For most types, get the object attached to the node */ return_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)operand[0]); acpi_ut_add_reference(return_desc); break; } } else { /* * This must be a reference object produced by either the * Index() or ref_of() operator */ switch (operand[0]->reference.class) { case ACPI_REFCLASS_INDEX: /* * The target type for the Index operator must be * either a Buffer or a Package */ switch (operand[0]->reference.target_type) { case ACPI_TYPE_BUFFER_FIELD: temp_desc = operand[0]->reference.object; /* * Create a new object that contains one element of the * buffer -- the element pointed to by the index. * * NOTE: index into a buffer is NOT a pointer to a * sub-buffer of the main buffer, it is only a pointer to a * single element (byte) of the buffer! * * Since we are returning the value of the buffer at the * indexed location, we don't need to add an additional * reference to the buffer itself. 
*/ return_desc = acpi_ut_create_integer_object((u64) temp_desc->buffer.pointer[operand[0]->reference.value]); if (!return_desc) { status = AE_NO_MEMORY; goto cleanup; } break; case ACPI_TYPE_PACKAGE: /* * Return the referenced element of the package. We must * add another reference to the referenced object, however. */ return_desc = *(operand[0]->reference.where); if (!return_desc) { /* * Element is NULL, do not allow the dereference. * This provides compatibility with other ACPI * implementations. */ return_ACPI_STATUS (AE_AML_UNINITIALIZED_ELEMENT); } acpi_ut_add_reference(return_desc); break; default: ACPI_ERROR((AE_INFO, "Unknown Index TargetType 0x%X in reference object %p", operand[0]->reference. target_type, operand[0])); status = AE_AML_OPERAND_TYPE; goto cleanup; } break; case ACPI_REFCLASS_REFOF: return_desc = operand[0]->reference.object; if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) == ACPI_DESC_TYPE_NAMED) { return_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) return_desc); if (!return_desc) { break; } /* * June 2013: * buffer_fields/field_units require additional resolution */ switch (return_desc->common.type) { case ACPI_TYPE_BUFFER_FIELD: case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_LOCAL_BANK_FIELD: case ACPI_TYPE_LOCAL_INDEX_FIELD: status = acpi_ex_read_data_from_field (walk_state, return_desc, &temp_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS (status); } return_desc = temp_desc; break; default: /* Add another reference to the object */ acpi_ut_add_reference (return_desc); break; } } break; default: ACPI_ERROR((AE_INFO, "Unknown class in reference(%p) - 0x%2.2X", operand[0], operand[0]->reference.class)); status = AE_TYPE; goto cleanup; } } break; default: ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", walk_state->opcode)); status = AE_AML_BAD_OPCODE; goto cleanup; } cleanup: /* Delete return object on error */ if (ACPI_FAILURE(status)) { acpi_ut_remove_reference(return_desc); } /* Save return object on success */ 
else { walk_state->result_obj = return_desc; } return_ACPI_STATUS(status); }
linux-master
drivers/acpi/acpica/exoparg1.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: rsirq - IRQ resource descriptors * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rsirq") /******************************************************************************* * * acpi_rs_get_irq * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_get_irq[9] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ, ACPI_RS_SIZE(struct acpi_resource_irq), ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)}, /* Get the IRQ mask (bytes 1:2) */ {ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]), AML_OFFSET(irq.irq_mask), ACPI_RS_OFFSET(data.irq.interrupt_count)}, /* Set default flags (others are zero) */ {ACPI_RSC_SET8, ACPI_RS_OFFSET(data.irq.triggering), ACPI_EDGE_SENSITIVE, 1}, /* Get the descriptor length (2 or 3 for IRQ descriptor) */ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.irq.descriptor_length), AML_OFFSET(irq.descriptor_type), 0}, /* All done if no flag byte present in descriptor */ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3}, /* Get flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering), AML_OFFSET(irq.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity), AML_OFFSET(irq.flags), 3}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable), AML_OFFSET(irq.flags), 4}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable), AML_OFFSET(irq.flags), 5} }; /******************************************************************************* * * acpi_rs_set_irq * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_set_irq[14] = { /* Start with a default descriptor of length 3 */ 
{ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ, sizeof(struct aml_resource_irq), ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)}, /* Convert interrupt list to 16-bit IRQ bitmask */ {ACPI_RSC_BITMASK16, ACPI_RS_OFFSET(data.irq.interrupts[0]), AML_OFFSET(irq.irq_mask), ACPI_RS_OFFSET(data.irq.interrupt_count)}, /* Set flags: Triggering[0], Polarity[3], Sharing[4], Wake[5] */ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering), AML_OFFSET(irq.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.polarity), AML_OFFSET(irq.flags), 3}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable), AML_OFFSET(irq.flags), 4}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.wake_capable), AML_OFFSET(irq.flags), 5}, /* * All done if the output descriptor length is required to be 3 * (i.e., optimization to 2 bytes cannot be attempted) */ {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE, ACPI_RS_OFFSET(data.irq.descriptor_length), 3}, /* Set length to 2 bytes (no flags byte) */ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)}, /* * All done if the output descriptor length is required to be 2. * * TBD: Perhaps we should check for error if input flags are not * compatible with a 2-byte descriptor. */ {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE, ACPI_RS_OFFSET(data.irq.descriptor_length), 2}, /* Reset length to 3 bytes (descriptor with flags byte) */ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq)}, /* * Check if the flags byte is necessary. 
Not needed if the flags are: * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE */ {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE, ACPI_RS_OFFSET(data.irq.triggering), ACPI_EDGE_SENSITIVE}, {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE, ACPI_RS_OFFSET(data.irq.polarity), ACPI_ACTIVE_HIGH}, {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE, ACPI_RS_OFFSET(data.irq.shareable), ACPI_EXCLUSIVE}, /* We can optimize to a 2-byte irq_no_flags() descriptor */ {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)} }; /******************************************************************************* * * acpi_rs_convert_ext_irq * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_EXTENDED_IRQ, ACPI_RS_SIZE(struct acpi_resource_extended_irq), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_ext_irq)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_EXTENDED_IRQ, sizeof(struct aml_resource_extended_irq), 0}, /* * Flags: Producer/Consumer[0], Triggering[1], Polarity[2], * Sharing[3], Wake[4] */ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.producer_consumer), AML_OFFSET(extended_irq.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.triggering), AML_OFFSET(extended_irq.flags), 1}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.polarity), AML_OFFSET(extended_irq.flags), 2}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.shareable), AML_OFFSET(extended_irq.flags), 3}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.wake_capable), AML_OFFSET(extended_irq.flags), 4}, /* IRQ Table length (Byte4) */ {ACPI_RSC_COUNT, ACPI_RS_OFFSET(data.extended_irq.interrupt_count), AML_OFFSET(extended_irq.interrupt_count), sizeof(u32)}, /* Copy every IRQ in the table, each is 32 bits */ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.extended_irq.interrupts[0]), AML_OFFSET(extended_irq.interrupts[0]), 0}, /* Optional resource_source (Index and String) */ {ACPI_RSC_SOURCEX, 
ACPI_RS_OFFSET(data.extended_irq.resource_source), ACPI_RS_OFFSET(data.extended_irq.interrupts[0]), sizeof(struct aml_resource_extended_irq)} }; /******************************************************************************* * * acpi_rs_convert_dma * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_DMA, ACPI_RS_SIZE(struct acpi_resource_dma), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_dma)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_DMA, sizeof(struct aml_resource_dma), 0}, /* Flags: transfer preference, bus mastering, channel speed */ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.transfer), AML_OFFSET(dma.flags), 0}, {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.dma.bus_master), AML_OFFSET(dma.flags), 2}, {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.dma.type), AML_OFFSET(dma.flags), 5}, /* DMA channel mask bits */ {ACPI_RSC_BITMASK, ACPI_RS_OFFSET(data.dma.channels[0]), AML_OFFSET(dma.dma_channel_mask), ACPI_RS_OFFSET(data.dma.channel_count)} }; /******************************************************************************* * * acpi_rs_convert_fixed_dma * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA, ACPI_RS_SIZE(struct acpi_resource_fixed_dma), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA, sizeof(struct aml_resource_fixed_dma), 0}, /* * These fields are contiguous in both the source and destination: * request_lines * Channels */ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines), AML_OFFSET(fixed_dma.request_lines), 2}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width), AML_OFFSET(fixed_dma.width), 1}, };
linux-master
drivers/acpi/acpica/rsirq.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: dbobject - ACPI object decode and display * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acdebug.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbobject") /* Local prototypes */ static void acpi_db_decode_node(struct acpi_namespace_node *node); /******************************************************************************* * * FUNCTION: acpi_db_dump_method_info * * PARAMETERS: status - Method execution status * walk_state - Current state of the parse tree walk * * RETURN: None * * DESCRIPTION: Called when a method has been aborted because of an error. * Dumps the method execution stack, and the method locals/args, * and disassembles the AML opcode that failed. * ******************************************************************************/ void acpi_db_dump_method_info(acpi_status status, struct acpi_walk_state *walk_state) { struct acpi_thread_state *thread; struct acpi_namespace_node *node; node = walk_state->method_node; /* There are no locals or arguments for the module-level code case */ if (node == acpi_gbl_root_node) { return; } /* Ignore control codes, they are not errors */ if (ACPI_CNTL_EXCEPTION(status)) { return; } /* We may be executing a deferred opcode */ if (walk_state->deferred_node) { acpi_os_printf("Executing subtree for Buffer/Package/Region\n"); return; } /* * If there is no Thread, we are not actually executing a method. * This can happen when the iASL compiler calls the interpreter * to perform constant folding. 
*/ thread = walk_state->thread; if (!thread) { return; } /* Display the method locals and arguments */ acpi_os_printf("\n"); acpi_db_decode_locals(walk_state); acpi_os_printf("\n"); acpi_db_decode_arguments(walk_state); acpi_os_printf("\n"); } /******************************************************************************* * * FUNCTION: acpi_db_decode_internal_object * * PARAMETERS: obj_desc - Object to be displayed * * RETURN: None * * DESCRIPTION: Short display of an internal object. Numbers/Strings/Buffers. * ******************************************************************************/ void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc) { u32 i; if (!obj_desc) { acpi_os_printf(" Uninitialized"); return; } if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) { acpi_os_printf(" %p [%s]", obj_desc, acpi_ut_get_descriptor_name(obj_desc)); return; } acpi_os_printf(" %s", acpi_ut_get_object_type_name(obj_desc)); switch (obj_desc->common.type) { case ACPI_TYPE_INTEGER: acpi_os_printf(" %8.8X%8.8X", ACPI_FORMAT_UINT64(obj_desc->integer.value)); break; case ACPI_TYPE_STRING: acpi_os_printf("(%u) \"%.60s", obj_desc->string.length, obj_desc->string.pointer); if (obj_desc->string.length > 60) { acpi_os_printf("..."); } else { acpi_os_printf("\""); } break; case ACPI_TYPE_BUFFER: acpi_os_printf("(%u)", obj_desc->buffer.length); for (i = 0; (i < 8) && (i < obj_desc->buffer.length); i++) { acpi_os_printf(" %2.2X", obj_desc->buffer.pointer[i]); } break; default: acpi_os_printf(" %p", obj_desc); break; } } /******************************************************************************* * * FUNCTION: acpi_db_decode_node * * PARAMETERS: node - Object to be displayed * * RETURN: None * * DESCRIPTION: Short display of a namespace node * ******************************************************************************/ static void acpi_db_decode_node(struct acpi_namespace_node *node) { acpi_os_printf("<Node> Name %4.4s", acpi_ut_get_node_name(node)); 
if (node->flags & ANOBJ_METHOD_ARG) { acpi_os_printf(" [Method Arg]"); } if (node->flags & ANOBJ_METHOD_LOCAL) { acpi_os_printf(" [Method Local]"); } switch (node->type) { /* These types have no attached object */ case ACPI_TYPE_DEVICE: acpi_os_printf(" Device"); break; case ACPI_TYPE_THERMAL: acpi_os_printf(" Thermal Zone"); break; default: acpi_db_decode_internal_object(acpi_ns_get_attached_object (node)); break; } } /******************************************************************************* * * FUNCTION: acpi_db_display_internal_object * * PARAMETERS: obj_desc - Object to be displayed * walk_state - Current walk state * * RETURN: None * * DESCRIPTION: Short display of an internal object * ******************************************************************************/ void acpi_db_display_internal_object(union acpi_operand_object *obj_desc, struct acpi_walk_state *walk_state) { u8 type; acpi_os_printf("%p ", obj_desc); if (!obj_desc) { acpi_os_printf("<Null Object>\n"); return; } /* Decode the object type */ switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) { case ACPI_DESC_TYPE_PARSER: acpi_os_printf("<Parser> "); break; case ACPI_DESC_TYPE_NAMED: acpi_db_decode_node((struct acpi_namespace_node *)obj_desc); break; case ACPI_DESC_TYPE_OPERAND: type = obj_desc->common.type; if (type > ACPI_TYPE_LOCAL_MAX) { acpi_os_printf(" Type %X [Invalid Type]", (u32)type); return; } /* Decode the ACPI object type */ switch (obj_desc->common.type) { case ACPI_TYPE_LOCAL_REFERENCE: acpi_os_printf("[%s] ", acpi_ut_get_reference_name(obj_desc)); /* Decode the reference */ switch (obj_desc->reference.class) { case ACPI_REFCLASS_LOCAL: acpi_os_printf("%X ", obj_desc->reference.value); if (walk_state) { obj_desc = walk_state->local_variables [obj_desc->reference.value].object; acpi_os_printf("%p", obj_desc); acpi_db_decode_internal_object (obj_desc); } break; case ACPI_REFCLASS_ARG: acpi_os_printf("%X ", obj_desc->reference.value); if (walk_state) { obj_desc = walk_state->arguments 
[obj_desc->reference.value].object; acpi_os_printf("%p", obj_desc); acpi_db_decode_internal_object (obj_desc); } break; case ACPI_REFCLASS_INDEX: switch (obj_desc->reference.target_type) { case ACPI_TYPE_BUFFER_FIELD: acpi_os_printf("%p", obj_desc->reference. object); acpi_db_decode_internal_object (obj_desc->reference.object); break; case ACPI_TYPE_PACKAGE: acpi_os_printf("%p", obj_desc->reference. where); if (!obj_desc->reference.where) { acpi_os_printf (" Uninitialized WHERE pointer"); } else { acpi_db_decode_internal_object(* (obj_desc-> reference. where)); } break; default: acpi_os_printf ("Unknown index target type"); break; } break; case ACPI_REFCLASS_REFOF: if (!obj_desc->reference.object) { acpi_os_printf ("Uninitialized reference subobject pointer"); break; } /* Reference can be to a Node or an Operand object */ switch (ACPI_GET_DESCRIPTOR_TYPE (obj_desc->reference.object)) { case ACPI_DESC_TYPE_NAMED: acpi_db_decode_node(obj_desc->reference. object); break; case ACPI_DESC_TYPE_OPERAND: acpi_db_decode_internal_object (obj_desc->reference.object); break; default: break; } break; case ACPI_REFCLASS_NAME: acpi_db_decode_node(obj_desc->reference.node); break; case ACPI_REFCLASS_DEBUG: case ACPI_REFCLASS_TABLE: acpi_os_printf("\n"); break; default: /* Unknown reference class */ acpi_os_printf("%2.2X\n", obj_desc->reference.class); break; } break; default: acpi_os_printf("<Obj> "); acpi_db_decode_internal_object(obj_desc); break; } break; default: acpi_os_printf("<Not a valid ACPI Object Descriptor> [%s]", acpi_ut_get_descriptor_name(obj_desc)); break; } acpi_os_printf("\n"); } /******************************************************************************* * * FUNCTION: acpi_db_decode_locals * * PARAMETERS: walk_state - State for current method * * RETURN: None * * DESCRIPTION: Display all locals for the currently running control method * ******************************************************************************/ void acpi_db_decode_locals(struct 
acpi_walk_state *walk_state) { u32 i; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; u8 display_locals = FALSE; node = walk_state->method_node; /* There are no locals for the module-level code case */ if (node == acpi_gbl_root_node) { return; } if (!node) { acpi_os_printf ("No method node (Executing subtree for buffer or opregion)\n"); return; } if (node->type != ACPI_TYPE_METHOD) { acpi_os_printf("Executing subtree for Buffer/Package/Region\n"); return; } /* Are any locals actually set? */ for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) { obj_desc = walk_state->local_variables[i].object; if (obj_desc) { display_locals = TRUE; break; } } /* If any are set, only display the ones that are set */ if (display_locals) { acpi_os_printf ("\nInitialized Local Variables for Method [%4.4s]:\n", acpi_ut_get_node_name(node)); for (i = 0; i < ACPI_METHOD_NUM_LOCALS; i++) { obj_desc = walk_state->local_variables[i].object; if (obj_desc) { acpi_os_printf(" Local%X: ", i); acpi_db_display_internal_object(obj_desc, walk_state); } } } else { acpi_os_printf ("No Local Variables are initialized for Method [%4.4s]\n", acpi_ut_get_node_name(node)); } } /******************************************************************************* * * FUNCTION: acpi_db_decode_arguments * * PARAMETERS: walk_state - State for current method * * RETURN: None * * DESCRIPTION: Display all arguments for the currently running control method * ******************************************************************************/ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state) { u32 i; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; u8 display_args = FALSE; node = walk_state->method_node; /* There are no arguments for the module-level code case */ if (node == acpi_gbl_root_node) { return; } if (!node) { acpi_os_printf ("No method node (Executing subtree for buffer or opregion)\n"); return; } if (node->type != ACPI_TYPE_METHOD) { acpi_os_printf("Executing 
subtree for Buffer/Package/Region\n"); return; } /* Are any arguments actually set? */ for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) { obj_desc = walk_state->arguments[i].object; if (obj_desc) { display_args = TRUE; break; } } /* If any are set, only display the ones that are set */ if (display_args) { acpi_os_printf("Initialized Arguments for Method [%4.4s]: " "(%X arguments defined for method invocation)\n", acpi_ut_get_node_name(node), node->object->method.param_count); for (i = 0; i < ACPI_METHOD_NUM_ARGS; i++) { obj_desc = walk_state->arguments[i].object; if (obj_desc) { acpi_os_printf(" Arg%u: ", i); acpi_db_display_internal_object(obj_desc, walk_state); } } } else { acpi_os_printf ("No Arguments are initialized for method [%4.4s]\n", acpi_ut_get_node_name(node)); } }
linux-master
drivers/acpi/acpica/dbobject.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: psscope - Parser scope stack management routines * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psscope") /******************************************************************************* * * FUNCTION: acpi_ps_get_parent_scope * * PARAMETERS: parser_state - Current parser state object * * RETURN: Pointer to an Op object * * DESCRIPTION: Get parent of current op being parsed * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_parent_scope(struct acpi_parse_state *parser_state) { return (parser_state->scope->parse_scope.op); } /******************************************************************************* * * FUNCTION: acpi_ps_has_completed_scope * * PARAMETERS: parser_state - Current parser state object * * RETURN: Boolean, TRUE = scope completed. * * DESCRIPTION: Is parsing of current argument complete? Determined by * 1) AML pointer is at or beyond the end of the scope * 2) The scope argument count has reached zero. 
* ******************************************************************************/ u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state) { return ((u8) ((parser_state->aml >= parser_state->scope->parse_scope.arg_end || !parser_state->scope->parse_scope.arg_count))); } /******************************************************************************* * * FUNCTION: acpi_ps_init_scope * * PARAMETERS: parser_state - Current parser state object * root - the Root Node of this new scope * * RETURN: Status * * DESCRIPTION: Allocate and init a new scope object * ******************************************************************************/ acpi_status acpi_ps_init_scope(struct acpi_parse_state * parser_state, union acpi_parse_object * root_op) { union acpi_generic_state *scope; ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op); scope = acpi_ut_create_generic_state(); if (!scope) { return_ACPI_STATUS(AE_NO_MEMORY); } scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE; scope->parse_scope.op = root_op; scope->parse_scope.arg_count = ACPI_VAR_ARGS; scope->parse_scope.arg_end = parser_state->aml_end; scope->parse_scope.pkg_end = parser_state->aml_end; parser_state->scope = scope; parser_state->start_op = root_op; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ps_push_scope * * PARAMETERS: parser_state - Current parser state object * op - Current op to be pushed * remaining_args - List of args remaining * arg_count - Fixed or variable number of args * * RETURN: Status * * DESCRIPTION: Push current op to begin parsing its argument * ******************************************************************************/ acpi_status acpi_ps_push_scope(struct acpi_parse_state *parser_state, union acpi_parse_object *op, u32 remaining_args, u32 arg_count) { union acpi_generic_state *scope; ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op); scope = acpi_ut_create_generic_state(); if (!scope) { 
return_ACPI_STATUS(AE_NO_MEMORY); } scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE; scope->parse_scope.op = op; scope->parse_scope.arg_list = remaining_args; scope->parse_scope.arg_count = arg_count; scope->parse_scope.pkg_end = parser_state->pkg_end; /* Push onto scope stack */ acpi_ut_push_generic_state(&parser_state->scope, scope); if (arg_count == ACPI_VAR_ARGS) { /* Multiple arguments */ scope->parse_scope.arg_end = parser_state->pkg_end; } else { /* Single argument */ scope->parse_scope.arg_end = ACPI_TO_POINTER(ACPI_MAX_PTR); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ps_pop_scope * * PARAMETERS: parser_state - Current parser state object * op - Where the popped op is returned * arg_list - Where the popped "next argument" is * returned * arg_count - Count of objects in arg_list * * RETURN: Status * * DESCRIPTION: Return to parsing a previous op * ******************************************************************************/ void acpi_ps_pop_scope(struct acpi_parse_state *parser_state, union acpi_parse_object **op, u32 * arg_list, u32 * arg_count) { union acpi_generic_state *scope = parser_state->scope; ACPI_FUNCTION_TRACE(ps_pop_scope); /* Only pop the scope if there is in fact a next scope */ if (scope->common.next) { scope = acpi_ut_pop_generic_state(&parser_state->scope); /* Return to parsing previous op */ *op = scope->parse_scope.op; *arg_list = scope->parse_scope.arg_list; *arg_count = scope->parse_scope.arg_count; parser_state->pkg_end = scope->parse_scope.pkg_end; /* All done with this scope state structure */ acpi_ut_delete_generic_state(scope); } else { /* Empty parse stack, prepare to fetch next opcode */ *op = NULL; *arg_list = 0; *arg_count = 0; } ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped Op %p Args %X\n", *op, *arg_count)); return_VOID; } /******************************************************************************* * * FUNCTION: 
acpi_ps_cleanup_scope * * PARAMETERS: parser_state - Current parser state object * * RETURN: None * * DESCRIPTION: Destroy available list, remaining stack levels, and return * root scope * ******************************************************************************/ void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state) { union acpi_generic_state *scope; ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state); if (!parser_state) { return_VOID; } /* Delete anything on the scope stack */ while (parser_state->scope) { scope = acpi_ut_pop_generic_state(&parser_state->scope); acpi_ut_delete_generic_state(scope); } return_VOID; }
linux-master
drivers/acpi/acpica/psscope.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: evgpeblk - GPE block creation and initialization. * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evgpeblk") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /* Local prototypes */ static acpi_status acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, u32 interrupt_number); static acpi_status acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block); /******************************************************************************* * * FUNCTION: acpi_ev_install_gpe_block * * PARAMETERS: gpe_block - New GPE block * interrupt_number - Xrupt to be associated with this * GPE block * * RETURN: Status * * DESCRIPTION: Install new GPE block with mutex support * ******************************************************************************/ static acpi_status acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, u32 interrupt_number) { struct acpi_gpe_block_info *next_gpe_block; struct acpi_gpe_xrupt_info *gpe_xrupt_block; acpi_status status; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(ev_install_gpe_block); status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* Install the new block at the end of the list with lock */ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); if (gpe_xrupt_block->gpe_block_list_head) { next_gpe_block = gpe_xrupt_block->gpe_block_list_head; while (next_gpe_block->next) { next_gpe_block = next_gpe_block->next; } next_gpe_block->next = gpe_block; gpe_block->previous = next_gpe_block; } 
else { gpe_xrupt_block->gpe_block_list_head = gpe_block; } gpe_block->xrupt_block = gpe_xrupt_block; acpi_os_release_lock(acpi_gbl_gpe_lock, flags); unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_delete_gpe_block * * PARAMETERS: gpe_block - Existing GPE block * * RETURN: Status * * DESCRIPTION: Remove a GPE block * ******************************************************************************/ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block) { acpi_status status; acpi_cpu_flags flags; ACPI_FUNCTION_TRACE(ev_install_gpe_block); status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Disable all GPEs in this block */ status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (!gpe_block->previous && !gpe_block->next) { /* This is the last gpe_block on this interrupt */ status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } } else { /* Remove the block on this interrupt with lock */ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); if (gpe_block->previous) { gpe_block->previous->next = gpe_block->next; } else { gpe_block->xrupt_block->gpe_block_list_head = gpe_block->next; } if (gpe_block->next) { gpe_block->next->previous = gpe_block->previous; } acpi_os_release_lock(acpi_gbl_gpe_lock, flags); } acpi_current_gpe_count -= gpe_block->gpe_count; /* Free the gpe_block */ ACPI_FREE(gpe_block->register_info); ACPI_FREE(gpe_block->event_info); ACPI_FREE(gpe_block); unlock_and_exit: status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_create_gpe_info_blocks * * PARAMETERS: gpe_block - New 
GPE block * * RETURN: Status * * DESCRIPTION: Create the register_info and event_info blocks for this GPE block * ******************************************************************************/ static acpi_status acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) { struct acpi_gpe_register_info *gpe_register_info = NULL; struct acpi_gpe_event_info *gpe_event_info = NULL; struct acpi_gpe_event_info *this_event; struct acpi_gpe_register_info *this_register; u32 i; u32 j; acpi_status status; ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks); /* Allocate the GPE register information block */ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block-> register_count * sizeof(struct acpi_gpe_register_info)); if (!gpe_register_info) { ACPI_ERROR((AE_INFO, "Could not allocate the GpeRegisterInfo table")); return_ACPI_STATUS(AE_NO_MEMORY); } /* * Allocate the GPE event_info block. There are eight distinct GPEs * per register. Initialization to zeros is sufficient. */ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count * sizeof(struct acpi_gpe_event_info)); if (!gpe_event_info) { ACPI_ERROR((AE_INFO, "Could not allocate the GpeEventInfo table")); status = AE_NO_MEMORY; goto error_exit; } /* Save the new Info arrays in the GPE block */ gpe_block->register_info = gpe_register_info; gpe_block->event_info = gpe_event_info; /* * Initialize the GPE Register and Event structures. A goal of these * tables is to hide the fact that there are two separate GPE register * sets in a given GPE hardware block, the status registers occupy the * first half, and the enable registers occupy the second half. 
*/ this_register = gpe_register_info; this_event = gpe_event_info; for (i = 0; i < gpe_block->register_count; i++) { /* Init the register_info for this GPE register (8 GPEs) */ this_register->base_gpe_number = (u16) (gpe_block->block_base_number + (i * ACPI_GPE_REGISTER_WIDTH)); this_register->status_address.address = gpe_block->address + i; this_register->enable_address.address = gpe_block->address + i + gpe_block->register_count; this_register->status_address.space_id = gpe_block->space_id; this_register->enable_address.space_id = gpe_block->space_id; /* Init the event_info for each GPE within this register */ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { this_event->gpe_number = (u8) (this_register->base_gpe_number + j); this_event->register_info = this_register; this_event++; } /* Disable all GPEs within this register */ status = acpi_hw_gpe_write(0x00, &this_register->enable_address); if (ACPI_FAILURE(status)) { goto error_exit; } /* Clear any pending GPE events within this register */ status = acpi_hw_gpe_write(0xFF, &this_register->status_address); if (ACPI_FAILURE(status)) { goto error_exit; } this_register++; } return_ACPI_STATUS(AE_OK); error_exit: if (gpe_register_info) { ACPI_FREE(gpe_register_info); } if (gpe_event_info) { ACPI_FREE(gpe_event_info); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_create_gpe_block * * PARAMETERS: gpe_device - Handle to the parent GPE block * gpe_block_address - Address and space_ID * register_count - Number of GPE register pairs in the block * gpe_block_base_number - Starting GPE number for the block * interrupt_number - H/W interrupt for the block * return_gpe_block - Where the new block descriptor is returned * * RETURN: Status * * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within * the block are disabled at exit. * Note: Assumes namespace is locked. 
* ******************************************************************************/ acpi_status acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, u64 address, u8 space_id, u32 register_count, u16 gpe_block_base_number, u32 interrupt_number, struct acpi_gpe_block_info **return_gpe_block) { acpi_status status; struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_walk_info walk_info; ACPI_FUNCTION_TRACE(ev_create_gpe_block); if (!register_count) { return_ACPI_STATUS(AE_OK); } /* Validate the space_ID */ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) && (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) { ACPI_ERROR((AE_INFO, "Unsupported address space: 0x%X", space_id)); return_ACPI_STATUS(AE_SUPPORT); } if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) { status = acpi_hw_validate_io_block(address, ACPI_GPE_REGISTER_WIDTH, register_count); if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); } /* Allocate a new GPE block */ gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info)); if (!gpe_block) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Initialize the new GPE block */ gpe_block->address = address; gpe_block->space_id = space_id; gpe_block->node = gpe_device; gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); gpe_block->initialized = FALSE; gpe_block->register_count = register_count; gpe_block->block_base_number = gpe_block_base_number; /* * Create the register_info and event_info sub-structures * Note: disables and clears all GPEs in the block */ status = acpi_ev_create_gpe_info_blocks(gpe_block); if (ACPI_FAILURE(status)) { ACPI_FREE(gpe_block); return_ACPI_STATUS(status); } /* Install the new block in the global lists */ status = acpi_ev_install_gpe_block(gpe_block, interrupt_number); if (ACPI_FAILURE(status)) { ACPI_FREE(gpe_block->register_info); ACPI_FREE(gpe_block->event_info); ACPI_FREE(gpe_block); return_ACPI_STATUS(status); } acpi_gbl_all_gpes_initialized = FALSE; /* Find all GPE methods (_Lxx or_Exx) for this block */ 
walk_info.gpe_block = gpe_block; walk_info.gpe_device = gpe_device; walk_info.execute_by_owner_id = FALSE; (void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ev_match_gpe_method, NULL, &walk_info, NULL); /* Return the new block */ if (return_gpe_block) { (*return_gpe_block) = gpe_block; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n", (u32)gpe_block->block_base_number, (u32)(gpe_block->block_base_number + (gpe_block->gpe_count - 1)), gpe_device->name.ascii, gpe_block->register_count, interrupt_number, interrupt_number == acpi_gbl_FADT.sci_interrupt ? " (SCI)" : "")); /* Update global count of currently available GPEs */ acpi_current_gpe_count += gpe_block->gpe_count; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ev_initialize_gpe_block * * PARAMETERS: acpi_gpe_callback * * RETURN: Status * * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have * associated methods. * Note: Assumes namespace is locked. * ******************************************************************************/ acpi_status acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, struct acpi_gpe_block_info *gpe_block, void *context) { acpi_status status; struct acpi_gpe_event_info *gpe_event_info; u32 gpe_enabled_count; u32 gpe_index; u32 i; u32 j; u8 *is_polling_needed = context; ACPI_ERROR_ONLY(u32 gpe_number); ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); /* * Ignore a null GPE block (e.g., if no GPE block 1 exists), and * any GPE blocks that have been initialized already. */ if (!gpe_block || gpe_block->initialized) { return_ACPI_STATUS(AE_OK); } /* * Enable all GPEs that have a corresponding method and have the * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block * must be enabled via the acpi_enable_gpe() interface. 
*/ gpe_enabled_count = 0; for (i = 0; i < gpe_block->register_count; i++) { for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { /* Get the info block for this particular GPE */ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; gpe_event_info = &gpe_block->event_info[gpe_index]; ACPI_ERROR_ONLY(gpe_number = gpe_block->block_base_number + gpe_index); gpe_event_info->flags |= ACPI_GPE_INITIALIZED; /* * Ignore GPEs that have no corresponding _Lxx/_Exx method * and GPEs that are used for wakeup */ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) != ACPI_GPE_DISPATCH_METHOD) || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { continue; } status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%02X", gpe_number)); continue; } gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED; if (is_polling_needed && ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) { *is_polling_needed = TRUE; } gpe_enabled_count++; } } if (gpe_enabled_count) { ACPI_INFO(("Enabled %u GPEs in block %02X to %02X", gpe_enabled_count, (u32)gpe_block->block_base_number, (u32)(gpe_block->block_base_number + (gpe_block->gpe_count - 1)))); } gpe_block->initialized = TRUE; return_ACPI_STATUS(AE_OK); } #endif /* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/evgpeblk.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: nsprepkg - Validation of package objects for predefined names * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acpredef.h" #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsprepkg") /* Local prototypes */ static acpi_status acpi_ns_check_package_list(struct acpi_evaluate_info *info, const union acpi_predefined_info *package, union acpi_operand_object **elements, u32 count); static acpi_status acpi_ns_check_package_elements(struct acpi_evaluate_info *info, union acpi_operand_object **elements, u8 type1, u32 count1, u8 type2, u32 count2, u32 start_index); static acpi_status acpi_ns_custom_package(struct acpi_evaluate_info *info, union acpi_operand_object **elements, u32 count); /******************************************************************************* * * FUNCTION: acpi_ns_check_package * * PARAMETERS: info - Method execution information block * return_object_ptr - Pointer to the object returned from the * evaluation of a method or object * * RETURN: Status * * DESCRIPTION: Check a returned package object for the correct count and * correct type of all sub-objects. 
* ******************************************************************************/ acpi_status acpi_ns_check_package(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; const union acpi_predefined_info *package; union acpi_operand_object **elements; acpi_status status = AE_OK; u32 expected_count; u32 count; u32 i; ACPI_FUNCTION_TRACE(ns_check_package); /* The package info for this name is in the next table entry */ package = info->predefined + 1; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s Validating return Package of Type %X, Count %X\n", info->full_pathname, package->ret_info.type, return_object->package.count)); /* * For variable-length Packages, we can safely remove all embedded * and trailing NULL package elements */ acpi_ns_remove_null_elements(info, package->ret_info.type, return_object); /* Extract package count and elements array */ elements = return_object->package.elements; count = return_object->package.count; /* * Most packages must have at least one element. The only exception * is the variable-length package (ACPI_PTYPE1_VAR). */ if (!count) { if (package->ret_info.type == ACPI_PTYPE1_VAR) { return_ACPI_STATUS(AE_OK); } ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Return Package has no elements (empty)")); return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } /* * Decode the type of the expected package contents * * PTYPE1 packages contain no subpackages * PTYPE2 packages contain subpackages */ switch (package->ret_info.type) { case ACPI_PTYPE_CUSTOM: status = acpi_ns_custom_package(info, elements, count); break; case ACPI_PTYPE1_FIXED: /* * The package count is fixed and there are no subpackages * * If package is too small, exit. 
* If package is larger than expected, issue warning but continue */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (count < expected_count) { goto package_too_small; } else if (count > expected_count) { ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Return Package is larger than needed - " "found %u, expected %u\n", info->full_pathname, count, expected_count)); } /* Validate all elements of the returned package */ status = acpi_ns_check_package_elements(info, elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, package->ret_info. count2, 0); break; case ACPI_PTYPE1_VAR: /* * The package count is variable, there are no subpackages, and all * elements must be of the same type */ for (i = 0; i < count; i++) { status = acpi_ns_check_object_type(info, elements, package->ret_info. object_type1, i); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } elements++; } break; case ACPI_PTYPE1_OPTION: /* * The package count is variable, there are no subpackages. There are * a fixed number of required elements, and a variable number of * optional elements. * * Check if package is at least as large as the minimum required */ expected_count = package->ret_info3.count; if (count < expected_count) { goto package_too_small; } /* Variable number of sub-objects */ for (i = 0; i < count; i++) { if (i < package->ret_info3.count) { /* These are the required package elements (0, 1, or 2) */ status = acpi_ns_check_object_type(info, elements, package-> ret_info3. object_type[i], i); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } else { /* These are the optional package elements */ status = acpi_ns_check_object_type(info, elements, package-> ret_info3. 
tail_object_type, i); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } elements++; } break; case ACPI_PTYPE2_REV_FIXED: /* First element is the (Integer) revision */ status = acpi_ns_check_object_type(info, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } elements++; count--; /* Examine the subpackages */ status = acpi_ns_check_package_list(info, package, elements, count); break; case ACPI_PTYPE2_PKG_COUNT: /* First element is the (Integer) count of subpackages to follow */ status = acpi_ns_check_object_type(info, elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Count cannot be larger than the parent package length, but allow it * to be smaller. The >= accounts for the Integer above. */ expected_count = (u32)(*elements)->integer.value; if (expected_count >= count) { goto package_too_small; } count = expected_count; elements++; /* Examine the subpackages */ status = acpi_ns_check_package_list(info, package, elements, count); break; case ACPI_PTYPE2: case ACPI_PTYPE2_FIXED: case ACPI_PTYPE2_MIN: case ACPI_PTYPE2_COUNT: case ACPI_PTYPE2_FIX_VAR: /* * These types all return a single Package that consists of a * variable number of subpackages. * * First, ensure that the first element is a subpackage. If not, * the BIOS may have incorrectly returned the object as a single * package instead of a Package of Packages (a common error if * there is only one entry). We may be able to repair this by * wrapping the returned Package with a new outer Package. 
*/ if (*elements && ((*elements)->common.type != ACPI_TYPE_PACKAGE)) { /* Create the new outer package and populate it */ status = acpi_ns_wrap_with_package(info, return_object, return_object_ptr); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Update locals to point to the new package (of 1 element) */ return_object = *return_object_ptr; elements = return_object->package.elements; count = 1; } /* Examine the subpackages */ status = acpi_ns_check_package_list(info, package, elements, count); break; case ACPI_PTYPE2_VAR_VAR: /* * Returns a variable list of packages, each with a variable list * of objects. */ break; case ACPI_PTYPE2_UUID_PAIR: /* The package must contain pairs of (UUID + type) */ if (count & 1) { expected_count = count + 1; goto package_too_small; } while (count > 0) { status = acpi_ns_check_object_type(info, elements, package->ret_info. object_type1, 0); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Validate length of the UUID buffer */ if ((*elements)->buffer.length != 16) { ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Invalid length for UUID Buffer")); return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } status = acpi_ns_check_object_type(info, elements + 1, package->ret_info. 
object_type2, 0); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } elements += 2; count -= 2; } break; default: /* Should not get here if predefined info table is correct */ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Invalid internal return type in table entry: %X", package->ret_info.type)); return_ACPI_STATUS(AE_AML_INTERNAL); } return_ACPI_STATUS(status); package_too_small: /* Error exit for the case with an incorrect package count */ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Return Package is too small - found %u elements, expected %u", count, expected_count)); return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } /******************************************************************************* * * FUNCTION: acpi_ns_check_package_list * * PARAMETERS: info - Method execution information block * package - Pointer to package-specific info for method * elements - Element list of parent package. All elements * of this list should be of type Package. * count - Count of subpackages * * RETURN: Status * * DESCRIPTION: Examine a list of subpackages * ******************************************************************************/ static acpi_status acpi_ns_check_package_list(struct acpi_evaluate_info *info, const union acpi_predefined_info *package, union acpi_operand_object **elements, u32 count) { union acpi_operand_object *sub_package; union acpi_operand_object **sub_elements; acpi_status status; u32 expected_count; u32 i; u32 j; /* * Validate each subpackage in the parent Package * * NOTE: assumes list of subpackages contains no NULL elements. * Any NULL elements should have been removed by earlier call * to acpi_ns_remove_null_elements. 
*/ for (i = 0; i < count; i++) { sub_package = *elements; sub_elements = sub_package->package.elements; info->parent_package = sub_package; /* Each sub-object must be of type Package */ status = acpi_ns_check_object_type(info, &sub_package, ACPI_RTYPE_PACKAGE, i); if (ACPI_FAILURE(status)) { return (status); } /* Examine the different types of expected subpackages */ info->parent_package = sub_package; switch (package->ret_info.type) { case ACPI_PTYPE2: case ACPI_PTYPE2_PKG_COUNT: case ACPI_PTYPE2_REV_FIXED: /* Each subpackage has a fixed number of elements */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (sub_package->package.count < expected_count) { goto package_too_small; } status = acpi_ns_check_package_elements(info, sub_elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, package->ret_info. count2, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_FIX_VAR: /* * Each subpackage has a fixed number of elements and an * optional element */ expected_count = package->ret_info.count1 + package->ret_info.count2; if (sub_package->package.count < expected_count) { goto package_too_small; } status = acpi_ns_check_package_elements(info, sub_elements, package->ret_info. object_type1, package->ret_info. count1, package->ret_info. object_type2, sub_package->package. count - package->ret_info. count1, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_VAR_VAR: /* * Each subpackage has a fixed or variable number of elements */ break; case ACPI_PTYPE2_FIXED: /* Each subpackage has a fixed length */ expected_count = package->ret_info2.count; if (sub_package->package.count < expected_count) { goto package_too_small; } /* Check the type of each subpackage element */ for (j = 0; j < expected_count; j++) { status = acpi_ns_check_object_type(info, &sub_elements[j], package-> ret_info2. 
object_type[j], j); if (ACPI_FAILURE(status)) { return (status); } } break; case ACPI_PTYPE2_MIN: /* Each subpackage has a variable but minimum length */ expected_count = package->ret_info.count1; if (sub_package->package.count < expected_count) { goto package_too_small; } /* Check the type of each subpackage element */ status = acpi_ns_check_package_elements(info, sub_elements, package->ret_info. object_type1, sub_package->package. count, 0, 0, 0); if (ACPI_FAILURE(status)) { return (status); } break; case ACPI_PTYPE2_COUNT: /* * First element is the (Integer) count of elements, including * the count field (the ACPI name is num_elements) */ status = acpi_ns_check_object_type(info, sub_elements, ACPI_RTYPE_INTEGER, 0); if (ACPI_FAILURE(status)) { return (status); } /* * Make sure package is large enough for the Count and is * is as large as the minimum size */ expected_count = (u32)(*sub_elements)->integer.value; if (sub_package->package.count < expected_count) { goto package_too_small; } if (sub_package->package.count < package->ret_info.count1) { expected_count = package->ret_info.count1; goto package_too_small; } if (expected_count == 0) { /* * Either the num_entries element was originally zero or it was * a NULL element and repaired to an Integer of value zero. * In either case, repair it by setting num_entries to be the * actual size of the subpackage. */ expected_count = sub_package->package.count; (*sub_elements)->integer.value = expected_count; } /* Check the type of each subpackage element */ status = acpi_ns_check_package_elements(info, (sub_elements + 1), package->ret_info. 
object_type1, (expected_count - 1), 0, 0, 1); if (ACPI_FAILURE(status)) { return (status); } break; default: /* Should not get here, type was validated by caller */ ACPI_ERROR((AE_INFO, "Invalid Package type: %X", package->ret_info.type)); return (AE_AML_INTERNAL); } elements++; } return (AE_OK); package_too_small: /* The subpackage count was smaller than required */ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags, "Return SubPackage[%u] is too small - found %u elements, expected %u", i, sub_package->package.count, expected_count)); return (AE_AML_OPERAND_VALUE); } /******************************************************************************* * * FUNCTION: acpi_ns_custom_package * * PARAMETERS: info - Method execution information block * elements - Pointer to the package elements array * count - Element count for the package * * RETURN: Status * * DESCRIPTION: Check a returned package object for the correct count and * correct type of all sub-objects. * * NOTE: Currently used for the _BIX method only. When needed for two or more * methods, probably a detect/dispatch mechanism will be required. 
 *
 ******************************************************************************/

static acpi_status
acpi_ns_custom_package(struct acpi_evaluate_info *info,
                       union acpi_operand_object **elements, u32 count)
{
    u32 expected_count;
    u32 version;
    acpi_status status = AE_OK;

    ACPI_FUNCTION_NAME(ns_custom_package);

    /* Get version number, must be Integer (first package element) */

    if ((*elements)->common.type != ACPI_TYPE_INTEGER) {
        ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
                              info->node_flags,
                              "Return Package has invalid object type for version number"));
        return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
    }

    /*
     * Expected package size depends on the revision in element 0:
     * 20 elements for version 0, 21 for version 1 (and later).
     */
    version = (u32)(*elements)->integer.value;
    expected_count = 21;        /* Version 1 */

    if (version == 0) {
        expected_count = 20;    /* Version 0 */
    }

    /* Too small is a hard error; too large is tolerated with a debug note */

    if (count < expected_count) {
        ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
                              info->node_flags,
                              "Return Package is too small - found %u elements, expected %u",
                              count, expected_count));
        return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
    } else if (count > expected_count) {
        ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
                          "%s: Return Package is larger than needed - "
                          "found %u, expected %u\n",
                          info->full_pathname, count, expected_count));
    }

    /* Validate all elements of the returned package: 16 Integers, 4 Strings */

    status = acpi_ns_check_package_elements(info, elements,
                                            ACPI_RTYPE_INTEGER, 16,
                                            ACPI_RTYPE_STRING, 4, 0);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Version 1 has a single trailing integer (element index 20) */

    if (version > 0) {
        status = acpi_ns_check_package_elements(info, elements + 20,
                                                ACPI_RTYPE_INTEGER, 1,
                                                0, 0, 20);
    }

    return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_check_package_elements
 *
 * PARAMETERS:  info            - Method execution information block
 *              elements        - Pointer to the package elements array
 *              type1           - Object type for first group
 *              count1          - Count for first group
 *              type2           - Object type for second group
 *              count2          - Count for second group
 *              start_index     - Start of the first group of elements
 *
 * RETURN:      Status
* * DESCRIPTION: Check that all elements of a package are of the correct object * type. Supports up to two groups of different object types. * ******************************************************************************/ static acpi_status acpi_ns_check_package_elements(struct acpi_evaluate_info *info, union acpi_operand_object **elements, u8 type1, u32 count1, u8 type2, u32 count2, u32 start_index) { union acpi_operand_object **this_element = elements; acpi_status status; u32 i; ACPI_FUNCTION_TRACE(ns_check_package_elements); /* * Up to two groups of package elements are supported by the data * structure. All elements in each group must be of the same type. * The second group can have a count of zero. */ for (i = 0; i < count1; i++) { status = acpi_ns_check_object_type(info, this_element, type1, i + start_index); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } this_element++; } for (i = 0; i < count2; i++) { status = acpi_ns_check_object_type(info, this_element, type2, (i + count1 + start_index)); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } this_element++; } return_ACPI_STATUS(AE_OK); }
linux-master
drivers/acpi/acpica/nsprepkg.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#define EXPORT_ACPI_INTERFACES

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwxfsleep")

/* Local prototypes */
#if (!ACPI_REDUCED_HARDWARE)
static acpi_status
acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
                                   acpi_physical_address physical_address,
                                   acpi_physical_address physical_address64);
#endif

/*
 * These functions are removed for the ACPI_REDUCED_HARDWARE case:
 *      acpi_set_firmware_waking_vector
 *      acpi_enter_sleep_state_s4bios
 */
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_set_firmware_waking_vector
 *
 * PARAMETERS:  facs                - Pointer to FACS table
 *              physical_address    - 32-bit physical address of ACPI real mode
 *                                    entry point
 *              physical_address64  - 64-bit physical address of ACPI protected
 *                                    mode entry point
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Sets the firmware_waking_vector fields of the FACS
 *
 ******************************************************************************/

static acpi_status
acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs,
                                   acpi_physical_address physical_address,
                                   acpi_physical_address physical_address64)
{
    ACPI_FUNCTION_TRACE(acpi_hw_set_firmware_waking_vector);

    /*
     * According to the ACPI specification 2.0c and later, the 64-bit
     * waking vector should be cleared and the 32-bit waking vector should
     * be used, unless we want the wake-up code to be called by the BIOS in
     * Protected Mode. Some systems (for example HP dv5-1004nr) are known
     * to fail to resume if the 64-bit vector is used.
     */

    /* Set the 32-bit vector (always present) */

    facs->firmware_waking_vector = (u32)physical_address;

    /*
     * Only touch the 64-bit (X) vector when the table is long enough to
     * contain it; the field exists past the original 32-byte FACS layout.
     */
    if (facs->length > 32) {
        if (facs->version >= 1) {

            /* Set the 64-bit vector */

            facs->xfirmware_waking_vector = physical_address64;
        } else {
            /* Clear the 64-bit vector if it exists */

            facs->xfirmware_waking_vector = 0;
        }
    }

    return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_set_firmware_waking_vector
 *
 * PARAMETERS:  physical_address    - 32-bit physical address of ACPI real mode
 *                                    entry point
 *              physical_address64  - 64-bit physical address of ACPI protected
 *                                    mode entry point
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Sets the firmware_waking_vector fields of the FACS.
 *              Silently does nothing if no FACS table is present.
 *
 ******************************************************************************/

acpi_status
acpi_set_firmware_waking_vector(acpi_physical_address physical_address,
                                acpi_physical_address physical_address64)
{
    ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);

    if (acpi_gbl_FACS) {
        (void)acpi_hw_set_firmware_waking_vector(acpi_gbl_FACS,
                                                 physical_address,
                                                 physical_address64);
    }

    return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)

/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state_s4bios
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform a S4 bios request.
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status acpi_enter_sleep_state_s4bios(void)
{
    u32 in_value;
    acpi_status status;

    ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);

    /* Clear the wake status bit (PM1) */

    status =
        acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    status = acpi_hw_clear_acpi_status();
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /*
     * 1) Disable all GPEs
     * 2) Enable all wakeup GPEs
     */
    status = acpi_hw_disable_all_gpes();
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    acpi_gbl_system_awake_and_running = FALSE;

    status = acpi_hw_enable_all_wakeup_gpes();
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Issue the S4BIOS request via the FADT SMI command port */

    status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
                                (u32)acpi_gbl_FADT.s4_bios_request, 8);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* Poll the wake status bit until the platform signals wakeup */

    do {
        acpi_os_stall(ACPI_USEC_PER_MSEC);
        status =
            acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
        if (ACPI_FAILURE(status)) {
            return_ACPI_STATUS(status);
        }

    } while (!in_value);

    return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
#endif                          /* !ACPI_REDUCED_HARDWARE */

/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state_prep
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare to enter a system sleep state.
 *              This function must execute with interrupts enabled.
 *              We break sleeping into 2 stages so that OSPM can handle
 *              various OS-specific tasks between the two steps.
 *
 ******************************************************************************/
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
{
    acpi_status status;
    struct acpi_object_list arg_list;
    union acpi_object arg;
    u32 sst_value;

    ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);

    /* Cache the SLP_TYP values for the requested state in globals */

    status = acpi_get_sleep_type_data(sleep_state,
                                      &acpi_gbl_sleep_type_a,
                                      &acpi_gbl_sleep_type_b);
    if (ACPI_FAILURE(status)) {
        return_ACPI_STATUS(status);
    }

    /* S0 values are optional; mark invalid on failure instead of erroring */

    status = acpi_get_sleep_type_data(ACPI_STATE_S0,
                                      &acpi_gbl_sleep_type_a_s0,
                                      &acpi_gbl_sleep_type_b_s0);
    if (ACPI_FAILURE(status)) {
        acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID;
    }

    /* Execute the _PTS method (Prepare To Sleep) */

    arg_list.count = 1;
    arg_list.pointer = &arg;
    arg.type = ACPI_TYPE_INTEGER;
    arg.integer.value = sleep_state;

    /* _PTS is optional - AE_NOT_FOUND is not an error */

    status =
        acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
        return_ACPI_STATUS(status);
    }

    /* Setup the argument to the _SST method (System STatus) */

    switch (sleep_state) {
    case ACPI_STATE_S0:

        sst_value = ACPI_SST_WORKING;
        break;

    case ACPI_STATE_S1:
    case ACPI_STATE_S2:
    case ACPI_STATE_S3:

        sst_value = ACPI_SST_SLEEPING;
        break;

    case ACPI_STATE_S4:

        sst_value = ACPI_SST_SLEEP_CONTEXT;
        break;

    default:

        sst_value = ACPI_SST_INDICATOR_OFF;     /* Default is off */
        break;
    }

    /*
     * Set the system indicators to show the desired sleep state.
     * _SST is an optional method (return no error if not found)
     */
    acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
    return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)

/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status acpi_enter_sleep_state(u8 sleep_state)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);

    /* SLP_TYP globals were set by acpi_enter_sleep_state_prep */

    if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
        (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
        ACPI_ERROR((AE_INFO,
                    "Sleep values out of range: A=0x%X B=0x%X",
                    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
        return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
    }

    /* Use the legacy hardware path unless this platform is reduced-hardware */
#if !ACPI_REDUCED_HARDWARE
    if (!acpi_gbl_reduced_hardware)
        status = acpi_hw_legacy_sleep(sleep_state);
    else
#endif
        status = acpi_hw_extended_sleep(sleep_state);

    return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)

/*******************************************************************************
 *
 * FUNCTION:    acpi_leave_sleep_state_prep
 *
 * PARAMETERS:  sleep_state         - Which sleep state we are exiting
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform the first stage of OS-independent ACPI cleanup after a
 *              sleep. Called with interrupts DISABLED.
 *              We break wake/resume into 2 stages so that OSPM can handle
 *              various OS-specific tasks between the two steps.
 *
 ******************************************************************************/
acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);

    /* Use the legacy hardware path unless this platform is reduced-hardware */
#if !ACPI_REDUCED_HARDWARE
    if (!acpi_gbl_reduced_hardware)
        status = acpi_hw_legacy_wake_prep(sleep_state);
    else
#endif
        status = acpi_hw_extended_wake_prep(sleep_state);

    return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)

/*******************************************************************************
 *
 * FUNCTION:    acpi_leave_sleep_state
 *
 * PARAMETERS:  sleep_state         - Which sleep state we are exiting
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
 *              Called with interrupts ENABLED.
 *
 ******************************************************************************/
acpi_status acpi_leave_sleep_state(u8 sleep_state)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);

    /* Use the legacy hardware path unless this platform is reduced-hardware */
#if !ACPI_REDUCED_HARDWARE
    if (!acpi_gbl_reduced_hardware)
        status = acpi_hw_legacy_wake(sleep_state);
    else
#endif
        status = acpi_hw_extended_wake(sleep_state);

    return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
linux-master
drivers/acpi/acpica/hwxfsleep.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exconvrt - Object conversion routines
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exconvrt")

/* Local prototypes */
/* NOTE(review): prototype names the last parameter max_length; the
 * definition below calls it data_width - same parameter, differing name */
static u32
acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_convert_to_integer
 *
 * PARAMETERS:  obj_desc            - Object to be converted. Must be an
 *                                    Integer, Buffer, or String
 *              result_desc         - Where the new Integer object is returned
 *              implicit_conversion - Used for string conversion
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert an ACPI Object to an integer.
 *
 ******************************************************************************/

acpi_status
acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
                           union acpi_operand_object **result_desc,
                           u32 implicit_conversion)
{
    union acpi_operand_object *return_desc;
    u8 *pointer;
    u64 result;
    u32 i;
    u32 count;

    ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);

    switch (obj_desc->common.type) {
    case ACPI_TYPE_INTEGER:

        /* No conversion necessary */

        *result_desc = obj_desc;
        return_ACPI_STATUS(AE_OK);

    case ACPI_TYPE_BUFFER:
    case ACPI_TYPE_STRING:

        /* Note: Takes advantage of common buffer/string fields */

        pointer = obj_desc->buffer.pointer;
        count = obj_desc->buffer.length;
        break;

    default:

        return_ACPI_STATUS(AE_TYPE);
    }

    /*
     * Convert the buffer/string to an integer. Note that both buffers and
     * strings are treated as raw data - we don't convert ascii to hex for
     * strings.
     *
     * There are two terminating conditions for the loop:
     * 1) The size of an integer has been reached, or
     * 2) The end of the buffer or string has been reached
     */
    result = 0;

    /* String conversion is different than Buffer conversion */

    switch (obj_desc->common.type) {
    case ACPI_TYPE_STRING:
        /*
         * Convert string to an integer - for most cases, the string must be
         * hexadecimal as per the ACPI specification. The only exception (as
         * of ACPI 3.0) is that the to_integer() operator allows both decimal
         * and hexadecimal strings (hex prefixed with "0x").
         *
         * Explicit conversion is used only by to_integer.
         * All other string-to-integer conversions are implicit conversions.
         */
        if (implicit_conversion) {
            result =
                acpi_ut_implicit_strtoul64(ACPI_CAST_PTR(char, pointer));
        } else {
            result =
                acpi_ut_explicit_strtoul64(ACPI_CAST_PTR(char, pointer));
        }
        break;

    case ACPI_TYPE_BUFFER:

        /* Check for zero-length buffer */

        if (!count) {
            return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
        }

        /* Transfer no more than an integer's worth of data */

        if (count > acpi_gbl_integer_byte_width) {
            count = acpi_gbl_integer_byte_width;
        }

        /*
         * Convert buffer to an integer - we simply grab enough raw data
         * from the buffer to fill an integer
         */
        for (i = 0; i < count; i++) {
            /*
             * Get next byte and shift it into the Result.
             * Little endian is used, meaning that the first byte of the
             * buffer is the LSB of the integer
             */
            result |= (((u64) pointer[i]) << (i * 8));
        }
        break;

    default:

        /* No other types can get here */

        break;
    }

    /* Create a new integer */

    return_desc = acpi_ut_create_integer_object(result);
    if (!return_desc) {
        return_ACPI_STATUS(AE_NO_MEMORY);
    }

    ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
                      ACPI_FORMAT_UINT64(result)));

    /* Save the Result (truncated to 32 bits for DSDT revision < 2 tables) */

    (void)acpi_ex_truncate_for32bit_table(return_desc);
    *result_desc = return_desc;
    return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_convert_to_buffer
 *
 * PARAMETERS:  obj_desc        - Object to be converted. Must be an
 *                                Integer, Buffer, or String
 *              result_desc     - Where the new buffer object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert an ACPI Object to a Buffer
 *
 ******************************************************************************/

acpi_status
acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
                          union acpi_operand_object **result_desc)
{
    union acpi_operand_object *return_desc;
    u8 *new_buf;

    ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);

    switch (obj_desc->common.type) {
    case ACPI_TYPE_BUFFER:

        /* No conversion necessary */

        *result_desc = obj_desc;
        return_ACPI_STATUS(AE_OK);

    case ACPI_TYPE_INTEGER:
        /*
         * Create a new Buffer object.
         * Need enough space for one integer
         */
        return_desc =
            acpi_ut_create_buffer_object(acpi_gbl_integer_byte_width);
        if (!return_desc) {
            return_ACPI_STATUS(AE_NO_MEMORY);
        }

        /* Copy the integer to the buffer, LSB first */

        new_buf = return_desc->buffer.pointer;
        memcpy(new_buf, &obj_desc->integer.value,
               acpi_gbl_integer_byte_width);
        break;

    case ACPI_TYPE_STRING:
        /*
         * Create a new Buffer object
         * Size will be the string length
         *
         * NOTE: Add one to the string length to include the null terminator.
         * The ACPI spec is unclear on this subject, but there is existing
         * ASL/AML code that depends on the null being transferred to the new
         * buffer.
         */
        return_desc =
            acpi_ut_create_buffer_object((acpi_size)obj_desc->string.length +
                                         1);
        if (!return_desc) {
            return_ACPI_STATUS(AE_NO_MEMORY);
        }

        /* Copy the string to the buffer (trailing null comes from the
         * zero-initialized buffer object, not from strncpy) */

        new_buf = return_desc->buffer.pointer;
        strncpy((char *)new_buf, (char *)obj_desc->string.pointer,
                obj_desc->string.length);
        break;

    default:

        return_ACPI_STATUS(AE_TYPE);
    }

    /* Mark buffer initialized */

    return_desc->common.flags |= AOPOBJ_DATA_VALID;
    *result_desc = return_desc;
    return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_convert_to_ascii
 *
 * PARAMETERS:  integer         - Value to be converted
 *              base            - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
 *              string          - Where the string is returned
 *              data_width      - Size of data item to be converted, in bytes
 *
 * RETURN:      Actual string length
 *
 * DESCRIPTION: Convert an ACPI Integer to a hex or decimal string
 *
 ******************************************************************************/

static u32
acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
{
    u64 digit;
    u32 i;
    u32 j;
    u32 k = 0;
    u32 hex_length;
    u32 decimal_length;
    u32 remainder;
    u8 supress_zeros;

    ACPI_FUNCTION_ENTRY();

    switch (base) {
    case 10:

        /* Setup max length for the decimal number */

        switch (data_width) {
        case 1:

            decimal_length = ACPI_MAX8_DECIMAL_DIGITS;
            break;

        case 4:

            decimal_length = ACPI_MAX32_DECIMAL_DIGITS;
            break;

        case 8:
        default:

            decimal_length = ACPI_MAX64_DECIMAL_DIGITS;
            break;
        }

        supress_zeros = TRUE;   /* No leading zeros */
        remainder = 0;

        for (i = decimal_length; i > 0; i--) {

            /* Divide by nth factor of 10 */

            digit = integer;
            for (j = 0; j < i; j++) {
                (void)acpi_ut_short_divide(digit, 10, &digit,
                                           &remainder);
            }

            /* Handle leading zeros */

            if (remainder != 0) {
                supress_zeros = FALSE;
            }

            if (!supress_zeros) {
                string[k] = (u8) (ACPI_ASCII_ZERO + remainder);
                k++;
            }
        }
        break;

    case 16:

        /* hex_length: 2 ascii hex chars per data byte */

        hex_length = (data_width * 2);
        for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {

            /* Get one hex digit, most significant digits first */

            string[k] =
                (u8) acpi_ut_hex_to_ascii_char(integer, ACPI_MUL_4(j));
            k++;
        }
        break;

    default:
        return (0);
    }

    /*
     * Since leading zeros are suppressed, we must check for the case where
     * the integer equals 0
     *
     * Finally, null terminate the string and return the length
     */
    if (!k) {
        string[0] = ACPI_ASCII_ZERO;
        k = 1;
    }

    string[k] = 0;
    return ((u32) k);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_convert_to_string
 *
 * PARAMETERS:  obj_desc        - Object to be converted. Must be an
 *                                Integer, Buffer, or String
 *              result_desc     - Where the string object is returned
 *              type            - String flags (base and conversion type)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert an ACPI Object to a string. Supports both implicit
 *              and explicit conversions and related rules.
 *
 ******************************************************************************/

acpi_status
acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
                          union acpi_operand_object ** result_desc, u32 type)
{
    union acpi_operand_object *return_desc;
    u8 *new_buf;
    u32 i;
    u32 string_length = 0;
    u16 base = 16;              /* Default: hexadecimal output */
    u8 separator = ',';

    ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);

    switch (obj_desc->common.type) {
    case ACPI_TYPE_STRING:

        /* No conversion necessary */

        *result_desc = obj_desc;
        return_ACPI_STATUS(AE_OK);

    case ACPI_TYPE_INTEGER:

        switch (type) {
        case ACPI_EXPLICIT_CONVERT_DECIMAL:
            /*
             * From to_decimal_string, integer source.
             *
             * Make room for the maximum decimal number size
             */
            string_length = ACPI_MAX_DECIMAL_DIGITS;
            base = 10;
            break;

        default:

            /* Two hex string characters for each integer byte */

            string_length = ACPI_MUL_2(acpi_gbl_integer_byte_width);
            break;
        }

        /*
         * Create a new String
         * Need enough space for one ASCII integer (plus null terminator)
         */
        return_desc =
            acpi_ut_create_string_object((acpi_size)string_length);
        if (!return_desc) {
            return_ACPI_STATUS(AE_NO_MEMORY);
        }

        new_buf = return_desc->buffer.pointer;

        /* Convert integer to string */

        string_length =
            acpi_ex_convert_to_ascii(obj_desc->integer.value, base,
                                     new_buf,
                                     acpi_gbl_integer_byte_width);

        /* Null terminate at the correct place */

        return_desc->string.length = string_length;
        new_buf[string_length] = 0;
        break;

    case ACPI_TYPE_BUFFER:

        /* Setup string length, base, and separator */

        switch (type) {
        case ACPI_EXPLICIT_CONVERT_DECIMAL:     /* Used by to_decimal_string */
            /*
             * Explicit conversion from the to_decimal_string ASL operator.
             *
             * From ACPI: "If the input is a buffer, it is converted to a
             * a string of decimal values separated by commas."
             */
            base = 10;

            /*
             * Calculate the final string length. Individual string values
             * are variable length (include separator for each)
             */
            for (i = 0; i < obj_desc->buffer.length; i++) {
                if (obj_desc->buffer.pointer[i] >= 100) {
                    string_length += 4;
                } else if (obj_desc->buffer.pointer[i] >= 10) {
                    string_length += 3;
                } else {
                    string_length += 2;
                }
            }
            break;

        case ACPI_IMPLICIT_CONVERT_HEX:
            /*
             * Implicit buffer-to-string conversion
             *
             * From the ACPI spec:
             * "The entire contents of the buffer are converted to a string of
             * two-character hexadecimal numbers, each separated by a space."
             *
             * Each hex number is prefixed with 0x (11/2018)
             */
            separator = ' ';
            string_length = (obj_desc->buffer.length * 5);
            break;

        case ACPI_EXPLICIT_CONVERT_HEX:
            /*
             * Explicit conversion from the to_hex_string ASL operator.
             *
             * From ACPI: "If Data is a buffer, it is converted to a string of
             * hexadecimal values separated by commas."
             *
             * Each hex number is prefixed with 0x (11/2018)
             */
            separator = ',';
            string_length = (obj_desc->buffer.length * 5);
            break;

        default:
            return_ACPI_STATUS(AE_BAD_PARAMETER);
        }

        /*
         * Create a new string object and string buffer
         * (-1 because of extra separator included in string_length from above)
         * Allow creation of zero-length strings from zero-length buffers.
         */
        if (string_length) {
            string_length--;
        }

        return_desc =
            acpi_ut_create_string_object((acpi_size)string_length);
        if (!return_desc) {
            return_ACPI_STATUS(AE_NO_MEMORY);
        }

        new_buf = return_desc->buffer.pointer;

        /*
         * Convert buffer bytes to hex or decimal values
         * (separated by commas or spaces)
         */
        for (i = 0; i < obj_desc->buffer.length; i++) {
            if (base == 16) {

                /* Emit 0x prefix for explicit/implicit hex conversion */

                *new_buf++ = '0';
                *new_buf++ = 'x';
            }

            new_buf += acpi_ex_convert_to_ascii((u64) obj_desc->
                                                buffer.pointer[i], base,
                                                new_buf, 1);

            /* Each digit is separated by either a comma or space */

            *new_buf++ = separator;
        }

        /*
         * Null terminate the string
         * (overwrites final comma/space from above)
         */
        if (obj_desc->buffer.length) {
            new_buf--;
        }
        *new_buf = 0;
        break;

    default:

        return_ACPI_STATUS(AE_TYPE);
    }

    *result_desc = return_desc;
    return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_convert_to_target_type
 *
 * PARAMETERS:  destination_type    - Current type of the destination
 *              source_desc         - Source object to be converted.
 *              result_desc         - Where the converted object is returned
 *              walk_state          - Current method state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Implements "implicit conversion" rules for storing an object.
 *
 ******************************************************************************/

acpi_status
acpi_ex_convert_to_target_type(acpi_object_type destination_type,
                               union acpi_operand_object *source_desc,
                               union acpi_operand_object **result_desc,
                               struct acpi_walk_state *walk_state)
{
    acpi_status status = AE_OK;

    ACPI_FUNCTION_TRACE(ex_convert_to_target_type);

    /* Default behavior: store the source object unchanged */

    *result_desc = source_desc;

    /*
     * If required by the target,
     * perform implicit conversion on the source before we store it.
     * Dispatch on the argument type of the current opcode's target operand.
     */
    switch (GET_CURRENT_ARG_TYPE(walk_state->op_info->runtime_args)) {
    case ARGI_SIMPLE_TARGET:
    case ARGI_FIXED_TARGET:
    case ARGI_INTEGER_REF:      /* Handles Increment, Decrement cases */

        switch (destination_type) {
        case ACPI_TYPE_LOCAL_REGION_FIELD:
            /*
             * Named field can always handle conversions
             */
            break;

        default:

            /* No conversion allowed for these types */

            if (destination_type != source_desc->common.type) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Explicit operator, will store (%s) over existing type (%s)\n",
                                  acpi_ut_get_object_type_name
                                  (source_desc),
                                  acpi_ut_get_type_name
                                  (destination_type)));
                /* AE_TYPE is converted to AE_OK below: store-over semantics */
                status = AE_TYPE;
            }
        }
        break;

    case ARGI_TARGETREF:
    case ARGI_STORE_TARGET:

        switch (destination_type) {
        case ACPI_TYPE_INTEGER:
        case ACPI_TYPE_BUFFER_FIELD:
        case ACPI_TYPE_LOCAL_BANK_FIELD:
        case ACPI_TYPE_LOCAL_INDEX_FIELD:
            /*
             * These types require an Integer operand. We can convert
             * a Buffer or a String to an Integer if necessary.
             */
            status =
                acpi_ex_convert_to_integer(source_desc, result_desc,
                                           ACPI_IMPLICIT_CONVERSION);
            break;

        case ACPI_TYPE_STRING:
            /*
             * The operand must be a String. We can convert an
             * Integer or Buffer if necessary
             */
            status =
                acpi_ex_convert_to_string(source_desc, result_desc,
                                          ACPI_IMPLICIT_CONVERT_HEX);
            break;

        case ACPI_TYPE_BUFFER:
            /*
             * The operand must be a Buffer. We can convert an
             * Integer or String if necessary
             */
            status =
                acpi_ex_convert_to_buffer(source_desc, result_desc);
            break;

        default:

            ACPI_ERROR((AE_INFO,
                        "Bad destination type during conversion: 0x%X",
                        destination_type));
            status = AE_AML_INTERNAL;
            break;
        }
        break;

    case ARGI_REFERENCE:
        /*
         * create_xxxx_field cases - we are storing the field object into the name
         */
        break;

    default:

        ACPI_ERROR((AE_INFO,
                    "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
                    GET_CURRENT_ARG_TYPE(walk_state->op_info->
                                         runtime_args),
                    walk_state->opcode,
                    acpi_ut_get_type_name(destination_type)));
        status = AE_AML_INTERNAL;
    }

    /*
     * Source-to-Target conversion semantics:
     *
     * If conversion to the target type cannot be performed, then simply
     * overwrite the target with the new object and type.
     */
    if (status == AE_TYPE) {
        status = AE_OK;
    }

    return_ACPI_STATUS(status);
}
linux-master
drivers/acpi/acpica/exconvrt.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: psopinfo - AML opcode information functions and dispatch tables
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acopcode.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_PARSER
ACPI_MODULE_NAME("psopinfo")

/*
 * Argument counts, indexed by the low bits of an opcode's AML_TYPE_* value
 * (see acpi_ps_get_argument_count below).
 */
static const u8 acpi_gbl_argument_count[] =
    { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_opcode_info
 *
 * PARAMETERS:  opcode              - The AML opcode
 *
 * RETURN:      A pointer to the info about the opcode.
 *
 * DESCRIPTION: Find AML opcode description based on the opcode.
 *              NOTE: This procedure must ALWAYS return a valid pointer!
 *
 ******************************************************************************/

const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
#ifdef ACPI_DEBUG_OUTPUT
	const char *opcode_name = "Unknown AML opcode";
#endif

	ACPI_FUNCTION_NAME(ps_get_opcode_info);

	/*
	 * Detect normal 8-bit opcode or extended 16-bit opcode
	 */
	if (!(opcode & 0xFF00)) {

		/* Simple (8-bit) opcode: 0-255, can't index beyond table */

		return (&acpi_gbl_aml_op_info
			[acpi_gbl_short_op_index[(u8)opcode]]);
	}

	if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
	    (((u8)opcode) <= MAX_EXTENDED_OPCODE)) {

		/* Valid extended (16-bit) opcode */

		return (&acpi_gbl_aml_op_info
			[acpi_gbl_long_op_index[(u8)opcode]]);
	}
#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
#include "asldefine.h"

	/* iASL-only pseudo opcodes, mapped to readable names for debug output */

	switch (opcode) {
	case AML_RAW_DATA_BYTE:
		opcode_name = "-Raw Data Byte-";
		break;

	case AML_RAW_DATA_WORD:
		opcode_name = "-Raw Data Word-";
		break;

	case AML_RAW_DATA_DWORD:
		opcode_name = "-Raw Data Dword-";
		break;

	case AML_RAW_DATA_QWORD:
		opcode_name = "-Raw Data Qword-";
		break;

	case AML_RAW_DATA_BUFFER:
		opcode_name = "-Raw Data Buffer-";
		break;

	case AML_RAW_DATA_CHAIN:
		opcode_name = "-Raw Data Buffer Chain-";
		break;

	case AML_PACKAGE_LENGTH:
		opcode_name = "-Package Length-";
		break;

	case AML_UNASSIGNED_OPCODE:
		opcode_name = "-Unassigned Opcode-";
		break;

	case AML_DEFAULT_ARG_OP:
		opcode_name = "-Default Arg-";
		break;

	default:
		break;
	}
#endif

	/* Unknown AML opcode */

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));

	return (&acpi_gbl_aml_op_info[_UNK]);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_opcode_name
 *
 * PARAMETERS:  opcode              - The AML opcode
 *
 * RETURN:      A pointer to the name of the opcode (ASCII String)
 *              Note: Never returns NULL.
 *
 * DESCRIPTION: Translate an opcode into a human-readable string
 *
 ******************************************************************************/

const char *acpi_ps_get_opcode_name(u16 opcode)
{
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)

	const struct acpi_opcode_info *op;

	op = acpi_ps_get_opcode_info(opcode);

	/* Always guaranteed to return a valid pointer */

	return (op->name);

#else
	return ("OpcodeName unavailable");
#endif
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_get_argument_count
 *
 * PARAMETERS:  op_type             - Type associated with the AML opcode
 *
 * RETURN:      Argument count
 *
 * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
 *
 ******************************************************************************/

u8 acpi_ps_get_argument_count(u32 op_type)
{
	/* Out-of-range types are treated as "no arguments" */

	if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
		return (acpi_gbl_argument_count[op_type]);
	}

	return (0);
}

/*
 * This table is directly indexed by the opcodes It returns
 * an index into the opcode table (acpi_gbl_aml_op_info)
 */
const u8 acpi_gbl_short_op_index[256] = {
/*              0     1     2     3     4     5     6     7  */
/*              8     9     A     B     C     D     E     F  */
/* 0x00 */ 0x00, 0x01, _UNK, _UNK, _UNK, _UNK, 0x02, _UNK,
/* 0x08 */ 0x03, _UNK, 0x04, 0x05, 0x06, 0x07, 0x6E, _UNK,
/* 0x10 */ 0x08, 0x09, 0x0a, 0x6F, 0x0b, 0x81, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x58 */ _ASC, _ASC, _ASC, _UNK, _PFX, _UNK, _PFX, _ASC,
/* 0x60 */ 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
/* 0x68 */ 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, _UNK,
/* 0x70 */ 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
/* 0x78 */ 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a,
/* 0x80 */ 0x2b, 0x2c, 0x2d, 0x2e, 0x70, 0x71, 0x2f, 0x30,
/* 0x88 */ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x72,
/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
/* 0xA8 */ 0x62, 0x82, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xC8 */ _UNK, _UNK, _UNK, _UNK, 0x44, _UNK, _UNK, _UNK,
/* 0xD0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xD8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xE0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xE8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xF0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xF8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x45,
};

/*
 * This table is indexed by the second opcode of the extended opcode
 * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
 */
const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
/*              0     1     2     3     4     5     6     7  */
/*              8     9     A     B     C     D     E     F  */
/* 0x00 */ _UNK, 0x46, 0x47, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x08 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x10 */ _UNK, _UNK, 0x48, 0x49, _UNK, _UNK, _UNK, _UNK,
/* 0x18 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, 0x7B,
/* 0x20 */ 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
/* 0x28 */ 0x52, 0x53, 0x54, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x30 */ 0x55, 0x56, 0x57, 0x7e, _UNK, _UNK, _UNK, _UNK,
/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x48 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x50 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x58 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x60 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x68 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x70 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x78 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x80 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
/* 0x88 */ 0x7C,
};
linux-master
drivers/acpi/acpica/psopinfo.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_HARDWARE
ACPI_MODULE_NAME("hwacpi")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/******************************************************************************
 *
 * FUNCTION:    acpi_hw_set_mode
 *
 * PARAMETERS:  mode            - SYS_MODE_ACPI or SYS_MODE_LEGACY
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transitions the system into the requested mode by writing the
 *              FADT-supplied ACPI_ENABLE/ACPI_DISABLE value to the SMI
 *              command port.
 *
 ******************************************************************************/
acpi_status acpi_hw_set_mode(u32 mode)
{

	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_set_mode);

	/* If the Hardware Reduced flag is set, machine is always in acpi mode */

	if (acpi_gbl_reduced_hardware) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
	 * system does not support mode transition.
	 */
	if (!acpi_gbl_FADT.smi_command) {
		ACPI_ERROR((AE_INFO,
			    "No SMI_CMD in FADT, mode transition failed"));
		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
	}

	/*
	 * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE
	 * in FADT: If it is zero, enabling or disabling is not supported.
	 * As old systems may have used zero for mode transition,
	 * we make sure both the numbers are zero to determine these
	 * transitions are not supported.
	 */
	if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
		ACPI_ERROR((AE_INFO,
			    "No ACPI mode transition supported in this system "
			    "(enable/disable both zero)"));
		/*
		 * NOTE: returns AE_OK (not an error) even though the transition
		 * cannot be performed - the machine is assumed to already be in
		 * the desired mode on such legacy systems.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	switch (mode) {
	case ACPI_SYS_MODE_ACPI:

		/* BIOS should have disabled ALL fixed and GP events */

		status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
					    (u32) acpi_gbl_FADT.acpi_enable, 8);
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Attempting to enable ACPI mode\n"));
		break;

	case ACPI_SYS_MODE_LEGACY:
		/*
		 * BIOS should clear all fixed status bits and restore fixed event
		 * enable bits to default
		 */
		status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
					    (u32)acpi_gbl_FADT.acpi_disable, 8);
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Attempting to enable Legacy (non-ACPI) mode\n"));
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Could not write ACPI mode change"));
		return_ACPI_STATUS(status);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_get_mode
 *
 * PARAMETERS:  none
 *
 * RETURN:      SYS_MODE_ACPI or SYS_MODE_LEGACY
 *
 * DESCRIPTION: Return current operating state of system. Determined by
 *              querying the SCI_EN bit.
 *
 ******************************************************************************/

u32 acpi_hw_get_mode(void)
{
	acpi_status status;
	u32 value;

	ACPI_FUNCTION_TRACE(hw_get_mode);

	/* If the Hardware Reduced flag is set, machine is always in acpi mode */

	if (acpi_gbl_reduced_hardware) {
		return_UINT32(ACPI_SYS_MODE_ACPI);
	}

	/*
	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
	 * system does not support mode transition.
	 */
	if (!acpi_gbl_FADT.smi_command) {
		return_UINT32(ACPI_SYS_MODE_ACPI);
	}

	status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
	if (ACPI_FAILURE(status)) {
		/* Cannot read SCI_EN - assume legacy mode */
		return_UINT32(ACPI_SYS_MODE_LEGACY);
	}

	if (value) {
		return_UINT32(ACPI_SYS_MODE_ACPI);
	} else {
		return_UINT32(ACPI_SYS_MODE_LEGACY);
	}
}
#endif				/* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/hwacpi.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: utmutex - local mutex support * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utmutex") /* Local prototypes */ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id); static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id); /******************************************************************************* * * FUNCTION: acpi_ut_mutex_initialize * * PARAMETERS: None. * * RETURN: Status * * DESCRIPTION: Create the system mutex objects. This includes mutexes, * spin locks, and reader/writer locks. * ******************************************************************************/ acpi_status acpi_ut_mutex_initialize(void) { u32 i; acpi_status status; ACPI_FUNCTION_TRACE(ut_mutex_initialize); /* Create each of the predefined mutex objects */ for (i = 0; i < ACPI_NUM_MUTEX; i++) { status = acpi_ut_create_mutex(i); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } /* Create the spinlocks for use at interrupt level or for speed */ status = acpi_os_create_lock (&acpi_gbl_gpe_lock); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } status = acpi_os_create_raw_lock(&acpi_gbl_hardware_lock); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } status = acpi_os_create_lock(&acpi_gbl_reference_count_lock); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Mutex for _OSI support */ status = acpi_os_create_mutex(&acpi_gbl_osi_mutex); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Create the reader/writer lock for namespace access */ status = acpi_ut_create_rw_lock(&acpi_gbl_namespace_rw_lock); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } return_ACPI_STATUS(status); } 
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_mutex_terminate
 *
 * PARAMETERS:  None.
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Delete all of the system mutex objects. This includes mutexes,
 *              spin locks, and reader/writer locks.
 *
 ******************************************************************************/

void acpi_ut_mutex_terminate(void)
{
	u32 i;

	ACPI_FUNCTION_TRACE(ut_mutex_terminate);

	/* Delete each predefined mutex object */

	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
		acpi_ut_delete_mutex(i);
	}

	acpi_os_delete_mutex(acpi_gbl_osi_mutex);

	/* Delete the spinlocks */

	acpi_os_delete_lock(acpi_gbl_gpe_lock);
	acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
	acpi_os_delete_lock(acpi_gbl_reference_count_lock);

	/* Delete the reader/writer lock */

	acpi_ut_delete_rw_lock(&acpi_gbl_namespace_rw_lock);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_mutex
 *
 * PARAMETERS:  mutex_ID        - ID of the mutex to be created
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object. Idempotent: does nothing if the mutex
 *              for this ID already exists.
 *
 ******************************************************************************/

static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);

	if (!acpi_gbl_mutex_info[mutex_id].mutex) {
		status =
		    acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex);
		acpi_gbl_mutex_info[mutex_id].thread_id =
		    ACPI_MUTEX_NOT_ACQUIRED;
		acpi_gbl_mutex_info[mutex_id].use_count = 0;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_delete_mutex
 *
 * PARAMETERS:  mutex_ID        - ID of the mutex to be deleted
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete a mutex object.
 *
 ******************************************************************************/

static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
{

	ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);

	acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex);

	/* Reset the bookkeeping so a later create_mutex starts clean */

	acpi_gbl_mutex_info[mutex_id].mutex = NULL;
	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_acquire_mutex
 *
 * PARAMETERS:  mutex_ID        - ID of the mutex to be acquired
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Acquire a mutex object. Blocks (ACPI_WAIT_FOREVER) until the
 *              mutex is available. With ACPI_MUTEX_DEBUG, also enforces the
 *              mutex ordering rule to detect potential deadlocks.
 *
 ******************************************************************************/

acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
{
	acpi_status status;
	acpi_thread_id this_thread_id;

	ACPI_FUNCTION_NAME(ut_acquire_mutex);

	if (mutex_id > ACPI_MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	this_thread_id = acpi_os_get_thread_id();

#ifdef ACPI_MUTEX_DEBUG
	{
		u32 i;
		/*
		 * Mutex debug code, for internal debugging only.
		 *
		 * Deadlock prevention. Check if this thread owns any mutexes of value
		 * greater than or equal to this one. If so, the thread has violated
		 * the mutex ordering rule. This indicates a coding error somewhere in
		 * the ACPI subsystem code.
		 */
		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
			if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
				if (i == mutex_id) {
					ACPI_ERROR((AE_INFO,
						    "Mutex [%s] already acquired by this thread [%u]",
						    acpi_ut_get_mutex_name
						    (mutex_id),
						    (u32)this_thread_id));

					return (AE_ALREADY_ACQUIRED);
				}

				ACPI_ERROR((AE_INFO,
					    "Invalid acquire order: Thread %u owns [%s], wants [%s]",
					    (u32)this_thread_id,
					    acpi_ut_get_mutex_name(i),
					    acpi_ut_get_mutex_name(mutex_id)));

				return (AE_ACQUIRE_DEADLOCK);
			}
		}
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			  "Thread %u attempting to acquire Mutex [%s]\n",
			  (u32)this_thread_id,
			  acpi_ut_get_mutex_name(mutex_id)));

	status =
	    acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
				  ACPI_WAIT_FOREVER);
	if (ACPI_SUCCESS(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Thread %u acquired Mutex [%s]\n",
				  (u32)this_thread_id,
				  acpi_ut_get_mutex_name(mutex_id)));

		acpi_gbl_mutex_info[mutex_id].use_count++;
		acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
	} else {
		ACPI_EXCEPTION((AE_INFO, status,
				"Thread %u could not acquire Mutex [%s] (0x%X)",
				(u32)this_thread_id,
				acpi_ut_get_mutex_name(mutex_id), mutex_id));
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_release_mutex
 *
 * PARAMETERS:  mutex_ID        - ID of the mutex to be released
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Release a mutex object.
 *
 ******************************************************************************/

acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
	ACPI_FUNCTION_NAME(ut_release_mutex);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
			  (u32)acpi_os_get_thread_id(),
			  acpi_ut_get_mutex_name(mutex_id)));

	if (mutex_id > ACPI_MAX_MUTEX) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Mutex must be acquired in order to release it!
	 */
	if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
		ACPI_ERROR((AE_INFO,
			    "Mutex [%s] (0x%X) is not acquired, cannot release",
			    acpi_ut_get_mutex_name(mutex_id), mutex_id));

		return (AE_NOT_ACQUIRED);
	}
#ifdef ACPI_MUTEX_DEBUG
	{
		u32 i;
		/*
		 * Mutex debug code, for internal debugging only.
		 *
		 * Deadlock prevention. Check if this thread owns any mutexes of value
		 * greater than this one. If so, the thread has violated the mutex
		 * ordering rule. This indicates a coding error somewhere in
		 * the ACPI subsystem code.
		 */
		for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
			if (acpi_gbl_mutex_info[i].thread_id ==
			    acpi_os_get_thread_id()) {
				if (i == mutex_id) {
					continue;
				}

				ACPI_ERROR((AE_INFO,
					    "Invalid release order: owns [%s], releasing [%s]",
					    acpi_ut_get_mutex_name(i),
					    acpi_ut_get_mutex_name(mutex_id)));

				return (AE_RELEASE_DEADLOCK);
			}
		}
	}
#endif

	/* Mark unlocked FIRST (before the OS release makes it acquirable) */

	acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;

	acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex);
	return (AE_OK);
}
linux-master
drivers/acpi/acpica/utmutex.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exmisc")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_get_object_reference
 *
 * PARAMETERS:  obj_desc            - Create a reference to this object
 *              return_desc         - Where to store the reference
 *              walk_state          - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Obtain and return a "reference" to the target object
 *              Common code for the ref_of_op and the cond_ref_of_op.
 *
 ******************************************************************************/

acpi_status
acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
			     union acpi_operand_object **return_desc,
			     struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *reference_obj;
	union acpi_operand_object *referenced_obj;

	ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);

	*return_desc = NULL;

	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
	case ACPI_DESC_TYPE_OPERAND:

		if (obj_desc->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/*
		 * Must be a reference to a Local or Arg
		 */
		switch (obj_desc->reference.class) {
		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:
		case ACPI_REFCLASS_DEBUG:

			/* The referenced object is the pseudo-node for the local/arg */

			referenced_obj = obj_desc->reference.object;
			break;

		default:

			ACPI_ERROR((AE_INFO, "Invalid Reference Class 0x%2.2X",
				    obj_desc->reference.class));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}
		break;

	case ACPI_DESC_TYPE_NAMED:
		/*
		 * A named reference that has already been resolved to a Node
		 */
		referenced_obj = obj_desc;
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
			    ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a new reference object */

	reference_obj =
	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
	if (!reference_obj) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	reference_obj->reference.class = ACPI_REFCLASS_REFOF;
	reference_obj->reference.object = referenced_obj;
	*return_desc = reference_obj;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Object %p Type [%s], returning Reference %p\n",
			  obj_desc, acpi_ut_get_object_type_name(obj_desc),
			  *return_desc));

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_do_math_op
 *
 * PARAMETERS:  opcode              - AML opcode
 *              integer0            - Integer operand #0
 *              integer1            - Integer operand #1
 *
 * RETURN:      Integer result of the operation
 *
 * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
 *              math functions here is to prevent a lot of pointer dereferencing
 *              to obtain the operands.
* ******************************************************************************/ u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1) { ACPI_FUNCTION_ENTRY(); switch (opcode) { case AML_ADD_OP: /* Add (Integer0, Integer1, Result) */ return (integer0 + integer1); case AML_BIT_AND_OP: /* And (Integer0, Integer1, Result) */ return (integer0 & integer1); case AML_BIT_NAND_OP: /* NAnd (Integer0, Integer1, Result) */ return (~(integer0 & integer1)); case AML_BIT_OR_OP: /* Or (Integer0, Integer1, Result) */ return (integer0 | integer1); case AML_BIT_NOR_OP: /* NOr (Integer0, Integer1, Result) */ return (~(integer0 | integer1)); case AML_BIT_XOR_OP: /* XOr (Integer0, Integer1, Result) */ return (integer0 ^ integer1); case AML_MULTIPLY_OP: /* Multiply (Integer0, Integer1, Result) */ return (integer0 * integer1); case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */ /* * We need to check if the shiftcount is larger than the integer bit * width since the behavior of this is not well-defined in the C language. */ if (integer1 >= acpi_gbl_integer_bit_width) { return (0); } return (integer0 << integer1); case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */ /* * We need to check if the shiftcount is larger than the integer bit * width since the behavior of this is not well-defined in the C language. */ if (integer1 >= acpi_gbl_integer_bit_width) { return (0); } return (integer0 >> integer1); case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */ return (integer0 - integer1); default: return (0); } } /******************************************************************************* * * FUNCTION: acpi_ex_do_logical_numeric_op * * PARAMETERS: opcode - AML opcode * integer0 - Integer operand #0 * integer1 - Integer operand #1 * logical_result - TRUE/FALSE result of the operation * * RETURN: Status * * DESCRIPTION: Execute a logical "Numeric" AML opcode. 
For these Numeric * operators (LAnd and LOr), both operands must be integers. * * Note: cleanest machine code seems to be produced by the code * below, rather than using statements of the form: * Result = (Integer0 && Integer1); * ******************************************************************************/ acpi_status acpi_ex_do_logical_numeric_op(u16 opcode, u64 integer0, u64 integer1, u8 *logical_result) { acpi_status status = AE_OK; u8 local_result = FALSE; ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op); switch (opcode) { case AML_LOGICAL_AND_OP: /* LAnd (Integer0, Integer1) */ if (integer0 && integer1) { local_result = TRUE; } break; case AML_LOGICAL_OR_OP: /* LOr (Integer0, Integer1) */ if (integer0 || integer1) { local_result = TRUE; } break; default: ACPI_ERROR((AE_INFO, "Invalid numeric logical opcode: %X", opcode)); status = AE_AML_INTERNAL; break; } /* Return the logical result and status */ *logical_result = local_result; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_do_logical_op * * PARAMETERS: opcode - AML opcode * operand0 - operand #0 * operand1 - operand #1 * logical_result - TRUE/FALSE result of the operation * * RETURN: Status * * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the * functions here is to prevent a lot of pointer dereferencing * to obtain the operands and to simplify the generation of the * logical value. For the Numeric operators (LAnd and LOr), both * operands must be integers. For the other logical operators, * operands can be any combination of Integer/String/Buffer. The * first operand determines the type to which the second operand * will be converted. 
 *
 * Note: cleanest machine code seems to be produced by the code
 * below, rather than using statements of the form:
 *      Result = (Operand0 == Operand1);
 *
 ******************************************************************************/

acpi_status
acpi_ex_do_logical_op(u16 opcode,
		      union acpi_operand_object *operand0,
		      union acpi_operand_object *operand1, u8 *logical_result)
{
	union acpi_operand_object *local_operand1 = operand1;
	u64 integer0;
	u64 integer1;
	u32 length0;
	u32 length1;
	acpi_status status = AE_OK;
	u8 local_result = FALSE;
	int compare;

	ACPI_FUNCTION_TRACE(ex_do_logical_op);

	/*
	 * Convert the second operand if necessary. The first operand
	 * determines the type of the second operand, (See the Data Types
	 * section of the ACPI 3.0+ specification.)  Both object types are
	 * guaranteed to be either Integer/String/Buffer by the operand
	 * resolution mechanism.
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:

		status = acpi_ex_convert_to_integer(operand1, &local_operand1,
						    ACPI_IMPLICIT_CONVERSION);
		break;

	case ACPI_TYPE_STRING:

		status =
		    acpi_ex_convert_to_string(operand1, &local_operand1,
					      ACPI_IMPLICIT_CONVERT_HEX);
		break;

	case ACPI_TYPE_BUFFER:

		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
		break;

	default:

		ACPI_ERROR((AE_INFO,
			    "Invalid object type for logical operator: %X",
			    operand0->common.type));
		status = AE_AML_INTERNAL;
		break;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Two cases: 1) Both Integers, 2) Both Strings or Buffers
	 */
	if (operand0->common.type == ACPI_TYPE_INTEGER) {
		/*
		 * 1) Both operands are of type integer
		 *    Note: local_operand1 may have changed above
		 */
		integer0 = operand0->integer.value;
		integer1 = local_operand1->integer.value;

		switch (opcode) {
		case AML_LOGICAL_EQUAL_OP:	/* LEqual (Operand0, Operand1) */

			if (integer0 == integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LOGICAL_GREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (integer0 > integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LOGICAL_LESS_OP:	/* LLess (Operand0, Operand1) */

			if (integer0 < integer1) {
				local_result = TRUE;
			}
			break;

		default:

			ACPI_ERROR((AE_INFO,
				    "Invalid comparison opcode: %X", opcode));
			status = AE_AML_INTERNAL;
			break;
		}
	} else {
		/*
		 * 2) Both operands are Strings or both are Buffers
		 *    Note: Code below takes advantage of common Buffer/String
		 *    object fields. local_operand1 may have changed above. Use
		 *    memcmp to handle nulls in buffers.
		 */
		length0 = operand0->buffer.length;
		length1 = local_operand1->buffer.length;

		/* Lexicographic compare: compare the data bytes */

		compare = memcmp(operand0->buffer.pointer,
				 local_operand1->buffer.pointer,
				 (length0 > length1) ? length1 : length0);

		switch (opcode) {
		case AML_LOGICAL_EQUAL_OP:	/* LEqual (Operand0, Operand1) */

			/* Length and all bytes must be equal */

			if ((length0 == length1) && (compare == 0)) {

				/* Length and all bytes match ==> TRUE */

				local_result = TRUE;
			}
			break;

		case AML_LOGICAL_GREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (compare > 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}
			if (compare < 0) {
				goto cleanup;	/* FALSE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 > length1) {
				local_result = TRUE;
			}
			break;

		case AML_LOGICAL_LESS_OP:	/* LLess (Operand0, Operand1) */

			if (compare > 0) {
				goto cleanup;	/* FALSE */
			}
			if (compare < 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 < length1) {
				local_result = TRUE;
			}
			break;

		default:

			ACPI_ERROR((AE_INFO,
				    "Invalid comparison opcode: %X", opcode));
			status = AE_AML_INTERNAL;
			break;
		}
	}

cleanup:

	/* New object was created if implicit conversion performed - delete */

	if (local_operand1 != operand1) {
		acpi_ut_remove_reference(local_operand1);
	}

	/* Return the logical result and status */

	*logical_result = local_result;
	return_ACPI_STATUS(status);
}
linux-master
drivers/acpi/acpica/exmisc.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: tbxfroot - Find the root ACPI table (RSDT)
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"

#define _COMPONENT          ACPI_TABLES
ACPI_MODULE_NAME("tbxfroot")

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_get_rsdp_length
 *
 * PARAMETERS:  rsdp                - Pointer to RSDP
 *
 * RETURN:      Table length
 *
 * DESCRIPTION: Get the length of the RSDP
 *
 ******************************************************************************/

u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp)
{

	if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {

		/* BAD Signature */

		return (0);
	}

	/* "Length" field is available if table version >= 2 */

	if (rsdp->revision >= 2) {
		return (rsdp->length);
	} else {
		/* ACPI 1.0 RSDP has no Length field; use the fixed v1 size */

		return (ACPI_RSDP_CHECKSUM_LENGTH);
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_validate_rsdp
 *
 * PARAMETERS:  rsdp                - Pointer to unvalidated RSDP
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Validate the RSDP (ptr)
 *
 ******************************************************************************/

acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{

	/*
	 * The signature and checksum must both be correct
	 *
	 * Note: Sometimes there exists more than one RSDP in memory; the valid
	 * RSDP has a valid checksum, all others have an invalid checksum.
	 */
	if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {

		/* Nope, BAD Signature */

		return (AE_BAD_SIGNATURE);
	}

	/* Check the standard checksum (covers the first 20 bytes) */

	if (acpi_ut_checksum((u8 *)rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
		return (AE_BAD_CHECKSUM);
	}

	/* Check extended checksum if table version >= 2 */

	if ((rsdp->revision >= 2) &&
	    (acpi_ut_checksum((u8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
		return (AE_BAD_CHECKSUM);
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_find_root_pointer
 *
 * PARAMETERS:  table_address       - Where the table pointer is returned
 *
 * RETURN:      Status, RSDP physical address
 *
 * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor
 *              pointer structure. If it is found, set *RSDP to point to it.
 *
 * NOTE1:       The RSDP must be either in the first 1K of the Extended
 *              BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
 *              Only a 32-bit physical address is necessary.
 *
 * NOTE2:       This function is always available, regardless of the
 *              initialization state of the rest of ACPI.
 *
 ******************************************************************************/

acpi_status ACPI_INIT_FUNCTION
acpi_find_root_pointer(acpi_physical_address *table_address)
{
	u8 *table_ptr;
	u8 *mem_rover;
	u32 physical_address;
	u32 ebda_window_size;

	ACPI_FUNCTION_TRACE(acpi_find_root_pointer);

	/* 1a) Get the location of the Extended BIOS Data Area (EBDA) */

	table_ptr = acpi_os_map_memory((acpi_physical_address)
				       ACPI_EBDA_PTR_LOCATION,
				       ACPI_EBDA_PTR_LENGTH);
	if (!table_ptr) {
		ACPI_ERROR((AE_INFO,
			    "Could not map memory at 0x%8.8X for length %u",
			    ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));

		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	ACPI_MOVE_16_TO_32(&physical_address, table_ptr);

	/* Convert segment part to physical address */

	physical_address <<= 4;
	acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);

	/* EBDA present? */

	/*
	 * Check that the EBDA pointer from memory is sane and does not point
	 * above valid low memory
	 */
	if (physical_address > 0x400 && physical_address < 0xA0000) {
		/*
		 * Calculate the scan window size
		 * The EBDA is not guaranteed to be larger than a ki_b and in case
		 * that it is smaller, the scanning function would leave the low
		 * memory and continue to the VGA range.
		 */
		ebda_window_size = ACPI_MIN(ACPI_EBDA_WINDOW_SIZE,
					    0xA0000 - physical_address);

		/*
		 * 1b) Search EBDA paragraphs
		 */
		table_ptr = acpi_os_map_memory((acpi_physical_address)
					       physical_address,
					       ebda_window_size);
		if (!table_ptr) {
			ACPI_ERROR((AE_INFO,
				    "Could not map memory at 0x%8.8X for length %u",
				    physical_address, ebda_window_size));

			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		mem_rover = acpi_tb_scan_memory_for_rsdp(table_ptr,
							 ebda_window_size);
		acpi_os_unmap_memory(table_ptr, ebda_window_size);

		if (mem_rover) {

			/* Return the physical address */
			/* (Pointer arithmetic only; the unmapped region is not
			 * dereferenced here) */

			physical_address +=
			    (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);

			*table_address =
			    (acpi_physical_address)physical_address;
			return_ACPI_STATUS(AE_OK);
		}
	}

	/*
	 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
	 */
	table_ptr = acpi_os_map_memory((acpi_physical_address)
				       ACPI_HI_RSDP_WINDOW_BASE,
				       ACPI_HI_RSDP_WINDOW_SIZE);
	if (!table_ptr) {
		ACPI_ERROR((AE_INFO,
			    "Could not map memory at 0x%8.8X for length %u",
			    ACPI_HI_RSDP_WINDOW_BASE,
			    ACPI_HI_RSDP_WINDOW_SIZE));

		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	mem_rover = acpi_tb_scan_memory_for_rsdp(table_ptr,
						 ACPI_HI_RSDP_WINDOW_SIZE);
	acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);

	if (mem_rover) {

		/* Return the physical address */

		physical_address = (u32)
		    (ACPI_HI_RSDP_WINDOW_BASE +
		     ACPI_PTR_DIFF(mem_rover, table_ptr));

		*table_address = (acpi_physical_address)physical_address;
		return_ACPI_STATUS(AE_OK);
	}

	/* A valid RSDP was not found */

	ACPI_BIOS_ERROR((AE_INFO, "A valid RSDP was not found"));
	return_ACPI_STATUS(AE_NOT_FOUND);
}

ACPI_EXPORT_SYMBOL_INIT(acpi_find_root_pointer)
/******************************************************************************* * * FUNCTION: acpi_tb_scan_memory_for_rsdp * * PARAMETERS: start_address - Starting pointer for search * length - Maximum length to search * * RETURN: Pointer to the RSDP if found, otherwise NULL. * * DESCRIPTION: Search a block of memory for the RSDP signature * ******************************************************************************/ u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length) { acpi_status status; u8 *mem_rover; u8 *end_address; ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp); end_address = start_address + length; /* Search from given start address for the requested length */ for (mem_rover = start_address; mem_rover < end_address; mem_rover += ACPI_RSDP_SCAN_STEP) { /* The RSDP signature and checksum must both be correct */ status = acpi_tb_validate_rsdp(ACPI_CAST_PTR (struct acpi_table_rsdp, mem_rover)); if (ACPI_SUCCESS(status)) { /* Sig and checksum valid, we have found a real RSDP */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "RSDP located at physical address %p\n", mem_rover)); return_PTR(mem_rover); } /* No sig match or bad checksum, keep searching */ } /* Searched entire block, no RSDP was found */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Searched entire block from %p, valid RSDP was not found\n", start_address)); return_PTR(NULL); }
linux-master
drivers/acpi/acpica/tbxfroot.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: nsdump - table dumping routines for debug * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include <acpi/acoutput.h> #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsdump") /* Local prototypes */ #ifdef ACPI_OBSOLETE_FUNCTIONS void acpi_ns_dump_root_devices(void); static acpi_status acpi_ns_dump_one_device(acpi_handle obj_handle, u32 level, void *context, void **return_value); #endif #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) static acpi_status acpi_ns_dump_one_object_path(acpi_handle obj_handle, u32 level, void *context, void **return_value); static acpi_status acpi_ns_get_max_depth(acpi_handle obj_handle, u32 level, void *context, void **return_value); /******************************************************************************* * * FUNCTION: acpi_ns_print_pathname * * PARAMETERS: num_segments - Number of ACPI name segments * pathname - The compressed (internal) path * * RETURN: None * * DESCRIPTION: Print an object's full namespace pathname * ******************************************************************************/ void acpi_ns_print_pathname(u32 num_segments, const char *pathname) { u32 i; ACPI_FUNCTION_NAME(ns_print_pathname); /* Check if debug output enabled */ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_NAMES, ACPI_NAMESPACE)) { return; } /* Print the entire name */ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "[")); while (num_segments) { for (i = 0; i < 4; i++) { isprint((int)pathname[i]) ? 
acpi_os_printf("%c", pathname[i]) : acpi_os_printf("?"); } pathname += ACPI_NAMESEG_SIZE; num_segments--; if (num_segments) { acpi_os_printf("."); } } acpi_os_printf("]\n"); } #ifdef ACPI_OBSOLETE_FUNCTIONS /* Not used at this time, perhaps later */ /******************************************************************************* * * FUNCTION: acpi_ns_dump_pathname * * PARAMETERS: handle - Object * msg - Prefix message * level - Desired debug level * component - Caller's component ID * * RETURN: None * * DESCRIPTION: Print an object's full namespace pathname * Manages allocation/freeing of a pathname buffer * ******************************************************************************/ void acpi_ns_dump_pathname(acpi_handle handle, const char *msg, u32 level, u32 component) { ACPI_FUNCTION_TRACE(ns_dump_pathname); /* Do this only if the requested debug level and component are enabled */ if (!ACPI_IS_DEBUG_ENABLED(level, component)) { return_VOID; } /* Convert handle to a full pathname and print it (with supplied message) */ acpi_ns_print_node_pathname(handle, msg); acpi_os_printf("\n"); return_VOID; } #endif /******************************************************************************* * * FUNCTION: acpi_ns_dump_one_object * * PARAMETERS: obj_handle - Node to be dumped * level - Nesting level of the handle * context - Passed into walk_namespace * return_value - Not used * * RETURN: Status * * DESCRIPTION: Dump a single Node * This procedure is a user_function called by acpi_ns_walk_namespace. 
*
 ******************************************************************************/

acpi_status
acpi_ns_dump_one_object(acpi_handle obj_handle,
			u32 level, void *context, void **return_value)
{
	struct acpi_walk_info *info = (struct acpi_walk_info *)context;
	struct acpi_namespace_node *this_node;
	union acpi_operand_object *obj_desc = NULL;
	acpi_object_type obj_type;
	acpi_object_type type;
	u32 bytes_to_dump;
	u32 dbg_level;
	u32 i;

	ACPI_FUNCTION_NAME(ns_dump_one_object);

	/* Is output enabled? */

	if (!(acpi_dbg_level & info->debug_level)) {
		return (AE_OK);
	}

	if (!obj_handle) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Null object handle\n"));
		return (AE_OK);
	}

	this_node = acpi_ns_validate_handle(obj_handle);
	if (!this_node) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid object handle %p\n",
				  obj_handle));
		return (AE_OK);
	}

	type = this_node->type;
	info->count++;

	/* Check if the owner matches */

	if ((info->owner_id != ACPI_OWNER_ID_MAX) &&
	    (info->owner_id != this_node->owner_id)) {
		return (AE_OK);
	}

	if (!(info->display_type & ACPI_DISPLAY_SHORT)) {

		/* Indent the object according to the level */

		acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");

		/* Check the node type and name */

		if (type > ACPI_TYPE_LOCAL_MAX) {
			ACPI_WARNING((AE_INFO,
				      "Invalid ACPI Object Type 0x%08X", type));
		}

		acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node));
	}

	/* Now we can print out the pertinent information */

	acpi_os_printf(" %-12s %p %3.3X ",
		       acpi_ut_get_type_name(type), this_node,
		       this_node->owner_id);

	/* Suppress debug output while fetching the attached object */

	dbg_level = acpi_dbg_level;
	acpi_dbg_level = 0;
	obj_desc = acpi_ns_get_attached_object(this_node);
	acpi_dbg_level = dbg_level;

	/* Temp nodes are those nodes created by a control method */

	if (this_node->flags & ANOBJ_TEMPORARY) {
		acpi_os_printf("(T) ");
	}

	switch (info->display_type & ACPI_DISPLAY_MASK) {
	case ACPI_DISPLAY_SUMMARY:

		if (!obj_desc) {

			/* No attached object. Some types should always have an object */

			switch (type) {
			case ACPI_TYPE_INTEGER:
			case ACPI_TYPE_PACKAGE:
			case ACPI_TYPE_BUFFER:
			case ACPI_TYPE_STRING:
			case ACPI_TYPE_METHOD:

				acpi_os_printf("<No attached object>");
				break;

			default:

				break;
			}

			acpi_os_printf("\n");
			return (AE_OK);
		}

		switch (type) {
		case ACPI_TYPE_PROCESSOR:

			acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
				       obj_desc->processor.proc_id,
				       obj_desc->processor.length,
				       ACPI_FORMAT_UINT64(obj_desc->processor.
							  address));
			break;

		case ACPI_TYPE_DEVICE:

			acpi_os_printf("Notify Object: %p\n", obj_desc);
			break;

		case ACPI_TYPE_METHOD:

			acpi_os_printf("Args %X Len %.4X Aml %p\n",
				       (u32) obj_desc->method.param_count,
				       obj_desc->method.aml_length,
				       obj_desc->method.aml_start);
			break;

		case ACPI_TYPE_INTEGER:

			acpi_os_printf("= %8.8X%8.8X\n",
				       ACPI_FORMAT_UINT64(obj_desc->integer.
							  value));
			break;

		case ACPI_TYPE_PACKAGE:

			if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
				acpi_os_printf("Elements %.2X\n",
					       obj_desc->package.count);
			} else {
				acpi_os_printf("[Length not yet evaluated]\n");
			}
			break;

		case ACPI_TYPE_BUFFER:

			if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
				acpi_os_printf("Len %.2X",
					       obj_desc->buffer.length);

				/* Dump some of the buffer */

				if (obj_desc->buffer.length > 0) {
					acpi_os_printf(" =");
					for (i = 0;
					     (i < obj_desc->buffer.length
					      && i < 12); i++) {
						acpi_os_printf(" %2.2X",
							       obj_desc->buffer.
							       pointer[i]);
					}
				}
				acpi_os_printf("\n");
			} else {
				acpi_os_printf("[Length not yet evaluated]\n");
			}
			break;

		case ACPI_TYPE_STRING:

			acpi_os_printf("Len %.2X ", obj_desc->string.length);
			acpi_ut_print_string(obj_desc->string.pointer, 80);
			acpi_os_printf("\n");
			break;

		case ACPI_TYPE_REGION:

			acpi_os_printf("[%s]",
				       acpi_ut_get_region_name(obj_desc->region.
							       space_id));
			if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
				acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
					       ACPI_FORMAT_UINT64(obj_desc->
								  region.
								  address),
					       obj_desc->region.length);
			} else {
				acpi_os_printf
				    (" [Address/Length not yet evaluated]\n");
			}
			break;

		case ACPI_TYPE_LOCAL_REFERENCE:

			acpi_os_printf("[%s]\n",
				       acpi_ut_get_reference_name(obj_desc));
			break;

		case ACPI_TYPE_BUFFER_FIELD:

			if (obj_desc->buffer_field.buffer_obj &&
			    obj_desc->buffer_field.buffer_obj->buffer.node) {
				acpi_os_printf("Buf [%4.4s]",
					       acpi_ut_get_node_name(obj_desc->
								     buffer_field.
								     buffer_obj->
								     buffer.
								     node));
			}
			break;

		case ACPI_TYPE_LOCAL_REGION_FIELD:

			acpi_os_printf("Rgn [%4.4s]",
				       acpi_ut_get_node_name(obj_desc->
							     common_field.
							     region_obj->region.
							     node));
			break;

		case ACPI_TYPE_LOCAL_BANK_FIELD:

			acpi_os_printf("Rgn [%4.4s] Bnk [%4.4s]",
				       acpi_ut_get_node_name(obj_desc->
							     common_field.
							     region_obj->region.
							     node),
				       acpi_ut_get_node_name(obj_desc->
							     bank_field.
							     bank_obj->
							     common_field.
							     node));
			break;

		case ACPI_TYPE_LOCAL_INDEX_FIELD:

			acpi_os_printf("Idx [%4.4s] Dat [%4.4s]",
				       acpi_ut_get_node_name(obj_desc->
							     index_field.
							     index_obj->
							     common_field.node),
				       acpi_ut_get_node_name(obj_desc->
							     index_field.
							     data_obj->
							     common_field.
							     node));
			break;

		case ACPI_TYPE_LOCAL_ALIAS:
		case ACPI_TYPE_LOCAL_METHOD_ALIAS:

			acpi_os_printf("Target %4.4s (%p)\n",
				       acpi_ut_get_node_name(obj_desc),
				       obj_desc);
			break;

		default:

			acpi_os_printf("Object %p\n", obj_desc);
			break;
		}

		/* Common field handling */

		switch (type) {
		case ACPI_TYPE_BUFFER_FIELD:
		case ACPI_TYPE_LOCAL_REGION_FIELD:
		case ACPI_TYPE_LOCAL_BANK_FIELD:
		case ACPI_TYPE_LOCAL_INDEX_FIELD:

			acpi_os_printf(" Off %.3X Len %.2X Acc %.2X\n",
				       (obj_desc->common_field.
					base_byte_offset * 8) +
				       obj_desc->common_field.
				       start_field_bit_offset,
				       obj_desc->common_field.bit_length,
				       obj_desc->common_field.
				       access_byte_width);
			break;

		default:

			break;
		}
		break;

	case ACPI_DISPLAY_OBJECTS:

		acpi_os_printf("O:%p", obj_desc);
		if (!obj_desc) {

			/* No attached object, we are done */

			acpi_os_printf("\n");
			return (AE_OK);
		}

		acpi_os_printf("(R%u)", obj_desc->common.reference_count);

		switch (type) {
		case ACPI_TYPE_METHOD:

			/* Name is a Method and its AML offset/length are set */

			acpi_os_printf(" M:%p-%X\n", obj_desc->method.aml_start,
				       obj_desc->method.aml_length);
			break;

		case ACPI_TYPE_INTEGER:

			/*
			 * Fixed format string: was " I:%8.8X8.8%X\n", which
			 * has a stray literal "8.8" between the conversions
			 * and printed the u64 halves incorrectly.
			 */
			acpi_os_printf(" I:%8.8X%8.8X\n",
				       ACPI_FORMAT_UINT64(obj_desc->integer.
							  value));
			break;

		case ACPI_TYPE_STRING:

			acpi_os_printf(" S:%p-%X\n", obj_desc->string.pointer,
				       obj_desc->string.length);
			break;

		case ACPI_TYPE_BUFFER:

			acpi_os_printf(" B:%p-%X\n", obj_desc->buffer.pointer,
				       obj_desc->buffer.length);
			break;

		default:

			acpi_os_printf("\n");
			break;
		}
		break;

	default:
		acpi_os_printf("\n");
		break;
	}

	/* If debug turned off, done */

	if (!(acpi_dbg_level & ACPI_LV_VALUES)) {
		return (AE_OK);
	}

	/* If there is an attached object, display it */

	dbg_level = acpi_dbg_level;
	acpi_dbg_level = 0;
	obj_desc = acpi_ns_get_attached_object(this_node);
	acpi_dbg_level = dbg_level;

	/* Dump attached objects */

	while (obj_desc) {
		obj_type = ACPI_TYPE_INVALID;
		acpi_os_printf("Attached Object %p: ", obj_desc);

		/* Decode the type of attached object and dump the contents */

		switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
		case ACPI_DESC_TYPE_NAMED:

			acpi_os_printf("(Ptr to Node)\n");
			bytes_to_dump = sizeof(struct acpi_namespace_node);
			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
			break;

		case ACPI_DESC_TYPE_OPERAND:

			obj_type = obj_desc->common.type;

			if (obj_type > ACPI_TYPE_LOCAL_MAX) {
				acpi_os_printf
				    ("(Pointer to ACPI Object type %.2X [UNKNOWN])\n",
				     obj_type);

				bytes_to_dump = 32;
			} else {
				acpi_os_printf
				    ("(Pointer to ACPI Object type %.2X [%s])\n",
				     obj_type, acpi_ut_get_type_name(obj_type));

				bytes_to_dump =
				    sizeof(union acpi_operand_object);
			}

			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
			break;

		default:

			break;
		}

		/* If value is NOT an internal object, we are done */

		if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
		    ACPI_DESC_TYPE_OPERAND) {
			goto cleanup;
		}

		/* Valid object, get the pointer to next level, if any */

		switch (obj_type) {
		case ACPI_TYPE_BUFFER:
		case ACPI_TYPE_STRING:
			/*
			 * NOTE: takes advantage of common fields between string/buffer
			 */
			bytes_to_dump = obj_desc->string.length;
			obj_desc = (void *)obj_desc->string.pointer;

			acpi_os_printf("(Buffer/String pointer %p length %X)\n",
				       obj_desc, bytes_to_dump);
			ACPI_DUMP_BUFFER(obj_desc, bytes_to_dump);
			goto cleanup;

		case ACPI_TYPE_BUFFER_FIELD:

			obj_desc = (union acpi_operand_object *)
			    obj_desc->buffer_field.buffer_obj;
			break;

		case ACPI_TYPE_PACKAGE:

			obj_desc = (void *)obj_desc->package.elements;
			break;

		case ACPI_TYPE_METHOD:

			obj_desc = (void *)obj_desc->method.aml_start;
			break;

		case ACPI_TYPE_LOCAL_REGION_FIELD:

			obj_desc = (void *)obj_desc->field.region_obj;
			break;

		case ACPI_TYPE_LOCAL_BANK_FIELD:

			obj_desc = (void *)obj_desc->bank_field.region_obj;
			break;

		case ACPI_TYPE_LOCAL_INDEX_FIELD:

			obj_desc = (void *)obj_desc->index_field.index_obj;
			break;

		default:

			goto cleanup;
		}
	}

cleanup:
	acpi_os_printf("\n");
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_dump_objects
 *
 * PARAMETERS:  type                - Object type to be dumped
 *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
 *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
 *                                    for an effectively unlimited depth.
 *              owner_id            - Dump only objects owned by this ID. Use
 *                                    ACPI_UINT32_MAX to match all owners.
 *              start_handle        - Where in namespace to start/end search
 *
 * RETURN:      None
 *
 * DESCRIPTION: Dump typed objects within the loaded namespace. Uses
 *              acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object.
* ******************************************************************************/ void acpi_ns_dump_objects(acpi_object_type type, u8 display_type, u32 max_depth, acpi_owner_id owner_id, acpi_handle start_handle) { struct acpi_walk_info info; acpi_status status; ACPI_FUNCTION_ENTRY(); /* * Just lock the entire namespace for the duration of the dump. * We don't want any changes to the namespace during this time, * especially the temporary nodes since we are going to display * them also. */ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not acquire namespace mutex\n"); return; } info.count = 0; info.debug_level = ACPI_LV_TABLES; info.owner_id = owner_id; info.display_type = display_type; (void)acpi_ns_walk_namespace(type, start_handle, max_depth, ACPI_NS_WALK_NO_UNLOCK | ACPI_NS_WALK_TEMP_NODES, acpi_ns_dump_one_object, NULL, (void *)&info, NULL); acpi_os_printf("\nNamespace node count: %u\n\n", info.count); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); } /******************************************************************************* * * FUNCTION: acpi_ns_dump_one_object_path, acpi_ns_get_max_depth * * PARAMETERS: obj_handle - Node to be dumped * level - Nesting level of the handle * context - Passed into walk_namespace * return_value - Not used * * RETURN: Status * * DESCRIPTION: Dump the full pathname to a namespace object. acp_ns_get_max_depth * computes the maximum nesting depth in the namespace tree, in * order to simplify formatting in acpi_ns_dump_one_object_path. * These procedures are user_functions called by acpi_ns_walk_namespace. 
* ******************************************************************************/ static acpi_status acpi_ns_dump_one_object_path(acpi_handle obj_handle, u32 level, void *context, void **return_value) { u32 max_level = *((u32 *)context); char *pathname; struct acpi_namespace_node *node; int path_indent; if (!obj_handle) { return (AE_OK); } node = acpi_ns_validate_handle(obj_handle); if (!node) { /* Ignore bad node during namespace walk */ return (AE_OK); } pathname = acpi_ns_get_normalized_pathname(node, TRUE); path_indent = 1; if (level <= max_level) { path_indent = max_level - level + 1; } acpi_os_printf("%2d%*s%-12s%*s", level, level, " ", acpi_ut_get_type_name(node->type), path_indent, " "); acpi_os_printf("%s\n", &pathname[1]); ACPI_FREE(pathname); return (AE_OK); } static acpi_status acpi_ns_get_max_depth(acpi_handle obj_handle, u32 level, void *context, void **return_value) { u32 *max_level = (u32 *)context; if (level > *max_level) { *max_level = level; } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ns_dump_object_paths * * PARAMETERS: type - Object type to be dumped * display_type - 0 or ACPI_DISPLAY_SUMMARY * max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX * for an effectively unlimited depth. * owner_id - Dump only objects owned by this ID. Use * ACPI_UINT32_MAX to match all owners. * start_handle - Where in namespace to start/end search * * RETURN: None * * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses * acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path. * ******************************************************************************/ void acpi_ns_dump_object_paths(acpi_object_type type, u8 display_type, u32 max_depth, acpi_owner_id owner_id, acpi_handle start_handle) { acpi_status status; u32 max_level = 0; ACPI_FUNCTION_ENTRY(); /* * Just lock the entire namespace for the duration of the dump. 
* We don't want any changes to the namespace during this time, * especially the temporary nodes since we are going to display * them also. */ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not acquire namespace mutex\n"); return; } /* Get the max depth of the namespace tree, for formatting later */ (void)acpi_ns_walk_namespace(type, start_handle, max_depth, ACPI_NS_WALK_NO_UNLOCK | ACPI_NS_WALK_TEMP_NODES, acpi_ns_get_max_depth, NULL, (void *)&max_level, NULL); /* Now dump the entire namespace */ (void)acpi_ns_walk_namespace(type, start_handle, max_depth, ACPI_NS_WALK_NO_UNLOCK | ACPI_NS_WALK_TEMP_NODES, acpi_ns_dump_one_object_path, NULL, (void *)&max_level, NULL); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); } /******************************************************************************* * * FUNCTION: acpi_ns_dump_entry * * PARAMETERS: handle - Node to be dumped * debug_level - Output level * * RETURN: None * * DESCRIPTION: Dump a single Node * ******************************************************************************/ void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level) { struct acpi_walk_info info; ACPI_FUNCTION_ENTRY(); info.debug_level = debug_level; info.owner_id = ACPI_OWNER_ID_MAX; info.display_type = ACPI_DISPLAY_SUMMARY; (void)acpi_ns_dump_one_object(handle, 1, &info, NULL); } #ifdef ACPI_ASL_COMPILER /******************************************************************************* * * FUNCTION: acpi_ns_dump_tables * * PARAMETERS: search_base - Root of subtree to be dumped, or * NS_ALL to dump the entire namespace * max_depth - Maximum depth of dump. Use INT_MAX * for an effectively unlimited depth. * * RETURN: None * * DESCRIPTION: Dump the name space, or a portion of it. 
* ******************************************************************************/ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth) { acpi_handle search_handle = search_base; ACPI_FUNCTION_TRACE(ns_dump_tables); if (!acpi_gbl_root_node) { /* * If the name space has not been initialized, * there is nothing to dump. */ ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "namespace not initialized!\n")); return_VOID; } if (ACPI_NS_ALL == search_base) { /* Entire namespace */ search_handle = acpi_gbl_root_node; ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "\\\n")); } acpi_ns_dump_objects(ACPI_TYPE_ANY, ACPI_DISPLAY_OBJECTS, max_depth, ACPI_OWNER_ID_MAX, search_handle); return_VOID; } #endif #endif
linux-master
drivers/acpi/acpica/nsdump.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: utinit - Common ACPI subsystem initialization * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acevents.h" #include "actables.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utinit") /* Local prototypes */ static void acpi_ut_terminate(void); #if (!ACPI_REDUCED_HARDWARE) static void acpi_ut_free_gpe_lists(void); #else #define acpi_ut_free_gpe_lists() #endif /* !ACPI_REDUCED_HARDWARE */ #if (!ACPI_REDUCED_HARDWARE) /****************************************************************************** * * FUNCTION: acpi_ut_free_gpe_lists * * PARAMETERS: none * * RETURN: none * * DESCRIPTION: Free global GPE lists * ******************************************************************************/ static void acpi_ut_free_gpe_lists(void) { struct acpi_gpe_block_info *gpe_block; struct acpi_gpe_block_info *next_gpe_block; struct acpi_gpe_xrupt_info *gpe_xrupt_info; struct acpi_gpe_xrupt_info *next_gpe_xrupt_info; /* Free global GPE blocks and related info structures */ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; while (gpe_xrupt_info) { gpe_block = gpe_xrupt_info->gpe_block_list_head; while (gpe_block) { next_gpe_block = gpe_block->next; ACPI_FREE(gpe_block->event_info); ACPI_FREE(gpe_block->register_info); ACPI_FREE(gpe_block); gpe_block = next_gpe_block; } next_gpe_xrupt_info = gpe_xrupt_info->next; ACPI_FREE(gpe_xrupt_info); gpe_xrupt_info = next_gpe_xrupt_info; } } #endif /* !ACPI_REDUCED_HARDWARE */ /******************************************************************************* * * FUNCTION: acpi_ut_init_globals * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Initialize ACPICA globals. 
All globals that require specific
 *              initialization should be initialized here. This allows for
 *              a warm restart.
 *
 ******************************************************************************/

acpi_status acpi_ut_init_globals(void)
{
	acpi_status status;
	u32 i;

	ACPI_FUNCTION_TRACE(ut_init_globals);

	/* Create all memory caches */

	status = acpi_ut_create_caches();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Address Range lists */

	for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
		acpi_gbl_address_range_list[i] = NULL;
	}

	/* Mutex locked flags */

	for (i = 0; i < ACPI_NUM_MUTEX; i++) {
		acpi_gbl_mutex_info[i].mutex = NULL;
		acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
		acpi_gbl_mutex_info[i].use_count = 0;
	}

	for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
		acpi_gbl_owner_id_mask[i] = 0;
	}

	/* Last owner_ID is never valid */

	acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;

	/* Event counters */

	acpi_method_count = 0;
	acpi_sci_count = 0;
	acpi_gpe_count = 0;

	for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
		acpi_fixed_event_count[i] = 0;
	}

#if (!ACPI_REDUCED_HARDWARE)

	/* GPE/SCI support */

	acpi_gbl_all_gpes_initialized = FALSE;
	acpi_gbl_gpe_xrupt_list_head = NULL;
	acpi_gbl_gpe_fadt_blocks[0] = NULL;
	acpi_gbl_gpe_fadt_blocks[1] = NULL;
	acpi_current_gpe_count = 0;

	acpi_gbl_global_event_handler = NULL;
	acpi_gbl_sci_handler_list = NULL;

#endif				/* !ACPI_REDUCED_HARDWARE */

	/* Global handlers */

	acpi_gbl_global_notify[0].handler = NULL;
	acpi_gbl_global_notify[1].handler = NULL;
	acpi_gbl_exception_handler = NULL;
	acpi_gbl_init_handler = NULL;
	acpi_gbl_table_handler = NULL;
	acpi_gbl_interface_handler = NULL;

	/* Global Lock support */

	acpi_gbl_global_lock_semaphore = NULL;
	acpi_gbl_global_lock_mutex = NULL;
	acpi_gbl_global_lock_acquired = FALSE;
	acpi_gbl_global_lock_handle = 0;
	acpi_gbl_global_lock_present = FALSE;

	/* Miscellaneous variables */

	acpi_gbl_DSDT = NULL;
	acpi_gbl_cm_single_step = FALSE;
	acpi_gbl_shutdown = FALSE;
	acpi_gbl_ns_lookup_count = 0;
	acpi_gbl_ps_find_count = 0;
	acpi_gbl_acpi_hardware_present = TRUE;
	acpi_gbl_last_owner_id_index = 0;
	acpi_gbl_next_owner_id_offset = 0;
	acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
	acpi_gbl_osi_mutex = NULL;

	/* Hardware oriented */

	acpi_gbl_events_initialized = FALSE;
	acpi_gbl_system_awake_and_running = TRUE;

	/* Namespace: reset the statically-allocated root node */

	acpi_gbl_root_node = NULL;
	acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
	acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
	acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
	acpi_gbl_root_node_struct.parent = NULL;
	acpi_gbl_root_node_struct.child = NULL;
	acpi_gbl_root_node_struct.peer = NULL;
	acpi_gbl_root_node_struct.object = NULL;

#ifdef ACPI_DISASSEMBLER
	acpi_gbl_external_list = NULL;
	acpi_gbl_num_external_methods = 0;
	acpi_gbl_resolved_external_methods = 0;
#endif

#ifdef ACPI_DEBUG_OUTPUT
	acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
#endif

#ifdef ACPI_DBG_TRACK_ALLOCATIONS
	acpi_gbl_display_final_mem_stats = FALSE;
	acpi_gbl_disable_mem_tracking = FALSE;
#endif

	return_ACPI_STATUS(AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_ut_terminate
 *
 * PARAMETERS:  none
 *
 * RETURN:      none
 *
 * DESCRIPTION: Free global memory
 *
 ******************************************************************************/

static void acpi_ut_terminate(void)
{
	ACPI_FUNCTION_TRACE(ut_terminate);

	/* Free the GPE lists (no-op under ACPI_REDUCED_HARDWARE) and the
	 * address range lists */

	acpi_ut_free_gpe_lists();
	acpi_ut_delete_address_lists();
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_subsystem_shutdown
 *
 * PARAMETERS:  None
 *
 * RETURN:      None
 *
 * DESCRIPTION: Shutdown the various components. Do not delete the mutex
 *              objects here, because the AML debugger may be still running.
* ******************************************************************************/ void acpi_ut_subsystem_shutdown(void) { ACPI_FUNCTION_TRACE(ut_subsystem_shutdown); /* Just exit if subsystem is already shutdown */ if (acpi_gbl_shutdown) { ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated")); return_VOID; } /* Subsystem appears active, go ahead and shut it down */ acpi_gbl_shutdown = TRUE; acpi_gbl_startup_flags = 0; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n")); #ifndef ACPI_ASL_COMPILER /* Close the acpi_event Handling */ acpi_ev_terminate(); /* Delete any dynamic _OSI interfaces */ acpi_ut_interface_terminate(); #endif /* Close the Namespace */ acpi_ns_terminate(); /* Delete the ACPI tables */ acpi_tb_terminate(); /* Close the globals */ acpi_ut_terminate(); /* Purge the local caches */ (void)acpi_ut_delete_caches(); return_VOID; }
linux-master
drivers/acpi/acpica/utinit.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exresolv - AML Interpreter object resolution
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exresolv")

/* Local prototypes */
static acpi_status
acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
				struct acpi_walk_state *walk_state);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_to_value
 *
 * PARAMETERS:  **stack_ptr         - Points to entry on obj_stack, which can
 *                                    be either an (union acpi_operand_object *)
 *                                    or an acpi_handle.
 *              walk_state          - Current method state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert Reference objects to values
 *
 ******************************************************************************/

acpi_status
acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
			 struct acpi_walk_state *walk_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);

	if (!stack_ptr || !*stack_ptr) {
		ACPI_ERROR((AE_INFO, "Internal - null pointer"));
		return_ACPI_STATUS(AE_AML_NO_OPERAND);
	}

	/*
	 * The entity pointed to by the stack_ptr can be either
	 * 1) A valid union acpi_operand_object, or
	 * 2) A struct acpi_namespace_node (named_obj)
	 */
	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_OPERAND) {
		status = acpi_ex_resolve_object_to_value(stack_ptr, walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* Resolution may have replaced *stack_ptr; re-validate it */

		if (!*stack_ptr) {
			ACPI_ERROR((AE_INFO, "Internal - null pointer"));
			return_ACPI_STATUS(AE_AML_NO_OPERAND);
		}
	}

	/*
	 * Object on the stack may have changed if acpi_ex_resolve_object_to_value()
	 * was called (i.e., we can't use an _else_ here.)
	 */
	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_NAMED) {
		status =
		    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
						  (struct acpi_namespace_node,
						   stack_ptr), walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr));
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_object_to_value
 *
 * PARAMETERS:  stack_ptr           - Pointer to an internal object
 *              walk_state          - Current method state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Retrieve the value from an internal object. The Reference type
 *              uses the associated AML opcode to determine the value.
 *
 ******************************************************************************/

static acpi_status
acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
				struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *stack_desc;
	union acpi_operand_object *obj_desc = NULL;
	u8 ref_type;

	ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);

	stack_desc = *stack_ptr;

	/* This is an object of type union acpi_operand_object */

	switch (stack_desc->common.type) {
	case ACPI_TYPE_LOCAL_REFERENCE:

		ref_type = stack_desc->reference.class;

		switch (ref_type) {
		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:
			/*
			 * Get the local from the method's state info
			 * Note: this increments the local's object reference count
			 */
			status = acpi_ds_method_data_get_value(ref_type,
							       stack_desc->
							       reference.value,
							       walk_state,
							       &obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
					  "[Arg/Local %X] ValueObj is %p\n",
					  stack_desc->reference.value,
					  obj_desc));

			/*
			 * Now we can delete the original Reference Object and
			 * replace it with the resolved value
			 */
			acpi_ut_remove_reference(stack_desc);
			*stack_ptr = obj_desc;
			break;

		case ACPI_REFCLASS_INDEX:

			switch (stack_desc->reference.target_type) {
			case ACPI_TYPE_BUFFER_FIELD:

				/* Just return - do not dereference */
				break;

			case ACPI_TYPE_PACKAGE:

				/* If method call or copy_object - do not dereference */

				if ((walk_state->opcode ==
				     AML_INT_METHODCALL_OP)
				    || (walk_state->opcode ==
					AML_COPY_OBJECT_OP)) {
					break;
				}

				/* Otherwise, dereference the package_index to a package element */

				obj_desc = *stack_desc->reference.where;
				if (obj_desc) {
					/*
					 * Valid object descriptor, copy pointer to return value
					 * (i.e., dereference the package index)
					 * Delete the ref object, increment the returned object
					 */
					acpi_ut_add_reference(obj_desc);
					*stack_ptr = obj_desc;
				} else {
					/*
					 * A NULL object descriptor means an uninitialized element of
					 * the package, can't dereference it
					 */
					ACPI_ERROR((AE_INFO,
						    "Attempt to dereference an Index to "
						    "NULL package element Idx=%p",
						    stack_desc));
					status = AE_AML_UNINITIALIZED_ELEMENT;
				}
				break;

			default:

				/* Invalid reference object */

				ACPI_ERROR((AE_INFO,
					    "Unknown TargetType 0x%X in Index/Reference object %p",
					    stack_desc->reference.target_type,
					    stack_desc));
				status = AE_AML_INTERNAL;
				break;
			}
			break;

		case ACPI_REFCLASS_REFOF:
		case ACPI_REFCLASS_DEBUG:
		case ACPI_REFCLASS_TABLE:

			/* Just leave the object as-is, do not dereference */

			break;

		case ACPI_REFCLASS_NAME:	/* Reference to a named object */

			/* Dereference the name */

			if ((stack_desc->reference.node->type ==
			     ACPI_TYPE_DEVICE)
			    || (stack_desc->reference.node->type ==
				ACPI_TYPE_THERMAL)) {

				/* These node types do not have 'real' subobjects */

				*stack_ptr = (void *)stack_desc->reference.node;
			} else {
				/* Get the object pointed to by the namespace node */

				*stack_ptr =
				    (stack_desc->reference.node)->object;
				acpi_ut_add_reference(*stack_ptr);
			}

			acpi_ut_remove_reference(stack_desc);
			break;

		default:

			ACPI_ERROR((AE_INFO,
				    "Unknown Reference type 0x%X in %p",
				    ref_type, stack_desc));
			status = AE_AML_INTERNAL;
			break;
		}
		break;

	case ACPI_TYPE_BUFFER:

		status = acpi_ds_get_buffer_arguments(stack_desc);
		break;

	case ACPI_TYPE_PACKAGE:

		status = acpi_ds_get_package_arguments(stack_desc);
		break;

	case ACPI_TYPE_BUFFER_FIELD:
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "FieldRead SourceDesc=%p Type=%X\n",
				  stack_desc, stack_desc->common.type));

		status =
		    acpi_ex_read_data_from_field(walk_state, stack_desc,
						 &obj_desc);

		/* Remove a reference to the original operand, then override */

		acpi_ut_remove_reference(*stack_ptr);
		*stack_ptr = (void *)obj_desc;
		break;

	default:
		break;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_multiple
 *
 * PARAMETERS:  walk_state          - Current state (contains AML opcode)
 *              operand             - Starting point for resolution
 *              return_type         - Where the object type is returned
 *              return_desc         - Where the resolved object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Return the base object and type. Traverse a reference list if
 *              necessary to get to the base object.
 *
 ******************************************************************************/

acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
			 union acpi_operand_object *operand,
			 acpi_object_type *return_type,
			 union acpi_operand_object **return_desc)
{
	union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
	struct acpi_namespace_node *node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, operand);
	acpi_object_type type;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);

	/* Operand can be either a namespace node or an operand descriptor */

	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
	case ACPI_DESC_TYPE_OPERAND:

		type = obj_desc->common.type;
		break;

	case ACPI_DESC_TYPE_NAMED:

		type = ((struct acpi_namespace_node *)obj_desc)->type;
		obj_desc = acpi_ns_get_attached_object(node);

		/* If we had an Alias node, use the attached object for type info */

		if (type == ACPI_TYPE_LOCAL_ALIAS) {
			type = ((struct acpi_namespace_node *)obj_desc)->type;
			obj_desc = acpi_ns_get_attached_object((struct
								acpi_namespace_node
								*)obj_desc);
		}

		switch (type) {
		case ACPI_TYPE_DEVICE:
		case ACPI_TYPE_THERMAL:

			/* These types have no attached subobject */
			break;

		default:

			/* All other types require a subobject */

			if (!obj_desc) {
				ACPI_ERROR((AE_INFO,
					    "[%4.4s] Node is unresolved or uninitialized",
					    acpi_ut_get_node_name(node)));
				return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
			}
			break;
		}
		break;

	default:
		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	/* If type is anything other than a reference, we are done */

	if (type != ACPI_TYPE_LOCAL_REFERENCE) {
		goto exit;
	}

	/*
	 * For reference objects created via the ref_of, Index, or Load/load_table
	 * operators, we need to get to the base object (as per the ACPI
	 * specification of the object_type and size_of operators). This means
	 * traversing the list of possibly many nested references.
	 */
	while (obj_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
		switch (obj_desc->reference.class) {
		case ACPI_REFCLASS_REFOF:
		case ACPI_REFCLASS_NAME:

			/* Dereference the reference pointer */

			if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
				node = obj_desc->reference.object;
			} else {	/* AML_INT_NAMEPATH_OP */

				node = obj_desc->reference.node;
			}

			/* All "References" point to a NS node */

			if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
			    ACPI_DESC_TYPE_NAMED) {
				ACPI_ERROR((AE_INFO,
					    "Not a namespace node %p [%s]",
					    node,
					    acpi_ut_get_descriptor_name(node)));
				return_ACPI_STATUS(AE_AML_INTERNAL);
			}

			/* Get the attached object */

			obj_desc = acpi_ns_get_attached_object(node);
			if (!obj_desc) {

				/* No object, use the NS node type */

				type = acpi_ns_get_type(node);
				goto exit;
			}

			/* Check for circular references */

			if (obj_desc == operand) {
				return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
			}
			break;

		case ACPI_REFCLASS_INDEX:

			/* Get the type of this reference (index into another object) */

			type = obj_desc->reference.target_type;
			if (type != ACPI_TYPE_PACKAGE) {
				goto exit;
			}

			/*
			 * The main object is a package, we want to get the type
			 * of the individual package element that is referenced by
			 * the index.
			 *
			 * This could of course in turn be another reference object.
			 */
			obj_desc = *(obj_desc->reference.where);
			if (!obj_desc) {

				/* NULL package elements are allowed */

				type = 0;	/* Uninitialized */
				goto exit;
			}
			break;

		case ACPI_REFCLASS_TABLE:

			type = ACPI_TYPE_DDB_HANDLE;
			goto exit;

		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:

			if (return_desc) {
				status =
				    acpi_ds_method_data_get_value(obj_desc->
								  reference.
								  class,
								  obj_desc->
								  reference.
								  value,
								  walk_state,
								  &obj_desc);
				if (ACPI_FAILURE(status)) {
					return_ACPI_STATUS(status);
				}

				/* Drop the extra reference added by the get_value call */

				acpi_ut_remove_reference(obj_desc);
			} else {
				status =
				    acpi_ds_method_data_get_node(obj_desc->
								 reference.
								 class,
								 obj_desc->
								 reference.
								 value,
								 walk_state,
								 &node);
				if (ACPI_FAILURE(status)) {
					return_ACPI_STATUS(status);
				}

				obj_desc = acpi_ns_get_attached_object(node);
				if (!obj_desc) {
					type = ACPI_TYPE_ANY;
					goto exit;
				}
			}
			break;

		case ACPI_REFCLASS_DEBUG:

			/* The Debug Object is of type "DebugObject" */

			type = ACPI_TYPE_DEBUG_OBJECT;
			goto exit;

		default:

			ACPI_ERROR((AE_INFO,
				    "Unknown Reference Class 0x%2.2X",
				    obj_desc->reference.class));
			return_ACPI_STATUS(AE_AML_INTERNAL);
		}
	}

	/*
	 * Now we are guaranteed to have an object that has not been created
	 * via the ref_of or Index operators.
	 */
	type = obj_desc->common.type;

exit:
	/* Convert internal types to external types */

	switch (type) {
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:

		type = ACPI_TYPE_FIELD_UNIT;
		break;

	case ACPI_TYPE_LOCAL_SCOPE:

		/* Per ACPI Specification, Scope is untyped */

		type = ACPI_TYPE_ANY;
		break;

	default:

		/* No change to Type required */

		break;
	}

	*return_type = type;
	if (return_desc) {
		*return_desc = obj_desc;
	}
	return_ACPI_STATUS(AE_OK);
}
linux-master
drivers/acpi/acpica/exresolv.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#define EXPORT_ACPI_INTERFACES

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evxfgpe")

#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*******************************************************************************
 *
 * FUNCTION:    acpi_update_all_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
 *              associated _Lxx or _Exx methods and are not pointed to by any
 *              device _PRW methods (this indicates that these GPEs are
 *              generally intended for system or device wakeup. Such GPEs
 *              have to be enabled directly when the devices whose _PRW
 *              methods point to them are set up for wakeup signaling.)
 *
 * NOTE: Should be called after any GPEs are added to the system. Primarily,
 * after the system _PRW methods have been run, but also after a GPE Block
 * Device has been added or if any new GPE methods have been added via a
 * dynamic table load.
 *
 ******************************************************************************/

acpi_status acpi_update_all_gpes(void)
{
	acpi_status status;
	u8 is_polling_needed = FALSE;

	ACPI_FUNCTION_TRACE(acpi_update_all_gpes);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Initialization is performed at most once */

	if (acpi_gbl_all_gpes_initialized) {
		goto unlock_and_exit;
	}

	status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block,
				       &is_polling_needed);
	if (ACPI_SUCCESS(status)) {
		acpi_gbl_all_gpes_initialized = TRUE;
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

	if (is_polling_needed && acpi_gbl_all_gpes_initialized) {

		/* Poll GPEs to handle already triggered events */

		acpi_ev_gpe_detect(acpi_gbl_gpe_xrupt_list_head);
	}
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)

/*******************************************************************************
 *
 * FUNCTION:    acpi_enable_gpe
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status = AE_BAD_PARAMETER;
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_enable_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/*
	 * Ensure that we have a valid GPE number and that there is some way
	 * of handling the GPE (handler or a GPE method). In other words, we
	 * won't allow a valid GPE to be enabled if there is no way to handle it.
	 */
	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (gpe_event_info) {
		if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
		    ACPI_GPE_DISPATCH_NONE) {
			status =
			    acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
			if (ACPI_SUCCESS(status) &&
			    ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {

				/*
				 * Poll edge-triggered GPEs to handle existing
				 * events. The GPE lock is dropped around the
				 * detect call and reacquired afterwards.
				 */
				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
				(void)acpi_ev_detect_gpe(gpe_device,
							 gpe_event_info,
							 gpe_number);
				flags =
				    acpi_os_acquire_lock(acpi_gbl_gpe_lock);
			}
		} else {
			status = AE_NO_HANDLER;
		}
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enable_gpe)

/*******************************************************************************
 *
 * FUNCTION:    acpi_disable_gpe
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, only then is the GPE disabled (for runtime GPEs), or
 *              the GPE mask bit disabled (for wake GPEs)
 *
 ******************************************************************************/

acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status = AE_BAD_PARAMETER;
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_disable_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (gpe_event_info) {
		status = acpi_ev_remove_gpe_reference(gpe_event_info);
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_disable_gpe)

/*******************************************************************************
 *
 * FUNCTION:    acpi_set_gpe
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *              action              - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
 *              the reference count mechanism used in the acpi_enable_gpe(),
 *              acpi_disable_gpe() interfaces.
 *              This API is typically used by the GPE raw handler mode driver
 *              to switch between the polling mode and the interrupt mode after
 *              the driver has enabled the GPE.
 *              The APIs should be invoked in this order:
 *               acpi_enable_gpe()            <- Ensure the reference count > 0
 *               acpi_set_gpe(ACPI_GPE_DISABLE) <- Enter polling mode
 *               acpi_set_gpe(ACPI_GPE_ENABLE) <- Leave polling mode
 *               acpi_disable_gpe()           <- Decrease the reference count
 *
 * Note: If a GPE is shared by 2 silicon components, then both the drivers
 *       should support GPE polling mode or disabling the GPE for long period
 *       for one driver may break the other. So use it with care since all
 *       firmware _Lxx/_Exx handlers currently rely on the GPE interrupt mode.
 *
 ******************************************************************************/

acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_set_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Perform the action */

	switch (action) {
	case ACPI_GPE_ENABLE:

		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
		gpe_event_info->disable_for_dispatch = FALSE;
		break;

	case ACPI_GPE_DISABLE:

		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		gpe_event_info->disable_for_dispatch = TRUE;
		break;

	default:

		status = AE_BAD_PARAMETER;
		break;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_set_gpe)

/*******************************************************************************
 *
 * FUNCTION:    acpi_mask_gpe
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *              is_masked           - Whether the GPE is masked or not
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask the an individual GPE, ex., to
 *              prevent a GPE flooding.
 *
 ******************************************************************************/

acpi_status acpi_mask_gpe(acpi_handle gpe_device, u32 gpe_number, u8 is_masked)
{
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_mask_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ev_mask_gpe(gpe_event_info, is_masked);

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_mask_gpe)

/*******************************************************************************
 *
 * FUNCTION:    acpi_mark_gpe_for_wake
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Mark a GPE as having the ability to wake the system. Simply
 *              sets the ACPI_GPE_CAN_WAKE flag.
 *
 * Some potential callers of acpi_setup_gpe_for_wake may know in advance that
 * there won't be any notify handlers installed for device wake notifications
 * from the given GPE (one example is a button GPE in Linux). For these cases,
 * acpi_mark_gpe_for_wake should be used instead of acpi_setup_gpe_for_wake.
 * This will set the ACPI_GPE_CAN_WAKE flag for the GPE without trying to
 * setup implicit wake notification for it (since there's no handler method).
 *
 ******************************************************************************/

acpi_status acpi_mark_gpe_for_wake(acpi_handle gpe_device, u32 gpe_number)
{
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status = AE_BAD_PARAMETER;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_mark_gpe_for_wake);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (gpe_event_info) {

		/* Mark the GPE as a possible wake event */

		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
		status = AE_OK;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_mark_gpe_for_wake)

/*******************************************************************************
 *
 * FUNCTION:    acpi_setup_gpe_for_wake
 *
 * PARAMETERS:  wake_device         - Device associated with the GPE (via _PRW)
 *              gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
 *              interface is intended to be used as the host executes the
 *              _PRW methods (Power Resources for Wake) in the system tables.
 *              Each _PRW appears under a Device Object (The wake_device), and
 *              contains the info for the wake GPE associated with the
 *              wake_device.
 *
 ******************************************************************************/

acpi_status
acpi_setup_gpe_for_wake(acpi_handle wake_device,
			acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_namespace_node *device_node;
	struct acpi_gpe_notify_info *notify;
	struct acpi_gpe_notify_info *new_notify;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);

	/* Parameter Validation */

	if (!wake_device) {
		/*
		 * By forcing wake_device to be valid, we automatically enable the
		 * implicit notify feature on all hosts.
		 */
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Handle root object case */

	if (wake_device == ACPI_ROOT_OBJECT) {
		device_node = acpi_gbl_root_node;
	} else {
		device_node =
		    ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
	}

	/* Validate wake_device is of type Device */

	if (device_node->type != ACPI_TYPE_DEVICE) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Allocate a new notify object up front, in case it is needed.
	 * Memory allocation while holding a spinlock is a big no-no
	 * on some hosts.
	 */
	new_notify = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_notify_info));
	if (!new_notify) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/*
	 * If there is no method or handler for this GPE, then the
	 * wake_device will be notified whenever this GPE fires. This is
	 * known as an "implicit notify". Note: The GPE is assumed to be
	 * level-triggered (for windows compatibility).
	 */
	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_NONE) {
		/*
		 * This is the first device for implicit notify on this GPE.
		 * Just set the flags here, and enter the NOTIFY block below.
		 */
		gpe_event_info->flags =
		    (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
	} else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
		/*
		 * A reference to this GPE has been added during the GPE block
		 * initialization, so drop it now to prevent the GPE from being
		 * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
		 */
		(void)acpi_ev_remove_gpe_reference(gpe_event_info);
		gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
	}

	/*
	 * If we already have an implicit notify on this GPE, add
	 * this device to the notify list.
	 */
	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_NOTIFY) {

		/* Ensure that the device is not already in the list */

		notify = gpe_event_info->dispatch.notify_list;
		while (notify) {
			if (notify->device_node == device_node) {
				status = AE_ALREADY_EXISTS;
				goto unlock_and_exit;
			}
			notify = notify->next;
		}

		/* Add this device to the notify list for this GPE */

		new_notify->device_node = device_node;
		new_notify->next = gpe_event_info->dispatch.notify_list;
		gpe_event_info->dispatch.notify_list = new_notify;
		new_notify = NULL;	/* Ownership transferred to the list */
	}

	/* Mark the GPE as a possible wake event */

	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
	status = AE_OK;

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Delete the notify object if it was not used above */

	if (new_notify) {
		ACPI_FREE(new_notify);
	}
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)

/*******************************************************************************
 *
 * FUNCTION:    acpi_set_gpe_wake_mask
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *              action              - Enable or Disable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
 *              already be marked as a WAKE GPE.
 *
 ******************************************************************************/

acpi_status
acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
	acpi_status status = AE_OK;
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_register_info *gpe_register_info;
	acpi_cpu_flags flags;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/*
	 * Ensure that we have a valid GPE number and that this GPE is in
	 * fact a wake GPE
	 */
	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
		status = AE_TYPE;
		goto unlock_and_exit;
	}

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		status = AE_NOT_EXIST;
		goto unlock_and_exit;
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	switch (action) {
	case ACPI_GPE_ENABLE:

		ACPI_SET_BIT(gpe_register_info->enable_for_wake,
			     (u8)register_bit);
		break;

	case ACPI_GPE_DISABLE:

		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
			       (u8)register_bit);
		break;

	default:

		ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
		status = AE_BAD_PARAMETER;
		break;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)

/*******************************************************************************
 *
 * FUNCTION:    acpi_clear_gpe
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear an ACPI event (general purpose)
 *
 ******************************************************************************/

acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status = AE_OK;
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_clear_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_hw_clear_gpe(gpe_event_info);

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_clear_gpe)

/*******************************************************************************
 *
 * FUNCTION:    acpi_get_gpe_status
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *              event_status        - Where the current status of the event
 *                                    will be returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
 *
 ******************************************************************************/

acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
		    u32 gpe_number, acpi_event_status *event_status)
{
	acpi_status status = AE_OK;
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Obtain status on the requested GPE number */

	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)

/*******************************************************************************
 *
 * FUNCTION:    acpi_dispatch_gpe
 *
 * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
 *              (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
 *
 ******************************************************************************/
u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	ACPI_FUNCTION_TRACE(acpi_dispatch_gpe);

	return acpi_ev_detect_gpe(gpe_device, NULL, gpe_number);
}

ACPI_EXPORT_SYMBOL(acpi_dispatch_gpe)

/*******************************************************************************
 *
 * FUNCTION:    acpi_finish_gpe
 *
 * PARAMETERS:  gpe_device          - Namespace node for the GPE Block
 *                                    (NULL for FADT defined GPEs)
 *              gpe_number          - GPE level within the GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear and conditionally re-enable a GPE. This completes the GPE
 *              processing. Intended for use by asynchronous host-installed
 *              GPE handlers. The GPE is only re-enabled if the enable_for_run bit
 *              is set in the GPE info.
 *
 ******************************************************************************/

acpi_status acpi_finish_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_finish_gpe);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	status = acpi_ev_finish_gpe(gpe_event_info);

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_finish_gpe)

/******************************************************************************
 *
 * FUNCTION:    acpi_disable_all_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
 *
 ******************************************************************************/

acpi_status acpi_disable_all_gpes(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_hw_disable_all_gpes();
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)

/******************************************************************************
 *
 * FUNCTION:    acpi_enable_all_runtime_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
 *
 ******************************************************************************/

acpi_status acpi_enable_all_runtime_gpes(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_hw_enable_all_runtime_gpes();
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)

/******************************************************************************
 *
 * FUNCTION:    acpi_enable_all_wakeup_gpes
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable all "wakeup" GPEs and disable all of the other GPEs, in
 *              all GPE blocks.
 *
 ******************************************************************************/

acpi_status acpi_enable_all_wakeup_gpes(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_enable_all_wakeup_gpes);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_hw_enable_all_wakeup_gpes();
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)

/******************************************************************************
 *
 * FUNCTION:    acpi_any_gpe_status_set
 *
 * PARAMETERS:  gpe_skip_number     - Number of the GPE to skip
 *
 * RETURN:      Whether or not the status bit is set for any GPE
 *
 * DESCRIPTION: Check the status bits of all enabled GPEs, except for the one
 *              represented by the "skip" argument, and return TRUE if any of
 *              them is set or FALSE otherwise.
 *
 ******************************************************************************/

u32 acpi_any_gpe_status_set(u32 gpe_skip_number)
{
	acpi_status status;
	acpi_handle gpe_device;
	u8 ret;

	ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return (FALSE);
	}

	/* If the skip number cannot be resolved, no GPE is skipped */

	status = acpi_get_gpe_device(gpe_skip_number, &gpe_device);
	if (ACPI_FAILURE(status)) {
		gpe_device = NULL;
	}

	ret = acpi_hw_check_all_gpes(gpe_device, gpe_skip_number);
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);

	return (ret);
}

ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)

/*******************************************************************************
 *
 * FUNCTION:    acpi_install_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
 *              gpe_block_address   - Address and space_ID
 *              register_count      - Number of GPE register pairs in the block
 *              interrupt_number    - H/W interrupt for the block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
 *              enabled here.
* ******************************************************************************/ acpi_status acpi_install_gpe_block(acpi_handle gpe_device, struct acpi_generic_address *gpe_block_address, u32 register_count, u32 interrupt_number) { acpi_status status; union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; struct acpi_gpe_block_info *gpe_block; ACPI_FUNCTION_TRACE(acpi_install_gpe_block); if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } node = acpi_ns_validate_handle(gpe_device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Validate the parent device */ if (node->type != ACPI_TYPE_DEVICE) { status = AE_TYPE; goto unlock_and_exit; } if (node->object) { status = AE_ALREADY_EXISTS; goto unlock_and_exit; } /* * For user-installed GPE Block Devices, the gpe_block_base_number * is always zero */ status = acpi_ev_create_gpe_block(node, gpe_block_address->address, gpe_block_address->space_id, register_count, 0, interrupt_number, &gpe_block); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* Install block in the device_object attached to the node */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { /* * No object, create a new one (Device nodes do not always have * an attached object) */ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); if (!obj_desc) { status = AE_NO_MEMORY; goto unlock_and_exit; } status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); /* Remove local reference to the object */ acpi_ut_remove_reference(obj_desc); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } } /* Now install the GPE block in the device_object */ obj_desc->device.gpe_block = gpe_block; unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) 
/******************************************************************************* * * FUNCTION: acpi_remove_gpe_block * * PARAMETERS: gpe_device - Handle to the parent GPE Block Device * * RETURN: Status * * DESCRIPTION: Remove a previously installed block of GPE registers * ******************************************************************************/ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) { union acpi_operand_object *obj_desc; acpi_status status; struct acpi_namespace_node *node; ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); if (!gpe_device) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } node = acpi_ns_validate_handle(gpe_device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Validate the parent device */ if (node->type != ACPI_TYPE_DEVICE) { status = AE_TYPE; goto unlock_and_exit; } /* Get the device_object attached to the node */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc || !obj_desc->device.gpe_block) { return_ACPI_STATUS(AE_NULL_OBJECT); } /* Delete the GPE block (but not the device_object) */ status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); if (ACPI_SUCCESS(status)) { obj_desc->device.gpe_block = NULL; } unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) /******************************************************************************* * * FUNCTION: acpi_get_gpe_device * * PARAMETERS: index - System GPE index (0-current_gpe_count) * gpe_device - Where the parent GPE Device is returned * * RETURN: Status * * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL * gpe device indicates that the gpe number is contained in one of * the FADT-defined gpe blocks. Otherwise, the GPE block device. 
 *
 ******************************************************************************/

acpi_status acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
{
	struct acpi_gpe_device_info info;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);

	if (!gpe_device) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	if (index >= acpi_current_gpe_count) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* Setup and walk the GPE list */

	info.index = index;
	info.status = AE_NOT_EXIST;	/* Assume not found until the walk says otherwise */
	info.gpe_device = NULL;
	info.next_block_base_index = 0;

	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* A NULL gpe_device means the GPE is in a FADT-defined block */

	*gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
	return_ACPI_STATUS(info.status);
}

ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
#endif				/* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/evxfgpe.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: utaddress - op_region address range check * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utaddress") /******************************************************************************* * * FUNCTION: acpi_ut_add_address_range * * PARAMETERS: space_id - Address space ID * address - op_region start address * length - op_region length * region_node - op_region namespace node * * RETURN: Status * * DESCRIPTION: Add the Operation Region address range to the global list. * The only supported Space IDs are Memory and I/O. Called when * the op_region address/length operands are fully evaluated. * * MUTEX: Locks the namespace * * NOTE: Because this interface is only called when an op_region argument * list is evaluated, there cannot be any duplicate region_nodes. * Duplicate Address/Length values are allowed, however, so that multiple * address conflicts can be detected. 
 *
 ******************************************************************************/

acpi_status
acpi_ut_add_address_range(acpi_adr_space_type space_id,
			  acpi_physical_address address,
			  u32 length, struct acpi_namespace_node *region_node)
{
	struct acpi_address_range *range_info;

	ACPI_FUNCTION_TRACE(ut_add_address_range);

	/* Only System Memory and System I/O op_regions are tracked */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate/init a new info block, add it to the appropriate list */

	range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
	if (!range_info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* end_address is inclusive: the last byte of the region */

	range_info->start_address = address;
	range_info->end_address = (address + length - 1);
	range_info->region_node = region_node;

	/* Link the new entry at the head of the per-space-ID list */

	range_info->next = acpi_gbl_address_range_list[space_id];
	acpi_gbl_address_range_list[space_id] = range_info;

	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
			  "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
			  acpi_ut_get_node_name(range_info->region_node),
			  ACPI_FORMAT_UINT64(address),
			  ACPI_FORMAT_UINT64(range_info->end_address)));

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_address_range
 *
 * PARAMETERS:  space_id            - Address space ID
 *              region_node         - op_region namespace node
 *
 * RETURN:      None
 *
 * DESCRIPTION: Remove the Operation Region from the global list. The only
 *              supported Space IDs are Memory and I/O. Called when an
 *              op_region is deleted.
 *
 * MUTEX:       Assumes the namespace is locked
 *
 ******************************************************************************/

void
acpi_ut_remove_address_range(acpi_adr_space_type space_id,
			     struct acpi_namespace_node *region_node)
{
	struct acpi_address_range *range_info;
	struct acpi_address_range *prev;

	ACPI_FUNCTION_TRACE(ut_remove_address_range);

	/* Only System Memory and System I/O op_regions are tracked */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_VOID;
	}

	/* Get the appropriate list head and check the list */

	range_info = prev = acpi_gbl_address_range_list[space_id];
	while (range_info) {
		if (range_info->region_node == region_node) {
			if (range_info == prev) {

				/* Found at list head */

				acpi_gbl_address_range_list[space_id] =
				    range_info->next;
			} else {
				prev->next = range_info->next;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
					  "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
					  acpi_ut_get_node_name(range_info->
								region_node),
					  ACPI_FORMAT_UINT64(range_info->
							     start_address),
					  ACPI_FORMAT_UINT64(range_info->
							     end_address)));

			/* At most one entry per region_node can exist; done */

			ACPI_FREE(range_info);
			return_VOID;
		}

		prev = range_info;
		range_info = range_info->next;
	}

	/* Node not found in the list: not an error, nothing to remove */

	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_check_address_range
 *
 * PARAMETERS:  space_id            - Address space ID
 *              address             - Start address
 *              length              - Length of address range
 *              warn                - TRUE if warning on overlap desired
 *
 * RETURN:      Count of the number of conflicts detected. Zero is always
 *              returned for Space IDs other than Memory or I/O.
 *
 * DESCRIPTION: Check if the input address range overlaps any of the
 *              ASL operation region address ranges. The only supported
 *              Space IDs are Memory and I/O.
 *
 * MUTEX:       Assumes the namespace is locked.
 *
 ******************************************************************************/

u32
acpi_ut_check_address_range(acpi_adr_space_type space_id,
			    acpi_physical_address address, u32 length, u8 warn)
{
	struct acpi_address_range *range_info;
	acpi_physical_address end_address;
	char *pathname;
	u32 overlap_count = 0;

	ACPI_FUNCTION_TRACE(ut_check_address_range);

	/* Only System Memory and System I/O op_regions are tracked */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_UINT32(0);
	}

	range_info = acpi_gbl_address_range_list[space_id];

	/* end_address is inclusive: the last byte of the input range */

	end_address = address + length - 1;

	/* Check entire list for all possible conflicts */

	while (range_info) {
		/*
		 * Check if the requested address/length overlaps this
		 * address range. There are four cases to consider:
		 *
		 * 1) Input address/length is contained completely in the
		 *    address range
		 * 2) Input address/length overlaps range at the range start
		 * 3) Input address/length overlaps range at the range end
		 * 4) Input address/length completely encompasses the range
		 *
		 * All four reduce to the single interval-intersection test
		 * below (both are closed intervals).
		 */
		if ((address <= range_info->end_address) &&
		    (end_address >= range_info->start_address)) {

			/* Found an address range overlap */

			overlap_count++;
			if (warn) {

				/* Optional warning message */

				pathname =
				    acpi_ns_get_normalized_pathname(range_info->
								    region_node,
								    TRUE);

				ACPI_WARNING((AE_INFO,
					      "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)",
					      acpi_ut_get_region_name(space_id),
					      ACPI_FORMAT_UINT64(address),
					      ACPI_FORMAT_UINT64(end_address),
					      ACPI_FORMAT_UINT64(range_info->
								 start_address),
					      ACPI_FORMAT_UINT64(range_info->
								 end_address),
					      pathname));
				ACPI_FREE(pathname);
			}
		}

		range_info = range_info->next;
	}

	return_UINT32(overlap_count);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_delete_address_lists
 *
 * PARAMETERS:  None
 *
 * RETURN:      None
 *
 * DESCRIPTION: Delete all global address range lists (called during
 *              subsystem shutdown).
* ******************************************************************************/ void acpi_ut_delete_address_lists(void) { struct acpi_address_range *next; struct acpi_address_range *range_info; int i; /* Delete all elements in all address range lists */ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) { next = acpi_gbl_address_range_list[i]; while (next) { range_info = next; next = range_info->next; ACPI_FREE(range_info); } acpi_gbl_address_range_list[i] = NULL; } }
linux-master
drivers/acpi/acpica/utaddress.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: pstree - Parser op tree manipulation/traversal/search * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #include "acconvert.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("pstree") /* Local prototypes */ #ifdef ACPI_OBSOLETE_FUNCTIONS union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op); #endif /******************************************************************************* * * FUNCTION: acpi_ps_get_arg * * PARAMETERS: op - Get an argument for this op * argn - Nth argument to get * * RETURN: The argument (as an Op object). NULL if argument does not exist * * DESCRIPTION: Get the specified op's argument. * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn) { union acpi_parse_object *arg = NULL; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_ENTRY(); /* if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP) { return (Op->Common.Value.Arg); } */ /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode or ASCII character */ return (NULL); } /* Check if this opcode requires argument sub-objects */ if (!(op_info->flags & AML_HAS_ARGS)) { /* Has no linked argument objects */ return (NULL); } /* Get the requested argument object */ arg = op->common.value.arg; while (arg && argn) { argn--; arg = arg->common.next; } return (arg); } /******************************************************************************* * * FUNCTION: acpi_ps_append_arg * * PARAMETERS: op - Append an argument to this Op. 
* arg - Argument Op to append * * RETURN: None. * * DESCRIPTION: Append an argument to an op's argument list (a NULL arg is OK) * ******************************************************************************/ void acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg) { union acpi_parse_object *prev_arg; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_TRACE(ps_append_arg); if (!op) { return_VOID; } /* Get the info structure for this opcode */ op_info = acpi_ps_get_opcode_info(op->common.aml_opcode); if (op_info->class == AML_CLASS_UNKNOWN) { /* Invalid opcode */ ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X", op->common.aml_opcode)); return_VOID; } /* Check if this opcode requires argument sub-objects */ if (!(op_info->flags & AML_HAS_ARGS)) { /* Has no linked argument objects */ return_VOID; } /* Append the argument to the linked argument list */ if (op->common.value.arg) { /* Append to existing argument list */ prev_arg = op->common.value.arg; while (prev_arg->common.next) { prev_arg = prev_arg->common.next; } prev_arg->common.next = arg; } else { /* No argument list, this will be the first argument */ op->common.value.arg = arg; } /* Set the parent in this arg and any args linked after it */ while (arg) { arg->common.parent = op; arg = arg->common.next; op->common.arg_list_length++; } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ps_get_depth_next * * PARAMETERS: origin - Root of subtree to search * op - Last (previous) Op that was found * * RETURN: Next Op found in the search. 
* * DESCRIPTION: Get next op in tree (walking the tree in depth-first order) * Return NULL when reaching "origin" or when walking up from root * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin, union acpi_parse_object *op) { union acpi_parse_object *next = NULL; union acpi_parse_object *parent; union acpi_parse_object *arg; ACPI_FUNCTION_ENTRY(); if (!op) { return (NULL); } /* Look for an argument or child */ next = acpi_ps_get_arg(op, 0); if (next) { ASL_CV_LABEL_FILENODE(next); return (next); } /* Look for a sibling */ next = op->common.next; if (next) { ASL_CV_LABEL_FILENODE(next); return (next); } /* Look for a sibling of parent */ parent = op->common.parent; while (parent) { arg = acpi_ps_get_arg(parent, 0); while (arg && (arg != origin) && (arg != op)) { ASL_CV_LABEL_FILENODE(arg); arg = arg->common.next; } if (arg == origin) { /* Reached parent of origin, end search */ return (NULL); } if (parent->common.next) { /* Found sibling of parent */ ASL_CV_LABEL_FILENODE(parent->common.next); return (parent->common.next); } op = parent; parent = parent->common.parent; } ASL_CV_LABEL_FILENODE(next); return (next); } #ifdef ACPI_OBSOLETE_FUNCTIONS /******************************************************************************* * * FUNCTION: acpi_ps_get_child * * PARAMETERS: op - Get the child of this Op * * RETURN: Child Op, Null if none is found. 
* * DESCRIPTION: Get op's children or NULL if none * ******************************************************************************/ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op) { union acpi_parse_object *child = NULL; ACPI_FUNCTION_ENTRY(); switch (op->common.aml_opcode) { case AML_SCOPE_OP: case AML_ELSE_OP: case AML_DEVICE_OP: case AML_THERMAL_ZONE_OP: case AML_INT_METHODCALL_OP: child = acpi_ps_get_arg(op, 0); break; case AML_BUFFER_OP: case AML_PACKAGE_OP: case AML_VARIABLE_PACKAGE_OP: case AML_METHOD_OP: case AML_IF_OP: case AML_WHILE_OP: case AML_FIELD_OP: child = acpi_ps_get_arg(op, 1); break; case AML_POWER_RESOURCE_OP: case AML_INDEX_FIELD_OP: child = acpi_ps_get_arg(op, 2); break; case AML_PROCESSOR_OP: case AML_BANK_FIELD_OP: child = acpi_ps_get_arg(op, 3); break; default: /* All others have no children */ break; } return (child); } #endif
linux-master
drivers/acpi/acpica/pstree.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: dbexec - debugger control method execution * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acdebug.h" #include "acnamesp.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbexec") static struct acpi_db_method_info acpi_gbl_db_method_info; /* Local prototypes */ static acpi_status acpi_db_execute_method(struct acpi_db_method_info *info, struct acpi_buffer *return_obj); static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info); static u32 acpi_db_get_outstanding_allocations(void); static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context); static acpi_status acpi_db_execution_walk(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value); static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context); /******************************************************************************* * * FUNCTION: acpi_db_delete_objects * * PARAMETERS: count - Count of objects in the list * objects - Array of ACPI_OBJECTs to be deleted * * RETURN: None * * DESCRIPTION: Delete a list of ACPI_OBJECTS. Handles packages and nested * packages via recursion. 
* ******************************************************************************/ void acpi_db_delete_objects(u32 count, union acpi_object *objects) { u32 i; for (i = 0; i < count; i++) { switch (objects[i].type) { case ACPI_TYPE_BUFFER: ACPI_FREE(objects[i].buffer.pointer); break; case ACPI_TYPE_PACKAGE: /* Recursive call to delete package elements */ acpi_db_delete_objects(objects[i].package.count, objects[i].package.elements); /* Free the elements array */ ACPI_FREE(objects[i].package.elements); break; default: break; } } } /******************************************************************************* * * FUNCTION: acpi_db_execute_method * * PARAMETERS: info - Valid info segment * return_obj - Where to put return object * * RETURN: Status * * DESCRIPTION: Execute a control method. Used to evaluate objects via the * "EXECUTE" or "EVALUATE" commands. * ******************************************************************************/ static acpi_status acpi_db_execute_method(struct acpi_db_method_info *info, struct acpi_buffer *return_obj) { acpi_status status; struct acpi_object_list param_objects; union acpi_object params[ACPI_DEBUGGER_MAX_ARGS + 1]; u32 i; ACPI_FUNCTION_TRACE(db_execute_method); if (acpi_gbl_db_output_to_file && !acpi_dbg_level) { acpi_os_printf("Warning: debug output is not enabled!\n"); } param_objects.count = 0; param_objects.pointer = NULL; /* Pass through any command-line arguments */ if (info->args && info->args[0]) { /* Get arguments passed on the command line */ for (i = 0; (info->args[i] && *(info->args[i])); i++) { /* Convert input string (token) to an actual union acpi_object */ status = acpi_db_convert_to_object(info->types[i], info->args[i], &params[i]); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "While parsing method arguments")); goto cleanup; } } param_objects.count = i; param_objects.pointer = params; } /* Prepare for a return object of arbitrary size */ return_obj->pointer = acpi_gbl_db_buffer; 
return_obj->length = ACPI_DEBUG_BUFFER_SIZE; /* Do the actual method execution */ acpi_gbl_method_executing = TRUE; status = acpi_evaluate_object(NULL, info->pathname, &param_objects, return_obj); acpi_gbl_cm_single_step = FALSE; acpi_gbl_method_executing = FALSE; if (ACPI_FAILURE(status)) { if ((status == AE_ABORT_METHOD) || acpi_gbl_abort_method) { /* Clear the abort and fall back to the debugger prompt */ ACPI_EXCEPTION((AE_INFO, status, "Aborting top-level method")); acpi_gbl_abort_method = FALSE; status = AE_OK; goto cleanup; } ACPI_EXCEPTION((AE_INFO, status, "while executing %s from AML Debugger", info->pathname)); if (status == AE_BUFFER_OVERFLOW) { ACPI_ERROR((AE_INFO, "Possible buffer overflow within AML Debugger " "buffer (size 0x%X needed 0x%X)", ACPI_DEBUG_BUFFER_SIZE, (u32)return_obj->length)); } } cleanup: acpi_db_delete_objects(param_objects.count, params); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_db_execute_setup * * PARAMETERS: info - Valid method info * * RETURN: None * * DESCRIPTION: Setup info segment prior to method execution * ******************************************************************************/ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info) { acpi_status status; ACPI_FUNCTION_NAME(db_execute_setup); /* Concatenate the current scope to the supplied name */ info->pathname[0] = 0; if ((info->name[0] != '\\') && (info->name[0] != '/')) { if (acpi_ut_safe_strcat(info->pathname, sizeof(info->pathname), acpi_gbl_db_scope_buf)) { status = AE_BUFFER_OVERFLOW; goto error_exit; } } if (acpi_ut_safe_strcat(info->pathname, sizeof(info->pathname), info->name)) { status = AE_BUFFER_OVERFLOW; goto error_exit; } acpi_db_prep_namestring(info->pathname); acpi_db_set_output_destination(ACPI_DB_DUPLICATE_OUTPUT); acpi_os_printf("Evaluating %s\n", info->pathname); if (info->flags & EX_SINGLE_STEP) { acpi_gbl_cm_single_step = TRUE; 
acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT); } else { /* No single step, allow redirection to a file */ acpi_db_set_output_destination(ACPI_DB_REDIRECTABLE_OUTPUT); } return (AE_OK); error_exit: ACPI_EXCEPTION((AE_INFO, status, "During setup for method execution")); return (status); } #ifdef ACPI_DBG_TRACK_ALLOCATIONS u32 acpi_db_get_cache_info(struct acpi_memory_list *cache) { return (cache->total_allocated - cache->total_freed - cache->current_depth); } #endif /******************************************************************************* * * FUNCTION: acpi_db_get_outstanding_allocations * * PARAMETERS: None * * RETURN: Current global allocation count minus cache entries * * DESCRIPTION: Determine the current number of "outstanding" allocations -- * those allocations that have not been freed and also are not * in one of the various object caches. * ******************************************************************************/ static u32 acpi_db_get_outstanding_allocations(void) { u32 outstanding = 0; #ifdef ACPI_DBG_TRACK_ALLOCATIONS outstanding += acpi_db_get_cache_info(acpi_gbl_state_cache); outstanding += acpi_db_get_cache_info(acpi_gbl_ps_node_cache); outstanding += acpi_db_get_cache_info(acpi_gbl_ps_node_ext_cache); outstanding += acpi_db_get_cache_info(acpi_gbl_operand_cache); #endif return (outstanding); } /******************************************************************************* * * FUNCTION: acpi_db_execution_walk * * PARAMETERS: WALK_CALLBACK * * RETURN: Status * * DESCRIPTION: Execute a control method. Name is relative to the current * scope. 
* ******************************************************************************/ static acpi_status acpi_db_execution_walk(acpi_handle obj_handle, u32 nesting_level, void *context, void **return_value) { union acpi_operand_object *obj_desc; struct acpi_namespace_node *node = (struct acpi_namespace_node *)obj_handle; struct acpi_buffer return_obj; acpi_status status; obj_desc = acpi_ns_get_attached_object(node); if (obj_desc->method.param_count) { return (AE_OK); } return_obj.pointer = NULL; return_obj.length = ACPI_ALLOCATE_BUFFER; acpi_ns_print_node_pathname(node, "Evaluating"); /* Do the actual method execution */ acpi_os_printf("\n"); acpi_gbl_method_executing = TRUE; status = acpi_evaluate_object(node, NULL, NULL, &return_obj); acpi_gbl_method_executing = FALSE; acpi_os_printf("Evaluation of [%4.4s] returned %s\n", acpi_ut_get_node_name(node), acpi_format_exception(status)); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_db_execute * * PARAMETERS: name - Name of method to execute * args - Parameters to the method * Types - * flags - single step/no single step * * RETURN: None * * DESCRIPTION: Execute a control method. Name is relative to the current * scope. Function used for the "EXECUTE", "EVALUATE", and * "ALL" commands * ******************************************************************************/ void acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags) { acpi_status status; struct acpi_buffer return_obj; char *name_string; #ifdef ACPI_DEBUG_OUTPUT u32 previous_allocations; u32 allocations; #endif /* * Allow one execution to be performed by debugger or single step * execution will be dead locked by the interpreter mutexes. 
*/ if (acpi_gbl_method_executing) { acpi_os_printf("Only one debugger execution is allowed.\n"); return; } #ifdef ACPI_DEBUG_OUTPUT /* Memory allocation tracking */ previous_allocations = acpi_db_get_outstanding_allocations(); #endif if (*name == '*') { (void)acpi_walk_namespace(ACPI_TYPE_METHOD, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, acpi_db_execution_walk, NULL, NULL, NULL); return; } if ((flags & EX_ALL) && (strlen(name) > 4)) { acpi_os_printf("Input name (%s) must be a 4-char NameSeg\n", name); return; } name_string = ACPI_ALLOCATE(strlen(name) + 1); if (!name_string) { return; } memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info)); strcpy(name_string, name); acpi_ut_strupr(name_string); /* Subcommand to Execute all predefined names in the namespace */ if (!strncmp(name_string, "PREDEF", 6)) { acpi_db_evaluate_predefined_names(); ACPI_FREE(name_string); return; } /* Command (ALL <nameseg>) to execute all methods of a particular name */ else if (flags & EX_ALL) { acpi_gbl_db_method_info.name = name_string; return_obj.pointer = NULL; return_obj.length = ACPI_ALLOCATE_BUFFER; acpi_db_evaluate_all(name_string); ACPI_FREE(name_string); return; } else { acpi_gbl_db_method_info.name = name_string; acpi_gbl_db_method_info.args = args; acpi_gbl_db_method_info.types = types; acpi_gbl_db_method_info.flags = flags; return_obj.pointer = NULL; return_obj.length = ACPI_ALLOCATE_BUFFER; } status = acpi_db_execute_setup(&acpi_gbl_db_method_info); if (ACPI_FAILURE(status)) { ACPI_FREE(name_string); return; } /* Get the NS node, determines existence also */ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname, &acpi_gbl_db_method_info.method); if (ACPI_SUCCESS(status)) { status = acpi_db_execute_method(&acpi_gbl_db_method_info, &return_obj); } ACPI_FREE(name_string); /* * Allow any handlers in separate threads to complete. * (Such as Notify handlers invoked from AML executed above). 
*/ acpi_os_sleep((u64)10); #ifdef ACPI_DEBUG_OUTPUT /* Memory allocation tracking */ allocations = acpi_db_get_outstanding_allocations() - previous_allocations; acpi_db_set_output_destination(ACPI_DB_DUPLICATE_OUTPUT); if (allocations > 0) { acpi_os_printf ("0x%X Outstanding allocations after evaluation of %s\n", allocations, acpi_gbl_db_method_info.pathname); } #endif if (ACPI_FAILURE(status)) { acpi_os_printf("Evaluation of %s failed with status %s\n", acpi_gbl_db_method_info.pathname, acpi_format_exception(status)); } else { /* Display a return object, if any */ if (return_obj.length) { acpi_os_printf("Evaluation of %s returned object %p, " "external buffer length %X\n", acpi_gbl_db_method_info.pathname, return_obj.pointer, (u32)return_obj.length); acpi_db_dump_external_object(return_obj.pointer, 1); acpi_os_printf("\n"); /* Dump a _PLD buffer if present */ if (ACPI_COMPARE_NAMESEG ((ACPI_CAST_PTR (struct acpi_namespace_node, acpi_gbl_db_method_info.method)->name.ascii), METHOD_NAME__PLD)) { acpi_db_dump_pld_buffer(return_obj.pointer); } } else { acpi_os_printf ("No object was returned from evaluation of %s\n", acpi_gbl_db_method_info.pathname); } } acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT); } /******************************************************************************* * * FUNCTION: acpi_db_method_thread * * PARAMETERS: context - Execution info segment * * RETURN: None * * DESCRIPTION: Debugger execute thread. Waits for a command line, then * simply dispatches it. * ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context) { acpi_status status; struct acpi_db_method_info *info = context; struct acpi_db_method_info local_info; u32 i; u8 allow; struct acpi_buffer return_obj; /* * acpi_gbl_db_method_info.Arguments will be passed as method arguments. * Prevent acpi_gbl_db_method_info from being modified by multiple threads * concurrently. 
* * Note: The arguments we are passing are used by the ASL test suite * (aslts). Do not change them without updating the tests. */ (void)acpi_os_wait_semaphore(info->info_gate, 1, ACPI_WAIT_FOREVER); if (info->init_args) { acpi_db_uint32_to_hex_string(info->num_created, info->index_of_thread_str); acpi_db_uint32_to_hex_string((u32)acpi_os_get_thread_id(), info->id_of_thread_str); } if (info->threads && (info->num_created < info->num_threads)) { info->threads[info->num_created++] = acpi_os_get_thread_id(); } local_info = *info; local_info.args = local_info.arguments; local_info.arguments[0] = local_info.num_threads_str; local_info.arguments[1] = local_info.id_of_thread_str; local_info.arguments[2] = local_info.index_of_thread_str; local_info.arguments[3] = NULL; local_info.types = local_info.arg_types; (void)acpi_os_signal_semaphore(info->info_gate, 1); for (i = 0; i < info->num_loops; i++) { status = acpi_db_execute_method(&local_info, &return_obj); if (ACPI_FAILURE(status)) { acpi_os_printf ("%s During evaluation of %s at iteration %X\n", acpi_format_exception(status), info->pathname, i); if (status == AE_ABORT_METHOD) { break; } } #if 0 if ((i % 100) == 0) { acpi_os_printf("%u loops, Thread 0x%x\n", i, acpi_os_get_thread_id()); } if (return_obj.length) { acpi_os_printf ("Evaluation of %s returned object %p Buflen %X\n", info->pathname, return_obj.pointer, (u32)return_obj.length); acpi_db_dump_external_object(return_obj.pointer, 1); } #endif } /* Signal our completion */ allow = 0; (void)acpi_os_wait_semaphore(info->thread_complete_gate, 1, ACPI_WAIT_FOREVER); info->num_completed++; if (info->num_completed == info->num_threads) { /* Do signal for main thread once only */ allow = 1; } (void)acpi_os_signal_semaphore(info->thread_complete_gate, 1); if (allow) { status = acpi_os_signal_semaphore(info->main_thread_gate, 1); if (ACPI_FAILURE(status)) { acpi_os_printf ("Could not signal debugger thread sync semaphore, %s\n", acpi_format_exception(status)); } } } 
/******************************************************************************* * * FUNCTION: acpi_db_single_execution_thread * * PARAMETERS: context - Method info struct * * RETURN: None * * DESCRIPTION: Create one thread and execute a method * ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context) { struct acpi_db_method_info *info = context; acpi_status status; struct acpi_buffer return_obj; acpi_os_printf("\n"); status = acpi_db_execute_method(info, &return_obj); if (ACPI_FAILURE(status)) { acpi_os_printf("%s During evaluation of %s\n", acpi_format_exception(status), info->pathname); return; } /* Display a return object, if any */ if (return_obj.length) { acpi_os_printf("Evaluation of %s returned object %p, " "external buffer length %X\n", acpi_gbl_db_method_info.pathname, return_obj.pointer, (u32)return_obj.length); acpi_db_dump_external_object(return_obj.pointer, 1); } acpi_os_printf("\nBackground thread completed\n%c ", ACPI_DEBUGGER_COMMAND_PROMPT); } /******************************************************************************* * * FUNCTION: acpi_db_create_execution_thread * * PARAMETERS: method_name_arg - Control method to execute * arguments - Array of arguments to the method * types - Corresponding array of object types * * RETURN: None * * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles * arguments passed on command line for control methods. 
* ******************************************************************************/ void acpi_db_create_execution_thread(char *method_name_arg, char **arguments, acpi_object_type *types) { acpi_status status; u32 i; memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info)); acpi_gbl_db_method_info.name = method_name_arg; acpi_gbl_db_method_info.init_args = 1; acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments; acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types; /* Setup method arguments, up to 7 (0-6) */ for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) { acpi_gbl_db_method_info.arguments[i] = *arguments; arguments++; acpi_gbl_db_method_info.arg_types[i] = *types; types++; } status = acpi_db_execute_setup(&acpi_gbl_db_method_info); if (ACPI_FAILURE(status)) { return; } /* Get the NS node, determines existence also */ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname, &acpi_gbl_db_method_info.method); if (ACPI_FAILURE(status)) { acpi_os_printf("%s Could not get handle for %s\n", acpi_format_exception(status), acpi_gbl_db_method_info.pathname); return; } status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD, acpi_db_single_execution_thread, &acpi_gbl_db_method_info); if (ACPI_FAILURE(status)) { return; } acpi_os_printf("\nBackground thread started\n"); } /******************************************************************************* * * FUNCTION: acpi_db_create_execution_threads * * PARAMETERS: num_threads_arg - Number of threads to create * num_loops_arg - Loop count for the thread(s) * method_name_arg - Control method to execute * * RETURN: None * * DESCRIPTION: Create threads to execute method(s) * ******************************************************************************/ void acpi_db_create_execution_threads(char *num_threads_arg, char *num_loops_arg, char *method_name_arg) { acpi_status status; u32 num_threads; u32 num_loops; u32 i; u32 size; acpi_mutex main_thread_gate; acpi_mutex 
thread_complete_gate; acpi_mutex info_gate; /* Get the arguments */ num_threads = strtoul(num_threads_arg, NULL, 0); num_loops = strtoul(num_loops_arg, NULL, 0); if (!num_threads || !num_loops) { acpi_os_printf("Bad argument: Threads %X, Loops %X\n", num_threads, num_loops); return; } /* * Create the semaphore for synchronization of * the created threads with the main thread. */ status = acpi_os_create_semaphore(1, 0, &main_thread_gate); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not create semaphore for " "synchronization with the main thread, %s\n", acpi_format_exception(status)); return; } /* * Create the semaphore for synchronization * between the created threads. */ status = acpi_os_create_semaphore(1, 1, &thread_complete_gate); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not create semaphore for " "synchronization between the created threads, %s\n", acpi_format_exception(status)); (void)acpi_os_delete_semaphore(main_thread_gate); return; } status = acpi_os_create_semaphore(1, 1, &info_gate); if (ACPI_FAILURE(status)) { acpi_os_printf("Could not create semaphore for " "synchronization of AcpiGbl_DbMethodInfo, %s\n", acpi_format_exception(status)); (void)acpi_os_delete_semaphore(thread_complete_gate); (void)acpi_os_delete_semaphore(main_thread_gate); return; } memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info)); /* Array to store IDs of threads */ acpi_gbl_db_method_info.num_threads = num_threads; size = sizeof(acpi_thread_id) * acpi_gbl_db_method_info.num_threads; acpi_gbl_db_method_info.threads = acpi_os_allocate(size); if (acpi_gbl_db_method_info.threads == NULL) { acpi_os_printf("No memory for thread IDs array\n"); (void)acpi_os_delete_semaphore(main_thread_gate); (void)acpi_os_delete_semaphore(thread_complete_gate); (void)acpi_os_delete_semaphore(info_gate); return; } memset(acpi_gbl_db_method_info.threads, 0, size); /* Setup the context to be passed to each thread */ acpi_gbl_db_method_info.name = method_name_arg; 
acpi_gbl_db_method_info.flags = 0; acpi_gbl_db_method_info.num_loops = num_loops; acpi_gbl_db_method_info.main_thread_gate = main_thread_gate; acpi_gbl_db_method_info.thread_complete_gate = thread_complete_gate; acpi_gbl_db_method_info.info_gate = info_gate; /* Init arguments to be passed to method */ acpi_gbl_db_method_info.init_args = 1; acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments; acpi_gbl_db_method_info.arguments[0] = acpi_gbl_db_method_info.num_threads_str; acpi_gbl_db_method_info.arguments[1] = acpi_gbl_db_method_info.id_of_thread_str; acpi_gbl_db_method_info.arguments[2] = acpi_gbl_db_method_info.index_of_thread_str; acpi_gbl_db_method_info.arguments[3] = NULL; acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types; acpi_gbl_db_method_info.arg_types[0] = ACPI_TYPE_INTEGER; acpi_gbl_db_method_info.arg_types[1] = ACPI_TYPE_INTEGER; acpi_gbl_db_method_info.arg_types[2] = ACPI_TYPE_INTEGER; acpi_db_uint32_to_hex_string(num_threads, acpi_gbl_db_method_info.num_threads_str); status = acpi_db_execute_setup(&acpi_gbl_db_method_info); if (ACPI_FAILURE(status)) { goto cleanup_and_exit; } /* Get the NS node, determines existence also */ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname, &acpi_gbl_db_method_info.method); if (ACPI_FAILURE(status)) { acpi_os_printf("%s Could not get handle for %s\n", acpi_format_exception(status), acpi_gbl_db_method_info.pathname); goto cleanup_and_exit; } /* Create the threads */ acpi_os_printf("Creating %X threads to execute %X times each\n", num_threads, num_loops); for (i = 0; i < (num_threads); i++) { status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD, acpi_db_method_thread, &acpi_gbl_db_method_info); if (ACPI_FAILURE(status)) { break; } } /* Wait for all threads to complete */ (void)acpi_os_wait_semaphore(main_thread_gate, 1, ACPI_WAIT_FOREVER); acpi_db_set_output_destination(ACPI_DB_DUPLICATE_OUTPUT); acpi_os_printf("All threads (%X) have completed\n", num_threads); 
acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT); cleanup_and_exit: /* Cleanup and exit */ (void)acpi_os_delete_semaphore(main_thread_gate); (void)acpi_os_delete_semaphore(thread_complete_gate); (void)acpi_os_delete_semaphore(info_gate); acpi_os_free(acpi_gbl_db_method_info.threads); acpi_gbl_db_method_info.threads = NULL; }
linux-master
drivers/acpi/acpica/dbexec.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: exutils - interpreter/scanner utilities * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ /* * DEFINE_AML_GLOBALS is tested in amlcode.h * to determine whether certain global names should be "defined" or only * "declared" in the current compilation. This enhances maintainability * by enabling a single header file to embody all knowledge of the names * in question. * * Exactly one module of any executable should #define DEFINE_GLOBALS * before #including the header files which use this convention. The * names in question will be defined and initialized in that module, * and declared as extern in all other modules which #include those * header files. */ #define DEFINE_AML_GLOBALS #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exutils") /* Local prototypes */ static u32 acpi_ex_digits_needed(u64 value, u32 base); /******************************************************************************* * * FUNCTION: acpi_ex_enter_interpreter * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Enter the interpreter execution region. Failure to enter * the interpreter region is a fatal system error. Used in * conjunction with exit_interpreter. 
* ******************************************************************************/ void acpi_ex_enter_interpreter(void) { acpi_status status; ACPI_FUNCTION_TRACE(ex_enter_interpreter); status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not acquire AML Interpreter mutex")); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not acquire AML Namespace mutex")); } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ex_exit_interpreter * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Exit the interpreter execution region. This is the top level * routine used to exit the interpreter when all processing has * been completed, or when the method blocks. * * Cases where the interpreter is unlocked internally: * 1) Method will be blocked on a Sleep() AML opcode * 2) Method will be blocked on an Acquire() AML opcode * 3) Method will be blocked on a Wait() AML opcode * 4) Method will be blocked to acquire the global lock * 5) Method will be blocked waiting to execute a serialized control * method that is currently executing * 6) About to invoke a user-installed opregion handler * ******************************************************************************/ void acpi_ex_exit_interpreter(void) { acpi_status status; ACPI_FUNCTION_TRACE(ex_exit_interpreter); status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not release AML Namespace mutex")); } status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not release AML Interpreter mutex")); } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ex_truncate_for32bit_table * * PARAMETERS: obj_desc - Object to be truncated * * RETURN: TRUE if a truncation was performed, FALSE 
otherwise. * * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is * 32-bit, as determined by the revision of the DSDT. * ******************************************************************************/ u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc) { ACPI_FUNCTION_ENTRY(); /* * Object must be a valid number and we must be executing * a control method. Object could be NS node for AML_INT_NAMEPATH_OP. */ if ((!obj_desc) || (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) || (obj_desc->common.type != ACPI_TYPE_INTEGER)) { return (FALSE); } if ((acpi_gbl_integer_byte_width == 4) && (obj_desc->integer.value > (u64)ACPI_UINT32_MAX)) { /* * We are executing in a 32-bit ACPI table. Truncate * the value to 32 bits by zeroing out the upper 32-bit field */ obj_desc->integer.value &= (u64)ACPI_UINT32_MAX; return (TRUE); } return (FALSE); } /******************************************************************************* * * FUNCTION: acpi_ex_acquire_global_lock * * PARAMETERS: field_flags - Flags with Lock rule: * always_lock or never_lock * * RETURN: None * * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field * flags specify that it is to be obtained before field access. 
* ******************************************************************************/ void acpi_ex_acquire_global_lock(u32 field_flags) { acpi_status status; ACPI_FUNCTION_TRACE(ex_acquire_global_lock); /* Only use the lock if the always_lock bit is set */ if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) { return_VOID; } /* Attempt to get the global lock, wait forever */ status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER, acpi_gbl_global_lock_mutex, acpi_os_get_thread_id()); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not acquire Global Lock")); } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ex_release_global_lock * * PARAMETERS: field_flags - Flags with Lock rule: * always_lock or never_lock * * RETURN: None * * DESCRIPTION: Release the ACPI hardware Global Lock * ******************************************************************************/ void acpi_ex_release_global_lock(u32 field_flags) { acpi_status status; ACPI_FUNCTION_TRACE(ex_release_global_lock); /* Only use the lock if the always_lock bit is set */ if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) { return_VOID; } /* Release the global lock */ status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex); if (ACPI_FAILURE(status)) { /* Report the error, but there isn't much else we can do */ ACPI_EXCEPTION((AE_INFO, status, "Could not release Global Lock")); } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ex_digits_needed * * PARAMETERS: value - Value to be represented * base - Base of representation * * RETURN: The number of digits. 
* * DESCRIPTION: Calculate the number of digits needed to represent the Value * in the given Base (Radix) * ******************************************************************************/ static u32 acpi_ex_digits_needed(u64 value, u32 base) { u32 num_digits; u64 current_value; ACPI_FUNCTION_TRACE(ex_digits_needed); /* u64 is unsigned, so we don't worry about a '-' prefix */ if (value == 0) { return_UINT32(1); } current_value = value; num_digits = 0; /* Count the digits in the requested base */ while (current_value) { (void)acpi_ut_short_divide(current_value, base, &current_value, NULL); num_digits++; } return_UINT32(num_digits); } /******************************************************************************* * * FUNCTION: acpi_ex_eisa_id_to_string * * PARAMETERS: out_string - Where to put the converted string (8 bytes) * compressed_id - EISAID to be converted * * RETURN: None * * DESCRIPTION: Convert a numeric EISAID to string representation. Return * buffer must be large enough to hold the string. The string * returned is always exactly of length ACPI_EISAID_STRING_SIZE * (includes null terminator). The EISAID is always 32 bits. * ******************************************************************************/ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id) { u32 swapped_id; ACPI_FUNCTION_ENTRY(); /* The EISAID should be a 32-bit integer */ if (compressed_id > ACPI_UINT32_MAX) { ACPI_WARNING((AE_INFO, "Expected EISAID is larger than 32 bits: " "0x%8.8X%8.8X, truncating", ACPI_FORMAT_UINT64(compressed_id))); } /* Swap ID to big-endian to get contiguous bits */ swapped_id = acpi_ut_dword_byte_swap((u32)compressed_id); /* First 3 bytes are uppercase letters. 
Next 4 bytes are hexadecimal */ out_string[0] = (char)(0x40 + (((unsigned long)swapped_id >> 26) & 0x1F)); out_string[1] = (char)(0x40 + ((swapped_id >> 21) & 0x1F)); out_string[2] = (char)(0x40 + ((swapped_id >> 16) & 0x1F)); out_string[3] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 12); out_string[4] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 8); out_string[5] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 4); out_string[6] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 0); out_string[7] = 0; } /******************************************************************************* * * FUNCTION: acpi_ex_integer_to_string * * PARAMETERS: out_string - Where to put the converted string. At least * 21 bytes are needed to hold the largest * possible 64-bit integer. * value - Value to be converted * * RETURN: Converted string in out_string * * DESCRIPTION: Convert a 64-bit integer to decimal string representation. * Assumes string buffer is large enough to hold the string. The * largest string is (ACPI_MAX64_DECIMAL_DIGITS + 1). * ******************************************************************************/ void acpi_ex_integer_to_string(char *out_string, u64 value) { u32 count; u32 digits_needed; u32 remainder; ACPI_FUNCTION_ENTRY(); digits_needed = acpi_ex_digits_needed(value, 10); out_string[digits_needed] = 0; for (count = digits_needed; count > 0; count--) { (void)acpi_ut_short_divide(value, 10, &value, &remainder); out_string[count - 1] = (char)('0' + remainder); } } /******************************************************************************* * * FUNCTION: acpi_ex_pci_cls_to_string * * PARAMETERS: out_string - Where to put the converted string (7 bytes) * class_code - PCI class code to be converted (3 bytes) * * RETURN: Converted string in out_string * * DESCRIPTION: Convert 3-bytes PCI class code to string representation. * Return buffer must be large enough to hold the string. 
The * string returned is always exactly of length * ACPI_PCICLS_STRING_SIZE (includes null terminator). * ******************************************************************************/ void acpi_ex_pci_cls_to_string(char *out_string, u8 class_code[3]) { ACPI_FUNCTION_ENTRY(); /* All 3 bytes are hexadecimal */ out_string[0] = acpi_ut_hex_to_ascii_char((u64)class_code[0], 4); out_string[1] = acpi_ut_hex_to_ascii_char((u64)class_code[0], 0); out_string[2] = acpi_ut_hex_to_ascii_char((u64)class_code[1], 4); out_string[3] = acpi_ut_hex_to_ascii_char((u64)class_code[1], 0); out_string[4] = acpi_ut_hex_to_ascii_char((u64)class_code[2], 4); out_string[5] = acpi_ut_hex_to_ascii_char((u64)class_code[2], 0); out_string[6] = 0; } /******************************************************************************* * * FUNCTION: acpi_is_valid_space_id * * PARAMETERS: space_id - ID to be validated * * RETURN: TRUE if space_id is a valid/supported ID. * * DESCRIPTION: Validate an operation region space_ID. * ******************************************************************************/ u8 acpi_is_valid_space_id(u8 space_id) { if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) && (space_id < ACPI_USER_REGION_BEGIN) && (space_id != ACPI_ADR_SPACE_DATA_TABLE) && (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { return (FALSE); } return (TRUE); }
linux-master
drivers/acpi/acpica/exutils.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: evevent - Fixed Event handling and dispatch * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evevent") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /* Local prototypes */ static acpi_status acpi_ev_fixed_event_initialize(void); static u32 acpi_ev_fixed_event_dispatch(u32 event); /******************************************************************************* * * FUNCTION: acpi_ev_initialize_events * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE) * ******************************************************************************/ acpi_status acpi_ev_initialize_events(void) { acpi_status status; ACPI_FUNCTION_TRACE(ev_initialize_events); /* If Hardware Reduced flag is set, there are no fixed events */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } /* * Initialize the Fixed and General Purpose Events. This is done prior to * enabling SCIs to prevent interrupts from occurring before the handlers * are installed. 
*/ status = acpi_ev_fixed_event_initialize(); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to initialize fixed events")); return_ACPI_STATUS(status); } status = acpi_ev_gpe_initialize(); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to initialize general purpose events")); return_ACPI_STATUS(status); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_install_xrupt_handlers * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock * ******************************************************************************/ acpi_status acpi_ev_install_xrupt_handlers(void) { acpi_status status; ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers); /* If Hardware Reduced flag is set, there is no ACPI h/w */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } /* Install the SCI handler */ status = acpi_ev_install_sci_handler(); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to install System Control Interrupt handler")); return_ACPI_STATUS(status); } /* Install the handler for the Global Lock */ status = acpi_ev_init_global_lock_handler(); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Unable to initialize Global Lock handler")); return_ACPI_STATUS(status); } acpi_gbl_events_initialized = TRUE; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_fixed_event_initialize * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Install the fixed event handlers and disable all fixed events. * ******************************************************************************/ static acpi_status acpi_ev_fixed_event_initialize(void) { u32 i; acpi_status status; /* * Initialize the structure that keeps track of fixed event handlers and * disable all of the fixed events. 
*/ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { acpi_gbl_fixed_event_handlers[i].handler = NULL; acpi_gbl_fixed_event_handlers[i].context = NULL; /* Disable the fixed event */ if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { status = acpi_write_bit_register(acpi_gbl_fixed_event_info [i].enable_register_id, ACPI_DISABLE_EVENT); if (ACPI_FAILURE(status)) { return (status); } } } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ev_fixed_event_detect * * PARAMETERS: None * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Checks the PM status register for active fixed events * ******************************************************************************/ u32 acpi_ev_fixed_event_detect(void) { u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; u32 fixed_status; u32 fixed_enable; u32 i; acpi_status status; ACPI_FUNCTION_NAME(ev_fixed_event_detect); /* * Read the fixed feature status and enable registers, as all the cases * depend on their values. Ignore errors here. */ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); status |= acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); if (ACPI_FAILURE(status)) { return (int_status); } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, "Fixed Event Block: Enable %08X Status %08X\n", fixed_enable, fixed_status)); /* * Check for all possible Fixed Events and dispatch those that are active */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { /* Both the status and enable bits must be on for this event */ if ((fixed_status & acpi_gbl_fixed_event_info[i]. status_bit_mask) && (fixed_enable & acpi_gbl_fixed_event_info[i]. enable_bit_mask)) { /* * Found an active (signalled) event. Invoke global event * handler if present. 
*/ acpi_fixed_event_count[i]++; if (acpi_gbl_global_event_handler) { acpi_gbl_global_event_handler (ACPI_EVENT_TYPE_FIXED, NULL, i, acpi_gbl_global_event_handler_context); } int_status |= acpi_ev_fixed_event_dispatch(i); } } return (int_status); } /******************************************************************************* * * FUNCTION: acpi_ev_fixed_event_dispatch * * PARAMETERS: event - Event type * * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED * * DESCRIPTION: Clears the status bit for the requested event, calls the * handler that previously registered for the event. * NOTE: If there is no handler for the event, the event is * disabled to prevent further interrupts. * ******************************************************************************/ static u32 acpi_ev_fixed_event_dispatch(u32 event) { ACPI_FUNCTION_ENTRY(); /* Clear the status bit */ (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. status_register_id, ACPI_CLEAR_STATUS); /* * Make sure that a handler exists. If not, report an error * and disable the event to prevent further interrupts. */ if (!acpi_gbl_fixed_event_handlers[event].handler) { (void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. enable_register_id, ACPI_DISABLE_EVENT); ACPI_ERROR((AE_INFO, "No installed handler for fixed event - %s (%u), disabling", acpi_ut_get_event_name(event), event)); return (ACPI_INTERRUPT_NOT_HANDLED); } /* Invoke the Fixed Event handler */ return ((acpi_gbl_fixed_event_handlers[event]. 
handler) (acpi_gbl_fixed_event_handlers[event].context)); } /******************************************************************************* * * FUNCTION: acpi_any_fixed_event_status_set * * PARAMETERS: None * * RETURN: TRUE or FALSE * * DESCRIPTION: Checks the PM status register for active fixed events * ******************************************************************************/ u32 acpi_any_fixed_event_status_set(void) { acpi_status status; u32 in_status; u32 in_enable; u32 i; status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable); if (ACPI_FAILURE(status)) { return (FALSE); } status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status); if (ACPI_FAILURE(status)) { return (FALSE); } /* * Check for all possible Fixed Events and dispatch those that are active */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { /* Both the status and enable bits must be on for this event */ if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) && (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) { return (TRUE); } } return (FALSE); } #endif /* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/evevent.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: utosi - Support for the _OSI predefined control method
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utosi")

/******************************************************************************
 *
 * ACPICA policy for new _OSI strings:
 *
 * It is the stated policy of ACPICA that new _OSI strings will be integrated
 * into this module as soon as possible after they are defined. It is strongly
 * recommended that all ACPICA hosts mirror this policy and integrate any
 * changes to this module as soon as possible. There are several historical
 * reasons behind this policy:
 *
 * 1) New BIOSs tend to test only the case where the host responds TRUE to
 *    the latest version of Windows, which would respond to the latest/newest
 *    _OSI string. Not responding TRUE to the latest version of Windows will
 *    risk executing untested code paths throughout the DSDT and SSDTs.
 *
 * 2) If a new _OSI string is recognized only after a significant delay, this
 *    has the potential to cause problems on existing working machines because
 *    of the possibility that a new and different path through the ASL code
 *    will be executed.
 *
 * 3) New _OSI strings are tending to come out about once per year. A delay
 *    in recognizing a new string for a significant amount of time risks the
 *    release of another string which only compounds the initial problem.
 *
 *****************************************************************************/

/*
 * Strings supported by the _OSI predefined control method (which is
 * implemented internally within this module.)
 *
 * March 2009: Removed "Linux" as this host no longer wants to respond true
 * for this string. Basically, the only safe OS strings are windows-related
 * and in many or most cases represent the only test path within the
 * BIOS-provided ASL code.
 *
 * The last element of each entry is used to track the newest version of
 * Windows that the BIOS has requested.
 */
static struct acpi_interface_info acpi_default_supported_interfaces[] = {
	/* Operating System Vendor Strings */

	{"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000},	/* Windows 2000 */
	{"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP},	/* Windows XP */
	{"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1},	/* Windows XP SP1 */
	{"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003},	/* Windows Server 2003 */
	{"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2},	/* Windows XP SP2 */
	{"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1},	/* Windows Server 2003 SP1 - Added 03/2006 */
	{"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},	/* Windows vista - Added 03/2006 */
	{"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008},	/* Windows Server 2008 - Added 09/2009 */
	{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1},	/* Windows Vista SP1 - Added 09/2009 */
	{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},	/* Windows Vista SP2 - Added 09/2010 */
	{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},	/* Windows 7 and Server 2008 R2 - Added 09/2009 */
	{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},	/* Windows 8 and Server 2012 - Added 08/2012 */
	{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8_1},	/* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
	{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10},	/* Windows 10 - Added 03/2015 */
	{"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1},	/* Windows 10 version 1607 - Added 12/2017 */
	{"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2},	/* Windows 10 version 1703 - Added 12/2017 */
	{"Windows 2017.2", NULL, 0, ACPI_OSI_WIN_10_RS3},	/* Windows 10 version 1709 - Added 02/2018 */
	{"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4},	/* Windows 10 version 1803 - Added 11/2018 */
	{"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5},	/* Windows 10 version 1809 - Added 11/2018 */
	{"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1},	/* Windows 10 version 1903 - Added 08/2019 */
	{"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1},	/* Windows 10 version 2004 - Added 08/2021 */
	{"Windows 2021", NULL, 0, ACPI_OSI_WIN_11},	/* Windows 11 - Added 01/2022 */

	/* Feature Group Strings */

	{"Extended Address Space Descriptor", NULL, ACPI_OSI_FEATURE, 0},

	/*
	 * All "optional" feature group strings (features that are implemented
	 * by the host) should be dynamically modified to VALID by the host via
	 * acpi_install_interface or acpi_update_interfaces. Such optional feature
	 * group strings are set as INVALID by default here.
	 */

	{"Module Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"Processor Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"3.0 Thermal Model", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"3.0 _SCP Extensions", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
	{"Processor Aggregator Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0}
};

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_initialize_interfaces
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the global _OSI supported interfaces list
 *
 ******************************************************************************/

acpi_status acpi_ut_initialize_interfaces(void)
{
	acpi_status status;
	u32 i;

	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;

	/*
	 * Link the static list of supported interfaces. The loop stops one
	 * short of the last entry, whose next pointer stays NULL (from its
	 * initializer) and terminates the list.
	 */
	for (i = 0;
	     i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
	     i++) {
		acpi_default_supported_interfaces[i].next =
		    &acpi_default_supported_interfaces[(acpi_size)i + 1];
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_interface_terminate
 *
 * PARAMETERS:  None
 *
 * RETURN:
Status * * DESCRIPTION: Delete all interfaces in the global list. Sets * acpi_gbl_supported_interfaces to NULL. * ******************************************************************************/ acpi_status acpi_ut_interface_terminate(void) { acpi_status status; struct acpi_interface_info *next_interface; status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE(status)) { return (status); } next_interface = acpi_gbl_supported_interfaces; while (next_interface) { acpi_gbl_supported_interfaces = next_interface->next; if (next_interface->flags & ACPI_OSI_DYNAMIC) { /* Only interfaces added at runtime can be freed */ ACPI_FREE(next_interface->name); ACPI_FREE(next_interface); } else { /* Interface is in static list. Reset it to invalid or valid. */ if (next_interface->flags & ACPI_OSI_DEFAULT_INVALID) { next_interface->flags |= ACPI_OSI_INVALID; } else { next_interface->flags &= ~ACPI_OSI_INVALID; } } next_interface = acpi_gbl_supported_interfaces; } acpi_os_release_mutex(acpi_gbl_osi_mutex); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_install_interface * * PARAMETERS: interface_name - The interface to install * * RETURN: Status * * DESCRIPTION: Install the interface into the global interface list. 
* Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ acpi_status acpi_ut_install_interface(acpi_string interface_name) { struct acpi_interface_info *interface_info; /* Allocate info block and space for the name string */ interface_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info)); if (!interface_info) { return (AE_NO_MEMORY); } interface_info->name = ACPI_ALLOCATE_ZEROED(strlen(interface_name) + 1); if (!interface_info->name) { ACPI_FREE(interface_info); return (AE_NO_MEMORY); } /* Initialize new info and insert at the head of the global list */ strcpy(interface_info->name, interface_name); interface_info->flags = ACPI_OSI_DYNAMIC; interface_info->next = acpi_gbl_supported_interfaces; acpi_gbl_supported_interfaces = interface_info; return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_remove_interface * * PARAMETERS: interface_name - The interface to remove * * RETURN: Status * * DESCRIPTION: Remove the interface from the global interface list. * Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ acpi_status acpi_ut_remove_interface(acpi_string interface_name) { struct acpi_interface_info *previous_interface; struct acpi_interface_info *next_interface; previous_interface = next_interface = acpi_gbl_supported_interfaces; while (next_interface) { if (!strcmp(interface_name, next_interface->name)) { /* * Found: name is in either the static list * or was added at runtime */ if (next_interface->flags & ACPI_OSI_DYNAMIC) { /* Interface was added dynamically, remove and free it */ if (previous_interface == next_interface) { acpi_gbl_supported_interfaces = next_interface->next; } else { previous_interface->next = next_interface->next; } ACPI_FREE(next_interface->name); ACPI_FREE(next_interface); } else { /* * Interface is in static list. 
If marked invalid, then * it does not actually exist. Else, mark it invalid. */ if (next_interface->flags & ACPI_OSI_INVALID) { return (AE_NOT_EXIST); } next_interface->flags |= ACPI_OSI_INVALID; } return (AE_OK); } previous_interface = next_interface; next_interface = next_interface->next; } /* Interface was not found */ return (AE_NOT_EXIST); } /******************************************************************************* * * FUNCTION: acpi_ut_update_interfaces * * PARAMETERS: action - Actions to be performed during the * update * * RETURN: Status * * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor * strings or/and feature group strings. * Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ acpi_status acpi_ut_update_interfaces(u8 action) { struct acpi_interface_info *next_interface; next_interface = acpi_gbl_supported_interfaces; while (next_interface) { if (((next_interface->flags & ACPI_OSI_FEATURE) && (action & ACPI_FEATURE_STRINGS)) || (!(next_interface->flags & ACPI_OSI_FEATURE) && (action & ACPI_VENDOR_STRINGS))) { if (action & ACPI_DISABLE_INTERFACES) { /* Mark the interfaces as invalid */ next_interface->flags |= ACPI_OSI_INVALID; } else { /* Mark the interfaces as valid */ next_interface->flags &= ~ACPI_OSI_INVALID; } } next_interface = next_interface->next; } return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_get_interface * * PARAMETERS: interface_name - The interface to find * * RETURN: struct acpi_interface_info if found. NULL if not found. * * DESCRIPTION: Search for the specified interface name in the global list. 
* Caller MUST hold acpi_gbl_osi_mutex * ******************************************************************************/ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name) { struct acpi_interface_info *next_interface; next_interface = acpi_gbl_supported_interfaces; while (next_interface) { if (!strcmp(interface_name, next_interface->name)) { return (next_interface); } next_interface = next_interface->next; } return (NULL); } /******************************************************************************* * * FUNCTION: acpi_ut_osi_implementation * * PARAMETERS: walk_state - Current walk state * * RETURN: Status * Integer: TRUE (0) if input string is matched * FALSE (-1) if string is not matched * * DESCRIPTION: Implementation of the _OSI predefined control method. When * an invocation of _OSI is encountered in the system AML, * control is transferred to this function. * * (August 2016) * Note: _OSI is now defined to return "Ones" to indicate a match, for * compatibility with other ACPI implementations. On a 32-bit DSDT, Ones * is 0xFFFFFFFF. On a 64-bit DSDT, Ones is 0xFFFFFFFFFFFFFFFF * (ACPI_UINT64_MAX). * * This function always returns ACPI_UINT64_MAX for TRUE, and later code * will truncate this to 32 bits if necessary. 
*
 ******************************************************************************/

acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *string_desc;
	union acpi_operand_object *return_desc;
	struct acpi_interface_info *interface_info;
	acpi_interface_handler interface_handler;
	acpi_status status;
	u64 return_value;

	ACPI_FUNCTION_TRACE(ut_osi_implementation);

	/* Validate the string input argument (from the AML caller) */

	string_desc = walk_state->arguments[0].object;
	if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a return object */

	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
	if (!return_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Default return value is 0, NOT SUPPORTED */

	return_value = 0;
	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {

		/* Drop the return object before bailing out */

		acpi_ut_remove_reference(return_desc);
		return_ACPI_STATUS(status);
	}

	/* Lookup the interface in the global _OSI list */

	interface_info = acpi_ut_get_interface(string_desc->string.pointer);
	if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
		/*
		 * The interface is supported.
		 * Update the osi_data if necessary. We keep track of the latest
		 * version of Windows that has been requested by the BIOS.
		 */
		if (interface_info->value > acpi_gbl_osi_data) {
			acpi_gbl_osi_data = interface_info->value;
		}

		return_value = ACPI_UINT64_MAX;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);

	/*
	 * Invoke an optional _OSI interface handler. The host OS may wish
	 * to do some interface-specific handling. For example, warn about
	 * certain interfaces or override the true/false support value.
	 * (The handler can only force TRUE here, never force FALSE.)
	 */
	interface_handler = acpi_gbl_interface_handler;
	if (interface_handler) {
		if (interface_handler
		    (string_desc->string.pointer, (u32)return_value)) {
			return_value = ACPI_UINT64_MAX;
		}
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
			      "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
			      string_desc->string.pointer,
			      return_value == 0 ? "not " : ""));

	/* Complete the return object */

	return_desc->integer.value = return_value;
	walk_state->return_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
linux-master
drivers/acpi/acpica/utosi.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exfldio - Aml Field I/O
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#include "acevents.h"
#include "acdispat.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exfldio")

/* Local prototypes */
static acpi_status
acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
		       u32 field_datum_byte_offset, u64 *value, u32 read_write);

static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value);

static acpi_status
acpi_ex_setup_region(union acpi_operand_object *obj_desc,
		     u32 field_datum_byte_offset);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_setup_region
 *
 * PARAMETERS:  obj_desc                - Field to be read or written
 *              field_datum_byte_offset - Byte offset of this datum within the
 *                                        parent field
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Common processing for acpi_ex_extract_from_field and
 *              acpi_ex_insert_into_field. Initialize the Region if necessary and
 *              validate the request.
 *
 ******************************************************************************/

static acpi_status
acpi_ex_setup_region(union acpi_operand_object *obj_desc,
		     u32 field_datum_byte_offset)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *rgn_desc;
	u8 space_id;

	ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);

	rgn_desc = obj_desc->common_field.region_obj;

	/* We must have a valid region */

	if (rgn_desc->common.type != ACPI_TYPE_REGION) {
		ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
			    rgn_desc->common.type,
			    acpi_ut_get_object_type_name(rgn_desc)));

		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	space_id = rgn_desc->region.space_id;

	/* Validate the Space ID */

	if (!acpi_is_valid_space_id(space_id)) {
		ACPI_ERROR((AE_INFO,
			    "Invalid/unknown Address Space ID: 0x%2.2X",
			    space_id));
		return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
	}

	/*
	 * If the Region Address and Length have not been previously evaluated,
	 * evaluate them now and save the results.
	 */
	if (!(rgn_desc->common.flags & AOPOBJ_DATA_VALID)) {
		status = acpi_ds_get_region_arguments(rgn_desc);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
	 * address space and the request cannot be directly validated
	 */
	if (space_id == ACPI_ADR_SPACE_SMBUS ||
	    space_id == ACPI_ADR_SPACE_GSBUS ||
	    space_id == ACPI_ADR_SPACE_IPMI) {

		/* SMBus or IPMI has a non-linear address space */

		return_ACPI_STATUS(AE_OK);
	}
#ifdef ACPI_UNDER_DEVELOPMENT
	/*
	 * If the Field access is any_acc, we can now compute the optimal
	 * access (because we know the length of the parent region)
	 */
	if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}
#endif

	/*
	 * Validate the request. The entire request from the byte offset for a
	 * length of one field datum (access width) must fit within the region.
	 * (Region length is specified in bytes)
	 */
	if (rgn_desc->region.length <
	    (obj_desc->common_field.base_byte_offset + field_datum_byte_offset +
	     obj_desc->common_field.access_byte_width)) {
		if (acpi_gbl_enable_interpreter_slack) {
			/*
			 * Slack mode only: We will go ahead and allow access to this
			 * field if it is within the region length rounded up to the next
			 * access width boundary. acpi_size cast for 64-bit compile.
			 */
			if (ACPI_ROUND_UP(rgn_desc->region.length,
					  obj_desc->common_field.
					  access_byte_width) >=
			    ((acpi_size)obj_desc->common_field.
			     base_byte_offset +
			     obj_desc->common_field.access_byte_width +
			     field_datum_byte_offset)) {
				return_ACPI_STATUS(AE_OK);
			}
		}

		if (rgn_desc->region.length <
		    obj_desc->common_field.access_byte_width) {
			/*
			 * This is the case where the access_type (acc_word, etc.) is wider
			 * than the region itself. For example, a region of length one
			 * byte, and a field with Dword access specified.
			 *
			 * Note: intentionally no return here -- the generic
			 * Base+Offset+Width error below is also reported.
			 */
			ACPI_ERROR((AE_INFO,
				    "Field [%4.4s] access width (%u bytes) "
				    "too large for region [%4.4s] (length %u)",
				    acpi_ut_get_node_name(obj_desc->
							  common_field.node),
				    obj_desc->common_field.access_byte_width,
				    acpi_ut_get_node_name(rgn_desc->region.
							  node),
				    rgn_desc->region.length));
		}

		/*
		 * Offset rounded up to next multiple of field width
		 * exceeds region length, indicate an error
		 */
		ACPI_ERROR((AE_INFO,
			    "Field [%4.4s] Base+Offset+Width %u+%u+%u "
			    "is beyond end of region [%4.4s] (length %u)",
			    acpi_ut_get_node_name(obj_desc->common_field.node),
			    obj_desc->common_field.base_byte_offset,
			    field_datum_byte_offset,
			    obj_desc->common_field.access_byte_width,
			    acpi_ut_get_node_name(rgn_desc->region.node),
			    rgn_desc->region.length));

		return_ACPI_STATUS(AE_AML_REGION_LIMIT);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_access_region
 *
 * PARAMETERS:  obj_desc                - Field to be read
 *              field_datum_byte_offset - Byte offset of this datum within the
 *                                        parent field
 *              value                   - Where to store value (must at least
 *                                        64 bits)
 *              function                - Read or Write flag plus other region-
 *                                        dependent flags
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Read or Write a single field datum to an Operation Region.
*
 ******************************************************************************/

acpi_status
acpi_ex_access_region(union acpi_operand_object *obj_desc,
		      u32 field_datum_byte_offset, u64 *value, u32 function)
{
	acpi_status status;
	union acpi_operand_object *rgn_desc;
	u32 region_offset;

	ACPI_FUNCTION_TRACE(ex_access_region);

	/*
	 * Ensure that the region operands are fully evaluated and verify
	 * the validity of the request
	 */
	status = acpi_ex_setup_region(obj_desc, field_datum_byte_offset);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * The physical address of this field datum is:
	 *
	 * 1) The base of the region, plus
	 * 2) The base offset of the field, plus
	 * 3) The current offset into the field
	 */
	rgn_desc = obj_desc->common_field.region_obj;
	region_offset =
	    obj_desc->common_field.base_byte_offset + field_datum_byte_offset;

	if ((function & ACPI_IO_MASK) == ACPI_READ) {
		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[READ]"));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "[WRITE]"));
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
			      " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
			      acpi_ut_get_region_name(rgn_desc->region.
						      space_id),
			      rgn_desc->region.space_id,
			      obj_desc->common_field.access_byte_width,
			      obj_desc->common_field.base_byte_offset,
			      field_datum_byte_offset,
			      ACPI_FORMAT_UINT64(rgn_desc->region.address +
						 region_offset)));

	/* Invoke the appropriate address_space/op_region handler */

	status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc, function,
						region_offset,
						ACPI_MUL_8(obj_desc->
							   common_field.
							   access_byte_width),
						value);

	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_IMPLEMENTED) {
			ACPI_ERROR((AE_INFO,
				    "Region %s (ID=%u) not implemented",
				    acpi_ut_get_region_name(rgn_desc->region.
							    space_id),
				    rgn_desc->region.space_id));
		} else if (status == AE_NOT_EXIST) {
			ACPI_ERROR((AE_INFO,
				    "Region %s (ID=%u) has no handler",
				    acpi_ut_get_region_name(rgn_desc->region.
							    space_id),
				    rgn_desc->region.space_id));
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_register_overflow
 *
 * PARAMETERS:  obj_desc                - Register(Field) to be written
 *              value                   - Value to be stored
 *
 * RETURN:      TRUE if value overflows the field, FALSE otherwise
 *
 * DESCRIPTION: Check if a value is out of range of the field being written.
 *              Used to check if the values written to Index and Bank registers
 *              are out of range. Normally, the value is simply truncated
 *              to fit the field, but this case is most likely a serious
 *              coding error in the ASL.
 *
 ******************************************************************************/

static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{

	if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
		/*
		 * The field is large enough to hold the maximum integer, so we can
		 * never overflow it. (Also avoids an undefined 64-bit shift below.)
		 */
		return (FALSE);
	}

	if (value >= ((u64) 1 << obj_desc->common_field.bit_length)) {
		/*
		 * The Value is larger than the maximum value that can fit into
		 * the register.
		 */
		ACPI_ERROR((AE_INFO,
			    "Index value 0x%8.8X%8.8X overflows field width 0x%X",
			    ACPI_FORMAT_UINT64(value),
			    obj_desc->common_field.bit_length));

		return (TRUE);
	}

	/* The Value will fit into the field with no truncation */

	return (FALSE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_field_datum_io
 *
 * PARAMETERS:  obj_desc                - Field to be read
 *              field_datum_byte_offset - Byte offset of this datum within the
 *                                        parent field
 *              value                   - Where to store value (must be 64 bits)
 *              read_write              - Read or Write flag
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Read or Write a single datum of a field.
The field_type is * demultiplexed here to handle the different types of fields * (buffer_field, region_field, index_field, bank_field) * ******************************************************************************/ static acpi_status acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, u32 field_datum_byte_offset, u64 *value, u32 read_write) { acpi_status status; u64 local_value; ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset); if (read_write == ACPI_READ) { if (!value) { local_value = 0; /* To support reads without saving return value */ value = &local_value; } /* Clear the entire return buffer first, [Very Important!] */ *value = 0; } /* * The four types of fields are: * * buffer_field - Read/write from/to a Buffer * region_field - Read/write from/to a Operation Region. * bank_field - Write to a Bank Register, then read/write from/to an * operation_region * index_field - Write to an Index Register, then read/write from/to a * Data Register */ switch (obj_desc->common.type) { case ACPI_TYPE_BUFFER_FIELD: /* * If the buffer_field arguments have not been previously evaluated, * evaluate them now and save the results. */ if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) { status = acpi_ds_get_buffer_field_arguments(obj_desc); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } } if (read_write == ACPI_READ) { /* * Copy the data from the source buffer. * Length is the field width in bytes. */ memcpy(value, (obj_desc->buffer_field.buffer_obj)->buffer. pointer + obj_desc->buffer_field.base_byte_offset + field_datum_byte_offset, obj_desc->common_field.access_byte_width); } else { /* * Copy the data to the target buffer. * Length is the field width in bytes. */ memcpy((obj_desc->buffer_field.buffer_obj)->buffer. 
pointer + obj_desc->buffer_field.base_byte_offset + field_datum_byte_offset, value, obj_desc->common_field.access_byte_width); } status = AE_OK; break; case ACPI_TYPE_LOCAL_BANK_FIELD: /* * Ensure that the bank_value is not beyond the capacity of * the register */ if (acpi_ex_register_overflow(obj_desc->bank_field.bank_obj, (u64) obj_desc->bank_field. value)) { return_ACPI_STATUS(AE_AML_REGISTER_LIMIT); } /* * For bank_fields, we must write the bank_value to the bank_register * (itself a region_field) before we can access the data. */ status = acpi_ex_insert_into_field(obj_desc->bank_field.bank_obj, &obj_desc->bank_field.value, sizeof(obj_desc->bank_field. value)); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Now that the Bank has been selected, fall through to the * region_field case and write the datum to the Operation Region */ ACPI_FALLTHROUGH; case ACPI_TYPE_LOCAL_REGION_FIELD: /* * For simple region_fields, we just directly access the owning * Operation Region. */ status = acpi_ex_access_region(obj_desc, field_datum_byte_offset, value, read_write); break; case ACPI_TYPE_LOCAL_INDEX_FIELD: /* * Ensure that the index_value is not beyond the capacity of * the register */ if (acpi_ex_register_overflow(obj_desc->index_field.index_obj, (u64) obj_desc->index_field. value)) { return_ACPI_STATUS(AE_AML_REGISTER_LIMIT); } /* Write the index value to the index_register (itself a region_field) */ field_datum_byte_offset += obj_desc->index_field.value; ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Write to Index Register: Value %8.8X\n", field_datum_byte_offset)); status = acpi_ex_insert_into_field(obj_desc->index_field.index_obj, &field_datum_byte_offset, sizeof(field_datum_byte_offset)); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (read_write == ACPI_READ) { /* Read the datum from the data_register */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Read from Data Register\n")); status = acpi_ex_extract_from_field(obj_desc->index_field. 
data_obj, value, sizeof(u64)); } else { /* Write the datum to the data_register */ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Write to Data Register: Value %8.8X%8.8X\n", ACPI_FORMAT_UINT64(*value))); status = acpi_ex_insert_into_field(obj_desc->index_field. data_obj, value, sizeof(u64)); } break; default: ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u", obj_desc->common.type)); status = AE_AML_INTERNAL; break; } if (ACPI_SUCCESS(status)) { if (read_write == ACPI_READ) { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Value Read %8.8X%8.8X, Width %u\n", ACPI_FORMAT_UINT64(*value), obj_desc->common_field. access_byte_width)); } else { ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Value Written %8.8X%8.8X, Width %u\n", ACPI_FORMAT_UINT64(*value), obj_desc->common_field. access_byte_width)); } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_write_with_update_rule * * PARAMETERS: obj_desc - Field to be written * mask - bitmask within field datum * field_value - Value to write * field_datum_byte_offset - Offset of datum within field * * RETURN: Status * * DESCRIPTION: Apply the field update rule to a field write * ******************************************************************************/ acpi_status acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc, u64 mask, u64 field_value, u32 field_datum_byte_offset) { acpi_status status = AE_OK; u64 merged_value; u64 current_value; ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask); /* Start with the new bits */ merged_value = field_value; /* If the mask is all ones, we don't need to worry about the update rule */ if (mask != ACPI_UINT64_MAX) { /* Decode the update rule */ switch (obj_desc->common_field. field_flags & AML_FIELD_UPDATE_RULE_MASK) { case AML_FIELD_UPDATE_PRESERVE: /* * Check if update rule needs to be applied (not if mask is all * ones) The left shift drops the bits we want to ignore. 
*/ if ((~mask << (ACPI_MUL_8(sizeof(mask)) - ACPI_MUL_8(obj_desc->common_field. access_byte_width))) != 0) { /* * Read the current contents of the byte/word/dword containing * the field, and merge with the new field value. */ status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset, &current_value, ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } merged_value |= (current_value & ~mask); } break; case AML_FIELD_UPDATE_WRITE_AS_ONES: /* Set positions outside the field to all ones */ merged_value |= ~mask; break; case AML_FIELD_UPDATE_WRITE_AS_ZEROS: /* Set positions outside the field to all zeros */ merged_value &= mask; break; default: ACPI_ERROR((AE_INFO, "Unknown UpdateRule value: 0x%X", (obj_desc->common_field.field_flags & AML_FIELD_UPDATE_RULE_MASK))); return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "Mask %8.8X%8.8X, DatumOffset %X, Width %X, " "Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n", ACPI_FORMAT_UINT64(mask), field_datum_byte_offset, obj_desc->common_field.access_byte_width, ACPI_FORMAT_UINT64(field_value), ACPI_FORMAT_UINT64(merged_value))); /* Write the merged value */ status = acpi_ex_field_datum_io(obj_desc, field_datum_byte_offset, &merged_value, ACPI_WRITE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ex_extract_from_field * * PARAMETERS: obj_desc - Field to be read * buffer - Where to store the field data * buffer_length - Length of Buffer * * RETURN: Status * * DESCRIPTION: Retrieve the current value of the given field * ******************************************************************************/ acpi_status acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, void *buffer, u32 buffer_length) { acpi_status status; u64 raw_datum; u64 merged_datum; u32 field_offset = 0; u32 buffer_offset = 0; u32 buffer_tail_bits; u32 datum_count; u32 field_datum_count; u32 access_bit_width; u32 i; 
ACPI_FUNCTION_TRACE(ex_extract_from_field); /* Validate target buffer and clear it */ if (buffer_length < ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) { ACPI_ERROR((AE_INFO, "Field size %u (bits) is too large for buffer (%u)", obj_desc->common_field.bit_length, buffer_length)); return_ACPI_STATUS(AE_BUFFER_OVERFLOW); } memset(buffer, 0, buffer_length); access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width); /* Handle the simple case here */ if ((obj_desc->common_field.start_field_bit_offset == 0) && (obj_desc->common_field.bit_length == access_bit_width)) { if (buffer_length >= sizeof(u64)) { status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ); } else { /* Use raw_datum (u64) to handle buffers < 64 bits */ status = acpi_ex_field_datum_io(obj_desc, 0, &raw_datum, ACPI_READ); memcpy(buffer, &raw_datum, buffer_length); } return_ACPI_STATUS(status); } /* TBD: Move to common setup code */ /* Field algorithm is limited to sizeof(u64), truncate if needed */ if (obj_desc->common_field.access_byte_width > sizeof(u64)) { obj_desc->common_field.access_byte_width = sizeof(u64); access_bit_width = sizeof(u64) * 8; } /* Compute the number of datums (access width data items) */ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, access_bit_width); field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length + obj_desc->common_field. 
start_field_bit_offset, access_bit_width); /* Priming read from the field */ status = acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum, ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } merged_datum = raw_datum >> obj_desc->common_field.start_field_bit_offset; /* Read the rest of the field */ for (i = 1; i < field_datum_count; i++) { /* Get next input datum from the field */ field_offset += obj_desc->common_field.access_byte_width; status = acpi_ex_field_datum_io(obj_desc, field_offset, &raw_datum, ACPI_READ); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Merge with previous datum if necessary. * * Note: Before the shift, check if the shift value will be larger than * the integer size. If so, there is no need to perform the operation. * This avoids the differences in behavior between different compilers * concerning shift values larger than the target data width. */ if (access_bit_width - obj_desc->common_field.start_field_bit_offset < ACPI_INTEGER_BIT_SIZE) { merged_datum |= raw_datum << (access_bit_width - obj_desc->common_field. 
start_field_bit_offset); } if (i == datum_count) { break; } /* Write merged datum to target buffer */ memcpy(((char *)buffer) + buffer_offset, &merged_datum, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); buffer_offset += obj_desc->common_field.access_byte_width; merged_datum = raw_datum >> obj_desc->common_field.start_field_bit_offset; } /* Mask off any extra bits in the last datum */ buffer_tail_bits = obj_desc->common_field.bit_length % access_bit_width; if (buffer_tail_bits) { merged_datum &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits); } /* Write the last datum to the buffer */ memcpy(((char *)buffer) + buffer_offset, &merged_datum, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_insert_into_field * * PARAMETERS: obj_desc - Field to be written * buffer - Data to be written * buffer_length - Length of Buffer * * RETURN: Status * * DESCRIPTION: Store the Buffer contents into the given field * ******************************************************************************/ acpi_status acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, void *buffer, u32 buffer_length) { void *new_buffer; acpi_status status; u64 mask; u64 width_mask; u64 merged_datum; u64 raw_datum = 0; u32 field_offset = 0; u32 buffer_offset = 0; u32 buffer_tail_bits; u32 datum_count; u32 field_datum_count; u32 access_bit_width; u32 required_length; u32 i; ACPI_FUNCTION_TRACE(ex_insert_into_field); /* Validate input buffer */ new_buffer = NULL; required_length = ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); /* * We must have a buffer that is at least as long as the field * we are writing to. This is because individual fields are * indivisible and partial writes are not supported -- as per * the ACPI specification. 
*/ if (buffer_length < required_length) { /* We need to create a new buffer */ new_buffer = ACPI_ALLOCATE_ZEROED(required_length); if (!new_buffer) { return_ACPI_STATUS(AE_NO_MEMORY); } /* * Copy the original data to the new buffer, starting * at Byte zero. All unused (upper) bytes of the * buffer will be 0. */ memcpy((char *)new_buffer, (char *)buffer, buffer_length); buffer = new_buffer; buffer_length = required_length; } /* TBD: Move to common setup code */ /* Algo is limited to sizeof(u64), so cut the access_byte_width */ if (obj_desc->common_field.access_byte_width > sizeof(u64)) { obj_desc->common_field.access_byte_width = sizeof(u64); } access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width); /* Create the bitmasks used for bit insertion */ width_mask = ACPI_MASK_BITS_ABOVE_64(access_bit_width); mask = width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset); /* Compute the number of datums (access width data items) */ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, access_bit_width); field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length + obj_desc->common_field. start_field_bit_offset, access_bit_width); /* Get initial Datum from the input buffer */ memcpy(&raw_datum, buffer, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); merged_datum = raw_datum << obj_desc->common_field.start_field_bit_offset; /* Write the entire field */ for (i = 1; i < field_datum_count; i++) { /* Write merged datum to the target field */ merged_datum &= mask; status = acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum, field_offset); if (ACPI_FAILURE(status)) { goto exit; } field_offset += obj_desc->common_field.access_byte_width; /* * Start new output datum by merging with previous input datum * if necessary. * * Note: Before the shift, check if the shift value will be larger than * the integer size. If so, there is no need to perform the operation. 
* This avoids the differences in behavior between different compilers * concerning shift values larger than the target data width. */ if ((access_bit_width - obj_desc->common_field.start_field_bit_offset) < ACPI_INTEGER_BIT_SIZE) { merged_datum = raw_datum >> (access_bit_width - obj_desc->common_field. start_field_bit_offset); } else { merged_datum = 0; } mask = width_mask; if (i == datum_count) { break; } /* Get the next input datum from the buffer */ buffer_offset += obj_desc->common_field.access_byte_width; memcpy(&raw_datum, ((char *)buffer) + buffer_offset, ACPI_MIN(obj_desc->common_field.access_byte_width, buffer_length - buffer_offset)); merged_datum |= raw_datum << obj_desc->common_field.start_field_bit_offset; } /* Mask off any extra bits in the last datum */ buffer_tail_bits = (obj_desc->common_field.bit_length + obj_desc->common_field.start_field_bit_offset) % access_bit_width; if (buffer_tail_bits) { mask &= ACPI_MASK_BITS_ABOVE(buffer_tail_bits); } /* Write the last datum to the field */ merged_datum &= mask; status = acpi_ex_write_with_update_rule(obj_desc, mask, merged_datum, field_offset); exit: /* Free temporary buffer if we used one */ if (new_buffer) { ACPI_FREE(new_buffer); } return_ACPI_STATUS(status); }
/* ==== linux-master: drivers/acpi/acpica/exfldio.c (end of file) ==== */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: nsconvert - Object conversions for objects returned by
 *                          predefined methods
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"
#include "acpredef.h"
#include "amlresrc.h"

#define _COMPONENT          ACPI_NAMESPACE
ACPI_MODULE_NAME("nsconvert")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_integer
 *
 * PARAMETERS:  original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful.
 *
 * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
 *
 ******************************************************************************/
acpi_status
acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
			   union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	acpi_status status;
	u64 value = 0;
	u32 i;

	switch (original_object->common.type) {
	case ACPI_TYPE_STRING:

		/* String-to-Integer conversion */

		status = acpi_ut_strtoul64(original_object->string.pointer,
					   &value);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;

	case ACPI_TYPE_BUFFER:

		/* Buffer-to-Integer conversion. Max buffer size is 64 bits. */

		if (original_object->buffer.length > 8) {
			return (AE_AML_OPERAND_TYPE);
		}

		/* Extract each buffer byte to create the integer (little-endian) */

		for (i = 0; i < original_object->buffer.length; i++) {
			value |= ((u64)
				  original_object->buffer.pointer[i] << (i *
									 8));
		}
		break;

	default:

		return (AE_AML_OPERAND_TYPE);
	}

	new_object = acpi_ut_create_integer_object(value);
	if (!new_object) {
		return (AE_NO_MEMORY);
	}

	*return_object = new_object;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_string
 *
 * PARAMETERS:  original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful.
 *
 * DESCRIPTION: Attempt to convert a Integer/Buffer object to a String.
 *
 ******************************************************************************/
acpi_status
acpi_ns_convert_to_string(union acpi_operand_object *original_object,
			  union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	acpi_size length;
	acpi_status status;

	switch (original_object->common.type) {
	case ACPI_TYPE_INTEGER:
		/*
		 * Integer-to-String conversion. Commonly, convert
		 * an integer of value 0 to a NULL string. The last element of
		 * _BIF and _BIX packages occasionally need this fix.
		 */
		if (original_object->integer.value == 0) {

			/* Allocate a new NULL string object */

			new_object = acpi_ut_create_string_object(0);
			if (!new_object) {
				return (AE_NO_MEMORY);
			}
		} else {
			status = acpi_ex_convert_to_string(original_object,
							   &new_object,
							   ACPI_IMPLICIT_CONVERT_HEX);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
		}
		break;

	case ACPI_TYPE_BUFFER:
		/*
		 * Buffer-to-String conversion. Use a to_string
		 * conversion, no transform performed on the buffer data. The best
		 * example of this is the _BIF method, where the string data from
		 * the battery is often (incorrectly) returned as buffer object(s).
		 */
		/* Compute string length: stop at first NUL or end of buffer */
		length = 0;
		while ((length < original_object->buffer.length) &&
		       (original_object->buffer.pointer[length])) {
			length++;
		}

		/* Allocate a new string object */

		new_object = acpi_ut_create_string_object(length);
		if (!new_object) {
			return (AE_NO_MEMORY);
		}

		/*
		 * Copy the raw buffer data with no transform. String is already NULL
		 * terminated at Length+1.
		 */
		memcpy(new_object->string.pointer,
		       original_object->buffer.pointer, length);
		break;

	default:

		return (AE_AML_OPERAND_TYPE);
	}

	*return_object = new_object;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_buffer
 *
 * PARAMETERS:  original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful.
 *
 * DESCRIPTION: Attempt to convert a Integer/String/Package object to a Buffer.
 *
 ******************************************************************************/
acpi_status
acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
			  union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	acpi_status status;
	union acpi_operand_object **elements;
	u32 *dword_buffer;
	u32 count;
	u32 i;

	switch (original_object->common.type) {
	case ACPI_TYPE_INTEGER:
		/*
		 * Integer-to-Buffer conversion.
		 * Convert the Integer to a packed-byte buffer. _MAT and other
		 * objects need this sometimes, if a read has been performed on a
		 * Field object that is less than or equal to the global integer
		 * size (32 or 64 bits).
		 */
		status =
		    acpi_ex_convert_to_buffer(original_object, &new_object);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;

	case ACPI_TYPE_STRING:

		/* String-to-Buffer conversion. Simple data copy */

		new_object = acpi_ut_create_buffer_object
		    (original_object->string.length);
		if (!new_object) {
			return (AE_NO_MEMORY);
		}

		memcpy(new_object->buffer.pointer,
		       original_object->string.pointer,
		       original_object->string.length);
		break;

	case ACPI_TYPE_PACKAGE:
		/*
		 * This case is often seen for predefined names that must return a
		 * Buffer object with multiple DWORD integers within. For example,
		 * _FDE and _GTM. The Package can be converted to a Buffer.
		 */

		/* All elements of the Package must be integers */

		elements = original_object->package.elements;
		count = original_object->package.count;

		for (i = 0; i < count; i++) {
			if ((!*elements) ||
			    ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
				return (AE_AML_OPERAND_TYPE);
			}
			elements++;
		}

		/* Create the new buffer object to replace the Package */

		new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
		if (!new_object) {
			return (AE_NO_MEMORY);
		}

		/* Copy the package elements (integers) to the buffer as DWORDs */

		elements = original_object->package.elements;
		dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);

		for (i = 0; i < count; i++) {
			*dword_buffer = (u32)(*elements)->integer.value;
			dword_buffer++;
			elements++;
		}
		break;

	default:

		return (AE_AML_OPERAND_TYPE);
	}

	*return_object = new_object;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_unicode
 *
 * PARAMETERS:  scope               - Namespace node for the method/object
 *              original_object     - ASCII String Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful.
 *
 * DESCRIPTION: Attempt to convert a String object to a Unicode string Buffer.
 *
 ******************************************************************************/
acpi_status
acpi_ns_convert_to_unicode(struct acpi_namespace_node *scope,
			   union acpi_operand_object *original_object,
			   union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	char *ascii_string;
	u16 *unicode_buffer;
	u32 unicode_length;
	u32 i;

	if (!original_object) {
		return (AE_OK);
	}

	/* If a Buffer was returned, it must be at least two bytes long */

	if (original_object->common.type == ACPI_TYPE_BUFFER) {
		if (original_object->buffer.length < 2) {
			return (AE_AML_OPERAND_VALUE);
		}

		/* NULL return_object means "no conversion performed" */

		*return_object = NULL;
		return (AE_OK);
	}

	/*
	 * The original object is an ASCII string. Convert this string to
	 * a unicode buffer.
	 */
	ascii_string = original_object->string.pointer;
	unicode_length = (original_object->string.length * 2) + 2;

	/* Create a new buffer object for the Unicode data */

	new_object = acpi_ut_create_buffer_object(unicode_length);
	if (!new_object) {
		return (AE_NO_MEMORY);
	}

	unicode_buffer = ACPI_CAST_PTR(u16, new_object->buffer.pointer);

	/* Convert ASCII to Unicode */

	for (i = 0; i < original_object->string.length; i++) {
		unicode_buffer[i] = (u16)ascii_string[i];
	}

	*return_object = new_object;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_resource
 *
 * PARAMETERS:  scope               - Namespace node for the method/object
 *              original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful
 *
 * DESCRIPTION: Attempt to convert a Integer object to a resource_template
 *              Buffer.
 *
 ******************************************************************************/
acpi_status
acpi_ns_convert_to_resource(struct acpi_namespace_node *scope,
			    union acpi_operand_object *original_object,
			    union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	u8 *buffer;

	/*
	 * We can fix the following cases for an expected resource template:
	 * 1. No return value (interpreter slack mode is disabled)
	 * 2. A "Return (Zero)" statement
	 * 3. A "Return empty buffer" statement
	 *
	 * We will return a buffer containing a single end_tag
	 * resource descriptor.
	 */
	if (original_object) {
		switch (original_object->common.type) {
		case ACPI_TYPE_INTEGER:

			/* We can only repair an Integer==0 */

			if (original_object->integer.value) {
				return (AE_AML_OPERAND_TYPE);
			}
			break;

		case ACPI_TYPE_BUFFER:

			if (original_object->buffer.length) {

				/* Additional checks can be added in the future */

				*return_object = NULL;
				return (AE_OK);
			}
			break;

		case ACPI_TYPE_STRING:
		default:

			return (AE_AML_OPERAND_TYPE);
		}
	}

	/* Create the new buffer object for the resource descriptor */

	new_object = acpi_ut_create_buffer_object(2);
	if (!new_object) {
		return (AE_NO_MEMORY);
	}

	buffer = ACPI_CAST_PTR(u8, new_object->buffer.pointer);

	/* Initialize the Buffer with a single end_tag descriptor */

	buffer[0] = (ACPI_RESOURCE_NAME_END_TAG | ASL_RDESC_END_TAG_SIZE);
	buffer[1] = 0x00;

	*return_object = new_object;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_reference
 *
 * PARAMETERS:  scope               - Namespace node for the method/object
 *              original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful
 *
 * DESCRIPTION: Attempt to convert a Integer object to a object_reference.
 *              Buffer.
 *
 ******************************************************************************/
acpi_status
acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
			     union acpi_operand_object *original_object,
			     union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object = NULL;
	acpi_status status;
	struct acpi_namespace_node *node;
	union acpi_generic_state scope_info;
	char *name;

	ACPI_FUNCTION_NAME(ns_convert_to_reference);

	/* Convert path into internal presentation */

	status =
	    acpi_ns_internalize_name(original_object->string.pointer, &name);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Find the namespace node */

	scope_info.scope.node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, scope);
	status =
	    acpi_ns_lookup(&scope_info, name, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
			   ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
			   NULL, &node);
	if (ACPI_FAILURE(status)) {

		/* Check if we are resolving a named reference within a package */

		ACPI_ERROR_NAMESPACE(&scope_info,
				     original_object->string.pointer, status);
		goto error_exit;
	}

	/* Create and init a new internal ACPI object */

	new_object = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
	if (!new_object) {
		status = AE_NO_MEMORY;
		goto error_exit;
	}
	new_object->reference.node = node;
	new_object->reference.object = node->object;
	new_object->reference.class = ACPI_REFCLASS_NAME;

	/*
	 * Increase reference of the object if needed (the object is likely a
	 * null for device nodes).
	 */
	acpi_ut_add_reference(node->object);

error_exit:
	/* name is always allocated by this point; new_object is NULL on error */
	ACPI_FREE(name);
	*return_object = new_object;
	return (status);
}
/* ==== linux-master: drivers/acpi/acpica/nsconvert.c (end of file) ==== */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: dbutils - AML debugger utilities
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acdebug.h"

#define _COMPONENT          ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbutils")

/* Local prototypes */

#ifdef ACPI_OBSOLETE_FUNCTIONS
acpi_status acpi_db_second_pass_parse(union acpi_parse_object *root);

void acpi_db_dump_buffer(u32 address);
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_match_argument
 *
 * PARAMETERS:  user_argument           - User command line
 *              arguments               - Array of commands to match against
 *
 * RETURN:      Index into command array or ACPI_TYPE_NOT_FOUND if not found
 *
 * DESCRIPTION: Search command array for a command match
 *
 ******************************************************************************/
acpi_object_type
acpi_db_match_argument(char *user_argument,
		       struct acpi_db_argument_info *arguments)
{
	u32 i;

	if (!user_argument || user_argument[0] == 0) {
		return (ACPI_TYPE_NOT_FOUND);
	}

	/* Prefix match: user_argument must match the start of a command name */

	for (i = 0; arguments[i].name; i++) {
		if (strstr(ACPI_CAST_PTR(char, arguments[i].name),
			   ACPI_CAST_PTR(char,
					 user_argument)) == arguments[i].name) {
			return (i);
		}
	}

	/* Argument not recognized */

	return (ACPI_TYPE_NOT_FOUND);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_set_output_destination
 *
 * PARAMETERS:  output_flags        - Current flags word
 *
 * RETURN:      None
 *
 * DESCRIPTION: Set the current destination for debugger output. Also sets
 *              the debug output level accordingly.
 *
 ******************************************************************************/
void acpi_db_set_output_destination(u32 output_flags)
{
	acpi_gbl_db_output_flags = (u8)output_flags;

	if ((output_flags & ACPI_DB_REDIRECTABLE_OUTPUT) &&
	    acpi_gbl_db_output_to_file) {
		acpi_dbg_level = acpi_gbl_db_debug_level;
	} else {
		acpi_dbg_level = acpi_gbl_db_console_debug_level;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_dump_external_object
 *
 * PARAMETERS:  obj_desc        - External ACPI object to dump
 *              level           - Nesting level.
 *
 * RETURN:      None
 *
 * DESCRIPTION: Dump the contents of an ACPI external object
 *
 ******************************************************************************/
void acpi_db_dump_external_object(union acpi_object *obj_desc, u32 level)
{
	u32 i;

	if (!obj_desc) {
		acpi_os_printf("[Null Object]\n");
		return;
	}

	/* Indent according to the current package nesting level */

	for (i = 0; i < level; i++) {
		acpi_os_printf(" ");
	}

	switch (obj_desc->type) {
	case ACPI_TYPE_ANY:

		acpi_os_printf("[Null Object] (Type=0)\n");
		break;

	case ACPI_TYPE_INTEGER:

		acpi_os_printf("[Integer] = %8.8X%8.8X\n",
			       ACPI_FORMAT_UINT64(obj_desc->integer.value));
		break;

	case ACPI_TYPE_STRING:

		acpi_os_printf("[String] Length %.2X = ",
			       obj_desc->string.length);
		acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
		acpi_os_printf("\n");
		break;

	case ACPI_TYPE_BUFFER:

		acpi_os_printf("[Buffer] Length %.2X = ",
			       obj_desc->buffer.length);
		if (obj_desc->buffer.length) {
			if (obj_desc->buffer.length > 16) {
				acpi_os_printf("\n");
			}

			acpi_ut_debug_dump_buffer(ACPI_CAST_PTR
						  (u8,
						   obj_desc->buffer.pointer),
						  obj_desc->buffer.length,
						  DB_BYTE_DISPLAY, _COMPONENT);
		} else {
			acpi_os_printf("\n");
		}
		break;

	case ACPI_TYPE_PACKAGE:

		acpi_os_printf("[Package] Contains %u Elements:\n",
			       obj_desc->package.count);

		/* Recurse one level deeper for each package element */

		for (i = 0; i < obj_desc->package.count; i++) {
			acpi_db_dump_external_object(&obj_desc->package.
						     elements[i], level + 1);
		}
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:

		acpi_os_printf("[Object Reference] = ");
		acpi_db_display_internal_object(obj_desc->reference.handle,
						NULL);
		break;

	case ACPI_TYPE_PROCESSOR:

		acpi_os_printf("[Processor]\n");
		break;

	case ACPI_TYPE_POWER:

		acpi_os_printf("[Power Resource]\n");
		break;

	default:

		acpi_os_printf("[Unknown Type] %X\n", obj_desc->type);
		break;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_prep_namestring
 *
 * PARAMETERS:  name            - String to prepare
 *
 * RETURN:      None
 *
 * DESCRIPTION: Translate all forward slashes and dots to backslashes.
 *
 ******************************************************************************/
void acpi_db_prep_namestring(char *name)
{

	if (!name) {
		return;
	}

	acpi_ut_strupr(name);

	/* Convert a leading forward slash to a backslash */

	if (*name == '/') {
		*name = '\\';
	}

	/* Ignore a leading backslash, this is the root prefix */

	if (ACPI_IS_ROOT_PREFIX(*name)) {
		name++;
	}

	/* Convert all slash path separators to dots */

	while (*name) {
		if ((*name == '/') || (*name == '\\')) {
			*name = '.';
		}

		name++;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_local_ns_lookup
 *
 * PARAMETERS:  name            - Name to lookup
 *
 * RETURN:      Pointer to a namespace node, null on failure
 *
 * DESCRIPTION: Lookup a name in the ACPI namespace
 *
 * Note: Currently begins search from the root. Could be enhanced to use
 * the current prefix (scope) node as the search beginning point.
 *
 ******************************************************************************/
struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name)
{
	char *internal_path;
	acpi_status status;
	struct acpi_namespace_node *node = NULL;

	acpi_db_prep_namestring(name);

	/* Build an internal namestring */

	status = acpi_ns_internalize_name(name, &internal_path);
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("Invalid namestring: %s\n", name);
		return (NULL);
	}

	/*
	 * Lookup the name.
	 * (Uses root node as the search starting point)
	 */
	status = acpi_ns_lookup(NULL, internal_path, ACPI_TYPE_ANY,
				ACPI_IMODE_EXECUTE,
				ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE,
				NULL, &node);
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("Could not locate name: %s, %s\n",
			       name, acpi_format_exception(status));
	}

	ACPI_FREE(internal_path);
	return (node);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_uint32_to_hex_string
 *
 * PARAMETERS:  value           - The value to be converted to string
 *              buffer          - Buffer for result (not less than 11 bytes)
 *
 * RETURN:      None
 *
 * DESCRIPTION: Convert the unsigned 32-bit value to the hexadecimal image
 *
 * NOTE: It is the caller's responsibility to ensure that the length of buffer
 *       is sufficient.
 *
 ******************************************************************************/
void acpi_db_uint32_to_hex_string(u32 value, char *buffer)
{
	int i;

	if (value == 0) {
		strcpy(buffer, "0");
		return;
	}

	/* Emit exactly 8 hex digits, most-significant nibble first */

	buffer[8] = '\0';

	for (i = 7; i >= 0; i--) {
		buffer[i] = acpi_gbl_upper_hex_digits[value & 0x0F];
		value = value >> 4;
	}
}

#ifdef ACPI_OBSOLETE_FUNCTIONS
/*******************************************************************************
 *
 * FUNCTION:    acpi_db_second_pass_parse
 *
 * PARAMETERS:  root            - Root of the parse tree
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Second pass parse of the ACPI tables. We need to wait until
 *              second pass to parse the control methods
 *
 ******************************************************************************/
acpi_status acpi_db_second_pass_parse(union acpi_parse_object *root)
{
	union acpi_parse_object *op = root;
	union acpi_parse_object *method;
	union acpi_parse_object *search_op;
	union acpi_parse_object *start_op;
	acpi_status status = AE_OK;
	u32 base_aml_offset;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_ENTRY();

	acpi_os_printf("Pass two parse ....\n");

	while (op) {
		if (op->common.aml_opcode == AML_METHOD_OP) {
			method = op;

			/* Create a new walk state for the parse */

			walk_state =
			    acpi_ds_create_walk_state(0, NULL, NULL, NULL);
			if (!walk_state) {
				return (AE_NO_MEMORY);
			}

			/* Init the Walk State */

			walk_state->parser_state.aml =
			    walk_state->parser_state.aml_start =
			    method->named.data;
			walk_state->parser_state.aml_end =
			    walk_state->parser_state.pkg_end =
			    method->named.data + method->named.length;
			walk_state->parser_state.start_scope = op;

			walk_state->descending_callback =
			    acpi_ds_load1_begin_op;
			walk_state->ascending_callback = acpi_ds_load1_end_op;

			/* Perform the AML parse */

			status = acpi_ps_parse_aml(walk_state);

			/* Rebase AML offsets of the freshly-parsed subtree */

			base_aml_offset =
			    (method->common.value.arg)->common.aml_offset + 1;
			start_op = (method->common.value.arg)->common.next;
			search_op = start_op;

			while (search_op) {
				search_op->common.aml_offset += base_aml_offset;
				search_op =
				    acpi_ps_get_depth_next(start_op, search_op);
			}
		}

		if (op->common.aml_opcode == AML_REGION_OP) {

			/* TBD: [Investigate] this isn't quite the right thing to do! */
			/*
			 *
			 * Method = (ACPI_DEFERRED_OP *) Op;
			 * Status = acpi_ps_parse_aml (Op, Method->Body, Method->body_length);
			 */
		}

		if (ACPI_FAILURE(status)) {
			break;
		}

		op = acpi_ps_get_depth_next(root, op);
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_dump_buffer
 *
 * PARAMETERS:  address             - Pointer to the buffer
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print a portion of a buffer
 *
 ******************************************************************************/
void acpi_db_dump_buffer(u32 address)
{

	acpi_os_printf("\nLocation %X:\n", address);

	acpi_dbg_level |= ACPI_LV_TABLES;
	acpi_ut_debug_dump_buffer(ACPI_TO_POINTER(address), 64, DB_BYTE_DISPLAY,
				  ACPI_UINT32_MAX);
}
#endif
/* ==== linux-master: drivers/acpi/acpica/dbutils.c (end of file) ==== */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdebug.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op);

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_auto_serialize_method
 *
 * PARAMETERS:  node            - Namespace Node of the method
 *              obj_desc        - Method object attached to node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse a control method AML to scan for control methods that
 *              need serialization due to the creation of named objects.
 *
 * NOTE: It is a bit of overkill to mark all such methods serialized, since
 * there is only a problem if the method actually blocks during execution.
 * A blocking operation is, for example, a Sleep() operation, or any access
 * to an operation region. However, it is probably not possible to easily
 * detect whether a method will block or not, so we simply mark all suspicious
 * methods as serialized.
 *
 * NOTE2: This code is essentially a generic routine for parsing a single
 * control method.
 *
 ******************************************************************************/

acpi_status
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
			      union acpi_operand_object *obj_desc)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Method auto-serialization parse [%4.4s] %p\n",
			  acpi_ut_get_node_name(node), node));

	/* Create/Init a root op for the method parse tree */

	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(op);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		acpi_ps_free_op(op);
		return_ACPI_STATUS(status);
	}

	/*
	 * The descending callback aborts the parse (AE_CTRL_TERMINATE) as
	 * soon as a name-creating opcode is found — see
	 * acpi_ds_detect_named_opcodes below.
	 */
	walk_state->descending_callback = acpi_ds_detect_named_opcodes;

	/* Parse the method, scan for creation of named objects */

	status = acpi_ps_parse_aml(walk_state);

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_detect_named_opcodes
 *
 * PARAMETERS:  walk_state      - Current state of the parse tree walk
 *              out_op          - Unused, required for parser interface
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
 *              Currently used to detect methods that must be marked serialized
 *              in order to avoid problems with the creation of named objects.
 *
 ******************************************************************************/

static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op)
{

	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);

	/* We are only interested in opcodes that create a new name */

	if (!
	    (walk_state->op_info->
	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
		return (AE_OK);
	}

	/*
	 * At this point, we know we have a Named object opcode.
	 * Mark the method as serialized. Later code will create a mutex for
	 * this method to enforce serialization.
	 *
	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
	 * Sync Level mechanism for this method, even though it is now serialized.
	 * Otherwise, there can be conflicts with existing ASL code that actually
	 * uses sync levels.
	 */
	walk_state->method_desc->method.sync_level = 0;
	walk_state->method_desc->method.info_flags |=
	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
			  walk_state->method_node->name.ascii,
			  walk_state->method_node, walk_state->op_info->name,
			  walk_state->opcode));

	/* Abort the parse, no need to examine this method any further */

	return (AE_CTRL_TERMINATE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error.
Invoke the global exception handler if
 *              present, dump the method data if the debugger is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	u32 aml_offset;
	acpi_name name = 0;

	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
						walk_state->parser_state.
						aml_start);

		/* Prefer the method node name; fall back to the deferred node */

		if (walk_state->method_node) {
			name = walk_state->method_node->name.integer;
		} else if (walk_state->deferred_node) {
			name = walk_state->deferred_node->name.integer;
		}

		status = acpi_gbl_exception_handler(status, name,
						    walk_state->opcode,
						    aml_offset, NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

	if (ACPI_FAILURE(status)) {
		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);

		/* Display method locals/args if debugger is present */

#ifdef ACPI_DEBUGGER
		acpi_db_dump_method_info(status, walk_state);
#endif
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  obj_desc            - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method. The
 *              mutex inherits the method's sync level.
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (!(obj_desc->method.
		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
		    && (walk_state->thread->current_sync_level >
			obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s]"
				    ", current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;

				/*
				 * Update the current sync_level only if this is not an auto-
				 * serialized method. In the auto case, we have to ignore
				 * the sync level for the method mutex (created for the
				 * auto-serialization) because we have no idea of what the
				 * sync level should be. Therefore, just ignore it.
				 */
				if (!(obj_desc->method.info_flags &
				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
					walk_state->thread->current_sync_level =
					    obj_desc->method.sync_level;
				}
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    acpi_os_get_thread_id();
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status =
	    acpi_ds_begin_method_execution(method_node, obj_desc,
					   this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
				      thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto pop_walk_state;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto pop_walk_state;
	}

	next_walk_state->method_nesting_depth =
	    this_walk_state->method_nesting_depth + 1;

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	this_walk_state->method_pathname =
	    acpi_ns_get_normalized_pathname(method_node, TRUE);
	this_walk_state->method_is_nested = TRUE;

	/* Optional object evaluation log */

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION, "%-26s: %*s%s\n",
			      " Nested method call",
			      next_walk_state->method_nesting_depth * 3, " ",
			      &this_walk_state->method_pathname[1]));

	/*
	 * Invoke an internal method if necessary. A successful internal
	 * dispatch is deliberately mapped to AE_CTRL_TERMINATE so the
	 * dispatcher unwinds this (already complete) "call".
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

pop_walk_state:

	/* On error, pop the walk state to be deleted from thread */

	acpi_ds_pop_walk_state(thread);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:
acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Workaround this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}
linux-master
drivers/acpi/acpica/dsmethod.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /******************************************************************************* * * Module Name: evsci - System Control Interrupt configuration and * legacy to ACPI mode state transition functions * ******************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evsci") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /* Local prototypes */ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context); /******************************************************************************* * * FUNCTION: acpi_ev_sci_dispatch * * PARAMETERS: None * * RETURN: Status code indicates whether interrupt was handled. * * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers. * ******************************************************************************/ u32 acpi_ev_sci_dispatch(void) { struct acpi_sci_handler_info *sci_handler; acpi_cpu_flags flags; u32 int_status = ACPI_INTERRUPT_NOT_HANDLED; ACPI_FUNCTION_NAME(ev_sci_dispatch); /* Are there any host-installed SCI handlers? */ if (!acpi_gbl_sci_handler_list) { return (int_status); } flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); /* Invoke all host-installed SCI handlers */ sci_handler = acpi_gbl_sci_handler_list; while (sci_handler) { /* Invoke the installed handler (at interrupt level) */ int_status |= sci_handler->address(sci_handler->context); sci_handler = sci_handler->next; } acpi_os_release_lock(acpi_gbl_gpe_lock, flags); return (int_status); } /******************************************************************************* * * FUNCTION: acpi_ev_sci_xrupt_handler * * PARAMETERS: context - Calling Context * * RETURN: Status code indicates whether interrupt was handled. * * DESCRIPTION: Interrupt handler that will figure out what function or * control method to call to deal with a SCI. 
 *
 ******************************************************************************/

static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
	u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;

	ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);

	/*
	 * We are guaranteed by the ACPICA initialization/shutdown code that
	 * if this interrupt handler is installed, ACPI is enabled.
	 */

	/*
	 * Fixed Events:
	 * Check for and dispatch any Fixed Events that have occurred
	 */
	interrupt_handled |= acpi_ev_fixed_event_detect();

	/*
	 * General Purpose Events:
	 * Check for and dispatch any GPEs that have occurred
	 */
	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);

	/* Invoke all host-installed SCI handlers */

	interrupt_handled |= acpi_ev_sci_dispatch();

	/* Statistics: count every SCI taken */

	acpi_sci_count++;
	return_UINT32(interrupt_handled);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_xrupt_handler
 *
 * PARAMETERS:  context   - Calling Context
 *
 * RETURN:      Status code indicates whether interrupt was handled.
 *
 * DESCRIPTION: Handler for GPE Block Device interrupts
 *
 ******************************************************************************/

u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
	u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;

	ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);

	/*
	 * We are guaranteed by the ACPICA initialization/shutdown code that
	 * if this interrupt handler is installed, ACPI is enabled.
	 */

	/* GPEs: Check for and dispatch any GPEs that have occurred */

	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
	return_UINT32(interrupt_handled);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_sci_handler
 *
 * PARAMETERS:  none
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Installs SCI handler.
 *
 ******************************************************************************/

u32 acpi_ev_install_sci_handler(void)
{
	/* NOTE(review): returns u32 but carries an acpi_status value */

	u32 status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_install_sci_handler);

	status =
	    acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
					      acpi_ev_sci_xrupt_handler,
					      acpi_gbl_gpe_xrupt_list_head);
	return_ACPI_STATUS(status);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_all_sci_handlers
 *
 * PARAMETERS:  none
 *
 * RETURN:      AE_OK if handler uninstalled, AE_ERROR if handler was not
 *              installed to begin with
 *
 * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
 *              taken. Remove all host-installed SCI handlers.
 *
 * Note: It doesn't seem important to disable all events or set the event
 * enable registers to their original values. The OS should disable
 * the SCI interrupt level when the handler is removed, so no more
 * events will come in.
 *
 ******************************************************************************/

acpi_status acpi_ev_remove_all_sci_handlers(void)
{
	struct acpi_sci_handler_info *sci_handler;
	acpi_cpu_flags flags;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);

	/* Just let the OS remove the handler and disable the level */

	status =
	    acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
					     acpi_ev_sci_xrupt_handler);

	/* If no host handlers were ever installed, we are done */

	if (!acpi_gbl_sci_handler_list) {
		return (status);
	}

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Free all host-installed SCI handlers */

	while (acpi_gbl_sci_handler_list) {
		sci_handler = acpi_gbl_sci_handler_list;
		acpi_gbl_sci_handler_list = sci_handler->next;
		ACPI_FREE(sci_handler);
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}

#endif				/* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/evsci.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exdebug - Support for stores to the AML Debug Object
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"

#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exdebug")

#ifndef ACPI_NO_ERROR_MESSAGES
/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_do_debug_object
 *
 * PARAMETERS:  source_desc         - Object to be output to "Debug Object"
 *              level               - Indentation level (used for packages)
 *              index               - Current package element, zero if not pkg
 *
 * RETURN:      None
 *
 * DESCRIPTION: Handles stores to the AML Debug Object. For example:
 *              Store(INT1, Debug)
 *
 * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
 *
 * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
 * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
 * operational case, stores to the debug object are ignored but can be easily
 * enabled if necessary.
 *
 * Note: recursive for packages and references -- level tracks indentation.
 *
 ******************************************************************************/

void
acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
			u32 level, u32 index)
{
	u32 i;
	u32 timer;
	union acpi_operand_object *object_desc;
	u32 value;

	ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);

	/* Output must be enabled via the debug_object global or the dbg_level */

	if (!acpi_gbl_enable_aml_debug_object &&
	    !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
		return_VOID;
	}

	/* Newline -- don't emit the line header */

	if (source_desc &&
	    (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) &&
	    (source_desc->common.type == ACPI_TYPE_STRING)) {
		if ((source_desc->string.length == 1) &&
		    (*source_desc->string.pointer == '\n')) {
			acpi_os_printf("\n");
			return_VOID;
		}
	}

	/*
	 * Print line header as long as we are not in the middle of an
	 * object display
	 */
	if (!((level > 0) && index == 0)) {
		if (acpi_gbl_display_debug_timer) {
			/*
			 * We will emit the current timer value (in microseconds) with each
			 * debug output. Only need the lower 26 bits. This allows for 67
			 * million microseconds or 67 seconds before rollover.
			 *
			 * Convert 100 nanosecond units to microseconds
			 */
			timer = ((u32)acpi_os_get_timer() / 10);
			timer &= 0x03FFFFFF;

			acpi_os_printf("ACPI Debug: T=0x%8.8X %*s", timer,
				       level, " ");
		} else {
			acpi_os_printf("ACPI Debug: %*s", level, " ");
		}
	}

	/* Display the index for package output only */

	if (index > 0) {
		acpi_os_printf("(%.2u) ", index - 1);
	}

	if (!source_desc) {
		acpi_os_printf("[Null Object]\n");
		return_VOID;
	}

	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {

		/* No object type prefix needed for integers and strings */

		if ((source_desc->common.type != ACPI_TYPE_INTEGER) &&
		    (source_desc->common.type != ACPI_TYPE_STRING)) {
			acpi_os_printf("%s ",
				       acpi_ut_get_object_type_name
				       (source_desc));
		}

		if (!acpi_ut_valid_internal_object(source_desc)) {
			acpi_os_printf("%p, Invalid Internal Object!\n",
				       source_desc);
			return_VOID;
		}
	} else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
		   ACPI_DESC_TYPE_NAMED) {

		/* A namespace node: print its type and address, then done */

		acpi_os_printf("%s (Node %p)\n",
			       acpi_ut_get_type_name(((struct
						       acpi_namespace_node *)
						      source_desc)->type),
			       source_desc);
		return_VOID;
	} else {
		/* Neither operand nor named descriptor: nothing to display */

		return_VOID;
	}

	/* source_desc is of type ACPI_DESC_TYPE_OPERAND */

	switch (source_desc->common.type) {
	case ACPI_TYPE_INTEGER:

		/* Output correct integer width */

		if (acpi_gbl_integer_byte_width == 4) {
			acpi_os_printf("0x%8.8X\n",
				       (u32)source_desc->integer.value);
		} else {
			acpi_os_printf("0x%8.8X%8.8X\n",
				       ACPI_FORMAT_UINT64(source_desc->integer.
							  value));
		}
		break;

	case ACPI_TYPE_BUFFER:

		/* Dump at most the first 256 bytes of the buffer */

		acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
		acpi_ut_dump_buffer(source_desc->buffer.pointer,
				    (source_desc->buffer.length < 256) ?
				    source_desc->buffer.length : 256,
				    DB_BYTE_DISPLAY, 0);
		break;

	case ACPI_TYPE_STRING:

		acpi_os_printf("\"%s\"\n", source_desc->string.pointer);
		break;

	case ACPI_TYPE_PACKAGE:

		acpi_os_printf("(Contains 0x%.2X Elements):\n",
			       source_desc->package.count);

		/* Output the entire contents of the package (recursive) */

		for (i = 0; i < source_desc->package.count; i++) {
			acpi_ex_do_debug_object(source_desc->package.
						elements[i], level + 4, i + 1);
		}
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:

		acpi_os_printf("[%s] ",
			       acpi_ut_get_reference_name(source_desc));

		/* Decode the reference */

		switch (source_desc->reference.class) {
		case ACPI_REFCLASS_INDEX:

			acpi_os_printf("0x%X\n", source_desc->reference.value);
			break;

		case ACPI_REFCLASS_TABLE:

			/* Case for ddb_handle */

			acpi_os_printf("Table Index 0x%X\n",
				       source_desc->reference.value);
			return_VOID;

		default:

			break;
		}

		acpi_os_printf(" ");

		/* Check for valid node first, then valid object */

		if (source_desc->reference.node) {
			if (ACPI_GET_DESCRIPTOR_TYPE
			    (source_desc->reference.node) !=
			    ACPI_DESC_TYPE_NAMED) {
				acpi_os_printf
				    (" %p - Not a valid namespace node\n",
				     source_desc->reference.node);
			} else {
				acpi_os_printf("Node %p [%4.4s] ",
					       source_desc->reference.node,
					       (source_desc->reference.node)->
					       name.ascii);

				switch ((source_desc->reference.node)->type) {

					/* These types have no attached object */

				case ACPI_TYPE_DEVICE:

					acpi_os_printf("Device\n");
					break;

				case ACPI_TYPE_THERMAL:

					acpi_os_printf("Thermal Zone\n");
					break;

				default:

					/* Recurse into the attached object */

					acpi_ex_do_debug_object((source_desc->
								 reference.
								 node)->object,
								level + 4, 0);
					break;
				}
			}
		} else if (source_desc->reference.object) {
			if (ACPI_GET_DESCRIPTOR_TYPE
			    (source_desc->reference.object) ==
			    ACPI_DESC_TYPE_NAMED) {

				/* Reference object is a namespace node */

				acpi_ex_do_debug_object(ACPI_CAST_PTR
							(union
							 acpi_operand_object,
							 source_desc->reference.
							 object), level + 4, 0);
			} else {
				object_desc = source_desc->reference.object;
				value = source_desc->reference.value;

				/* Index reference: display the referenced element */

				switch (object_desc->common.type) {
				case ACPI_TYPE_BUFFER:

					acpi_os_printf("Buffer[%u] = 0x%2.2X\n",
						       value,
						       *source_desc->reference.
						       index_pointer);
					break;

				case ACPI_TYPE_STRING:

					acpi_os_printf
					    ("String[%u] = \"%c\" (0x%2.2X)\n",
					     value,
					     *source_desc->reference.
					     index_pointer,
					     *source_desc->reference.
					     index_pointer);
					break;

				case ACPI_TYPE_PACKAGE:

					acpi_os_printf("Package[%u] = ", value);
					if (!(*source_desc->reference.where)) {
						acpi_os_printf
						    ("[Uninitialized Package Element]\n");
					} else {
						acpi_ex_do_debug_object
						    (*source_desc->reference.
						     where, level + 4, 0);
					}
					break;

				default:

					acpi_os_printf
					    ("Unknown Reference object type %X\n",
					     object_desc->common.type);
					break;
				}
			}
		}
		break;

	default:

		acpi_os_printf("(Descriptor %p)\n", source_desc);
		break;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
	return_VOID;
}
#endif
/* Source: linux-master, drivers/acpi/acpica/exdebug.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: utstrsuppt - Support functions for string-to-integer conversion
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"

#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utstrsuppt")

/* Local prototypes */
static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);

static acpi_status
acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);

static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_convert_octal_string
 *
 * PARAMETERS:  string                  - Null terminated input string
 *              return_value_ptr        - Where the converted value is returned
 *
 * RETURN:      Status and 64-bit converted integer
 *
 * DESCRIPTION: Performs a base 8 conversion of the input string to an
 *              integer value, either 32 or 64 bits.
 *
 * NOTE:        Maximum 64-bit unsigned octal value is 01777777777777777777777
 *              Maximum 32-bit unsigned octal value is 037777777777
 *
 ******************************************************************************/

acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr)
{
	u64 accumulated_value = 0;
	acpi_status status = AE_OK;

	/* Convert each ASCII byte in the input string */

	while (*string) {
		/*
		 * Character must be ASCII 0-7, otherwise:
		 * 1) Runtime: terminate with no error, per the ACPI spec
		 * 2) Compiler: return an error
		 */
		if (!(ACPI_IS_OCTAL_DIGIT(*string))) {
#ifdef ACPI_ASL_COMPILER
			status = AE_BAD_OCTAL_CONSTANT;
#endif
			break;
		}

		/* Convert and insert this octal digit into the accumulator */

		status = acpi_ut_insert_digit(&accumulated_value, 8, *string);
		if (ACPI_FAILURE(status)) {
			status = AE_OCTAL_OVERFLOW;
			break;
		}

		string++;
	}

	/* Always return the value that has been accumulated */

	*return_value_ptr = accumulated_value;
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_convert_decimal_string
 *
 * PARAMETERS:  string                  - Null terminated input string
 *              return_value_ptr        - Where the converted value is returned
 *
 * RETURN:      Status and 64-bit converted integer
 *
 * DESCRIPTION: Performs a base 10 conversion of the input string to an
 *              integer value, either 32 or 64 bits.
 *
 * NOTE:        Maximum 64-bit unsigned decimal value is 18446744073709551615
 *              Maximum 32-bit unsigned decimal value is 4294967295
 *
 ******************************************************************************/

acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr)
{
	u64 accumulated_value = 0;
	acpi_status status = AE_OK;

	/* Convert each ASCII byte in the input string */

	while (*string) {
		/*
		 * Character must be ASCII 0-9, otherwise:
		 * 1) Runtime: terminate with no error, per the ACPI spec
		 * 2) Compiler: return an error
		 */
		if (!isdigit((int)*string)) {
#ifdef ACPI_ASL_COMPILER
			status = AE_BAD_DECIMAL_CONSTANT;
#endif
			break;
		}

		/* Convert and insert this decimal digit into the accumulator */

		status = acpi_ut_insert_digit(&accumulated_value, 10, *string);
		if (ACPI_FAILURE(status)) {
			status = AE_DECIMAL_OVERFLOW;
			break;
		}

		string++;
	}

	/* Always return the value that has been accumulated */

	*return_value_ptr = accumulated_value;
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_convert_hex_string
 *
 * PARAMETERS:  string                  - Null terminated input string
 *              return_value_ptr        - Where the converted value is returned
 *
 * RETURN:      Status and 64-bit converted integer
 *
 * DESCRIPTION: Performs a base 16 conversion of the input string to an
 *              integer value, either 32 or 64 bits.
 *
 * NOTE:        Maximum 64-bit unsigned hex value is 0xFFFFFFFFFFFFFFFF
 *              Maximum 32-bit unsigned hex value is 0xFFFFFFFF
 *
 ******************************************************************************/

acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr)
{
	u64 accumulated_value = 0;
	acpi_status status = AE_OK;

	/* Convert each ASCII byte in the input string */

	while (*string) {
		/*
		 * Character must be ASCII A-F, a-f, or 0-9, otherwise:
		 * 1) Runtime: terminate with no error, per the ACPI spec
		 * 2) Compiler: return an error
		 */
		if (!isxdigit((int)*string)) {
#ifdef ACPI_ASL_COMPILER
			status = AE_BAD_HEX_CONSTANT;
#endif
			break;
		}

		/* Convert and insert this hex digit into the accumulator */

		status = acpi_ut_insert_digit(&accumulated_value, 16, *string);
		if (ACPI_FAILURE(status)) {
			status = AE_HEX_OVERFLOW;
			break;
		}

		string++;
	}

	/* Always return the value that has been accumulated */

	*return_value_ptr = accumulated_value;
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_leading_zeros
 *
 * PARAMETERS:  string                  - Pointer to input ASCII string
 *
 * RETURN:      Next character after any leading zeros. This character may be
 *              used by the caller to detect end-of-string.
 *
 * DESCRIPTION: Remove any leading zeros in the input string. Return the
 *              next character after the final ASCII zero to enable the caller
 *              to check for the end of the string (NULL terminator).
 *
 ******************************************************************************/

char acpi_ut_remove_leading_zeros(char **string)
{

	while (**string == ACPI_ASCII_ZERO) {
		*string += 1;
	}

	return (**string);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_whitespace
 *
 * PARAMETERS:  string                  - Pointer to input ASCII string
 *
 * RETURN:      Next character after any whitespace. This character may be
 *              used by the caller to detect end-of-string.
 *
 * DESCRIPTION: Remove any leading whitespace in the input string. Return the
 *              next character after the final whitespace to enable the caller
 *              to check for the end of the string (NULL terminator).
 *
 ******************************************************************************/

char acpi_ut_remove_whitespace(char **string)
{

	while (isspace((u8)**string)) {
		*string += 1;
	}

	return (**string);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_detect_hex_prefix
 *
 * PARAMETERS:  string                  - Pointer to input ASCII string
 *
 * RETURN:      TRUE if a "0x" prefix was found at the start of the string
 *
 * DESCRIPTION: Detect and remove a hex "0x" prefix
 *
 ******************************************************************************/

u8 acpi_ut_detect_hex_prefix(char **string)
{
	char *initial_position = *string;

	acpi_ut_remove_hex_prefix(string);
	if (*string != initial_position) {
		return (TRUE);	/* String is past leading 0x */
	}

	return (FALSE);		/* Not a hex string */
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_hex_prefix
 *
 * PARAMETERS:  string                  - Pointer to input ASCII string
 *
 * RETURN:      none
 *
 * DESCRIPTION: Remove a hex "0x" prefix
 *
 ******************************************************************************/

void acpi_ut_remove_hex_prefix(char **string)
{
	if ((**string == ACPI_ASCII_ZERO) &&
	    (tolower((int)*(*string + 1)) == 'x')) {
		*string += 2;	/* Go past the leading 0x */
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_detect_octal_prefix
 *
 * PARAMETERS:  string                  - Pointer to input ASCII string
 *
 * RETURN:      True if an octal "0" prefix was found at the start of the
 *              string
 *
 * DESCRIPTION: Detect and remove an octal prefix (zero)
 *
 ******************************************************************************/

u8 acpi_ut_detect_octal_prefix(char **string)
{

	if (**string == ACPI_ASCII_ZERO) {
		*string += 1;	/* Go past the leading 0 */
		return (TRUE);
	}

	return (FALSE);		/* Not an octal string */
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_insert_digit
 *
 * PARAMETERS:  accumulated_value       - Current value of the integer value
 *                                        accumulator. The new value is
 *                                        returned here.
 *              base                    - Radix, either 8/10/16
 *              ascii_digit             - ASCII single digit to be inserted
 *
 * RETURN:      Status and result of the convert/insert operation. The only
 *              possible returned exception code is numeric overflow of
 *              either the multiply or add conversion operations.
 *
 * DESCRIPTION: Generic conversion and insertion function for all bases:
 *
 *              1) Multiply the current accumulated/converted value by the
 *              base in order to make room for the new character.
 *
 *              2) Convert the new character to binary and add it to the
 *              current accumulated value.
 *
 *              Note: The only possible exception indicates an integer
 *              overflow (AE_NUMERIC_OVERFLOW)
 *
 ******************************************************************************/

static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
{
	acpi_status status;
	u64 product;

	/* Make room in the accumulated value for the incoming digit */

	status = acpi_ut_strtoul_multiply64(*accumulated_value, base, &product);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Add in the new digit, and store the sum to the accumulated value */

	status =
	    acpi_ut_strtoul_add64(product,
				  acpi_ut_ascii_char_to_hex(ascii_digit),
				  accumulated_value);

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_strtoul_multiply64
 *
 * PARAMETERS:  multiplicand            - Current accumulated converted integer
 *              base                    - Base/Radix
 *              out_product             - Where the product is returned
 *
 * RETURN:      Status and 64-bit product
 *
 * DESCRIPTION: Multiply two 64-bit values, with checking for 64-bit overflow as
 *              well as 32-bit overflow if necessary (if the current global
 *              integer width is 32).
 *
 ******************************************************************************/

static acpi_status
acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
{
	u64 product;
	u64 quotient;

	/* Exit if either operand is zero */

	*out_product = 0;
	if (!multiplicand || !base) {
		return (AE_OK);
	}

	/*
	 * Check for 64-bit overflow before the actual multiplication.
	 *
	 * Notes: 64-bit division is often not supported on 32-bit platforms
	 * (it requires a library function), therefore ACPICA has a local
	 * 64-bit divide function. Also, Multiplier is currently only used
	 * as the radix (8/10/16), so the 64/32 divide will always work.
	 */
	acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
	if (multiplicand > quotient) {
		return (AE_NUMERIC_OVERFLOW);
	}

	product = multiplicand * base;

	/* Check for 32-bit overflow if necessary */

	if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
		return (AE_NUMERIC_OVERFLOW);
	}

	*out_product = product;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_strtoul_add64
 *
 * PARAMETERS:  addend1                 - Current accumulated converted integer
 *              digit                   - New hex value/char
 *              out_sum                 - Where sum is returned (Accumulator)
 *
 * RETURN:      Status and 64-bit sum
 *
 * DESCRIPTION: Add two 64-bit values, with checking for 64-bit overflow as
 *              well as 32-bit overflow if necessary (if the current global
 *              integer width is 32).
 *
 ******************************************************************************/

static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
{
	u64 sum;

	/* Check for 64-bit overflow before the actual addition */

	if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
		return (AE_NUMERIC_OVERFLOW);
	}

	sum = addend1 + digit;

	/* Check for 32-bit overflow if necessary */

	if ((acpi_gbl_integer_bit_width == 32) && (sum > ACPI_UINT32_MAX)) {
		return (AE_NUMERIC_OVERFLOW);
	}

	*out_sum = sum;
	return (AE_OK);
}
/* Source: linux-master, drivers/acpi/acpica/utstrsuppt.c */
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: dbinput - user front-end to the AML debugger
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdebug.h"

#ifdef ACPI_APPLICATION
#include "acapps.h"
#endif

#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbinput")

/* Local prototypes */
static u32 acpi_db_get_line(char *input_buffer);

static u32 acpi_db_match_command(char *user_command);

static void acpi_db_display_command_info(const char *command, u8 display_all);

static void acpi_db_display_help(char *command);

static u8
acpi_db_match_command_help(const char *command,
			   const struct acpi_db_command_help *help);

/*
 * Top-level debugger commands.
 *
 * This list of commands must match the string table below it
 * (acpi_gbl_db_commands): the enum value is used as an index into that table.
 */
enum acpi_ex_debugger_commands {
	CMD_NOT_FOUND = 0,
	CMD_NULL,
	CMD_ALL,
	CMD_ALLOCATIONS,
	CMD_ARGS,
	CMD_ARGUMENTS,
	CMD_BREAKPOINT,
	CMD_BUSINFO,
	CMD_CALL,
	CMD_DEBUG,
	CMD_DISASSEMBLE,
	CMD_DISASM,
	CMD_DUMP,
	CMD_EVALUATE,
	CMD_EXECUTE,
	CMD_EXIT,
	CMD_FIELDS,
	CMD_FIND,
	CMD_GO,
	CMD_HANDLERS,
	CMD_HELP,
	CMD_HELP2,
	CMD_HISTORY,
	CMD_HISTORY_EXE,
	CMD_HISTORY_LAST,
	CMD_INFORMATION,
	CMD_INTEGRITY,
	CMD_INTO,
	CMD_LEVEL,
	CMD_LIST,
	CMD_LOCALS,
	CMD_LOCKS,
	CMD_METHODS,
	CMD_NAMESPACE,
	CMD_NOTIFY,
	CMD_OBJECTS,
	CMD_OSI,
	CMD_OWNER,
	CMD_PATHS,
	CMD_PREDEFINED,
	CMD_PREFIX,
	CMD_QUIT,
	CMD_REFERENCES,
	CMD_RESOURCES,
	CMD_RESULTS,
	CMD_SET,
	CMD_STATS,
	CMD_STOP,
	CMD_TABLES,
	CMD_TEMPLATE,
	CMD_TRACE,
	CMD_TREE,
	CMD_TYPE,
#ifdef ACPI_APPLICATION
	CMD_ENABLEACPI,
	CMD_EVENT,
	CMD_GPE,
	CMD_GPES,
	CMD_SCI,
	CMD_SLEEP,

	CMD_CLOSE,
	CMD_LOAD,
	CMD_OPEN,
	CMD_UNLOAD,

	CMD_TERMINATE,
	CMD_BACKGROUND,
	CMD_THREADS,

	CMD_TEST,
	CMD_INTERRUPT,
#endif
};

/* First real command index (entries 0 and 1 are sentinels) */
#define CMD_FIRST_VALID 2

/* Second parameter is the required argument count */

static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
	{"<NOT FOUND>", 0},
	{"<NULL>", 0},
	{"ALL", 1},
	{"ALLOCATIONS", 0},
	{"ARGS", 0},
	{"ARGUMENTS", 0},
	{"BREAKPOINT", 1},
	{"BUSINFO", 0},
	{"CALL", 0},
	{"DEBUG", 1},
	{"DISASSEMBLE", 1},
	{"DISASM", 1},
	{"DUMP", 1},
	{"EVALUATE", 1},
	{"EXECUTE", 1},
	{"EXIT", 0},
	{"FIELDS", 1},
	{"FIND", 1},
	{"GO", 0},
	{"HANDLERS", 0},
	{"HELP", 0},
	{"?", 0},
	{"HISTORY", 0},
	{"!", 1},
	{"!!", 0},
	{"INFORMATION", 0},
	{"INTEGRITY", 0},
	{"INTO", 0},
	{"LEVEL", 0},
	{"LIST", 0},
	{"LOCALS", 0},
	{"LOCKS", 0},
	{"METHODS", 0},
	{"NAMESPACE", 0},
	{"NOTIFY", 2},
	{"OBJECTS", 0},
	{"OSI", 0},
	{"OWNER", 1},
	{"PATHS", 0},
	{"PREDEFINED", 0},
	{"PREFIX", 0},
	{"QUIT", 0},
	{"REFERENCES", 1},
	{"RESOURCES", 0},
	{"RESULTS", 0},
	{"SET", 3},
	{"STATS", 1},
	{"STOP", 0},
	{"TABLES", 0},
	{"TEMPLATE", 1},
	{"TRACE", 1},
	{"TREE", 0},
	{"TYPE", 1},
#ifdef ACPI_APPLICATION
	{"ENABLEACPI", 0},
	{"EVENT", 1},
	{"GPE", 1},
	{"GPES", 0},
	{"SCI", 0},
	{"SLEEP", 0},

	{"CLOSE", 0},
	{"LOAD", 1},
	{"OPEN", 1},
	{"UNLOAD", 1},

	{"TERMINATE", 0},
	{"BACKGROUND", 1},
	{"THREADS", 3},

	{"TEST", 1},
	{"INTERRUPT", 1},
#endif
	{NULL, 0}
};

/*
 * Help for all debugger commands. First argument is the number of lines
 * of help to output for the command.
 *
 * Note: Some commands are not supported by the kernel-level version of
 * the debugger.
 */
static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
	{0, "\nNamespace Access:", "\n"},
	{1, " Businfo", "Display system bus info\n"},
	{1, " Disassemble <Method>", "Disassemble a control method\n"},
	{1, " Find <AcpiName> (? is wildcard)",
	 "Find ACPI name(s) with wildcards\n"},
	{1, " Integrity", "Validate namespace integrity\n"},
	{1, " Methods", "Display list of loaded control methods\n"},
	{1, " Fields <AddressSpaceId>",
	 "Display list of loaded field units by space ID\n"},
	{1, " Namespace [Object] [Depth]",
	 "Display loaded namespace tree/subtree\n"},
	{1, " Notify <Object> <Value>", "Send a notification on Object\n"},
	{1, " Objects [ObjectType]",
	 "Display summary of all objects or just given type\n"},
	{1, " Owner <OwnerId> [Depth]",
	 "Display loaded namespace by object owner\n"},
	{1, " Paths", "Display full pathnames of namespace objects\n"},
	{1, " Predefined", "Check all predefined names\n"},
	{1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
	{1, " References <Addr>", "Find all references to object at addr\n"},
	{1, " Resources [DeviceName]",
	 "Display Device resources (no arg = all devices)\n"},
	{1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
	{1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
	{1, " Type <Object>", "Display object type\n"},

	{0, "\nControl Method Execution:", "\n"},
	{1, " All <NameSeg>", "Evaluate all objects named NameSeg\n"},
	{1, " Evaluate <Namepath> [Arguments]",
	 "Evaluate object or control method\n"},
	{1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
#ifdef ACPI_APPLICATION
	{1, " Background <Namepath> [Arguments]",
	 "Evaluate object/method in a separate thread\n"},
	{1, " Thread <Threads><Loops><NamePath>",
	 "Spawn threads to execute method(s)\n"},
#endif
	{1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
	{7, " [Arguments] formats:", "Control method argument formats\n"},
	{1, " Hex Integer", "Integer\n"},
	{1, " \"Ascii String\"", "String\n"},
	{1, " (Hex Byte List)", "Buffer\n"},
	{1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
	{1, " [Package Element List]", "Package\n"},
	{1, " [0x01 0x1234 \"string\"]", "Package example (3 elements)\n"},

	{0, "\nMiscellaneous:", "\n"},
	{1, " Allocations", "Display list of current memory allocations\n"},
	{2, " Dump <Address>|<Namepath>", "\n"},
	{0, " [Byte|Word|Dword|Qword]", "Display ACPI objects or memory\n"},
	{1, " Handlers", "Info about global handlers\n"},
	{1, " Help [Command]", "This help screen or individual command\n"},
	{1, " History", "Display command history buffer\n"},
	{1, " Level <DebugLevel>] [console]",
	 "Get/Set debug level for file or console\n"},
	{1, " Locks", "Current status of internal mutexes\n"},
	{1, " Osi [Install|Remove <name>]",
	 "Display or modify global _OSI list\n"},
	{1, " Quit or Exit", "Exit this command\n"},
	{8, " Stats <SubCommand>",
	 "Display namespace and memory statistics\n"},
	{1, " Allocations", "Display list of current memory allocations\n"},
	{1, " Memory", "Dump internal memory lists\n"},
	{1, " Misc", "Namespace search and mutex stats\n"},
	{1, " Objects", "Summary of namespace objects\n"},
	{1, " Sizes", "Sizes for each of the internal objects\n"},
	{1, " Stack", "Display CPU stack usage\n"},
	{1, " Tables", "Info about current ACPI table(s)\n"},
	{1, " Tables", "Display info about loaded ACPI tables\n"},
#ifdef ACPI_APPLICATION
	{1, " Terminate", "Delete namespace and all internal objects\n"},
#endif
	{1, " ! <CommandNumber>", "Execute command from history buffer\n"},
	{1, " !!", "Execute last command again\n"},

	{0, "\nMethod and Namespace Debugging:", "\n"},
	{5, " Trace <State> [<Namepath>] [Once]",
	 "Trace control method execution\n"},
	{1, " Enable", "Enable all messages\n"},
	{1, " Disable", "Disable tracing\n"},
	{1, " Method", "Enable method execution messages\n"},
	{1, " Opcode", "Enable opcode execution messages\n"},
	{3, " Test <TestName>", "Invoke a debug test\n"},
	{1, " Objects", "Read/write/compare all namespace data objects\n"},
	{1, " Predefined",
	 "Validate all ACPI predefined names (_STA, etc.)\n"},
	{1, " Execute predefined", "Execute all predefined (public) methods\n"},

	{0, "\nControl Method Single-Step Execution:", "\n"},
	{1, " Arguments (or Args)", "Display method arguments\n"},
	{1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
	{1, " Call", "Run to next control method invocation\n"},
	{1, " Go", "Allow method to run to completion\n"},
	{1, " Information", "Display info about the current method\n"},
	{1, " Into", "Step into (not over) a method call\n"},
	{1, " List [# of Aml Opcodes]", "Display method ASL statements\n"},
	{1, " Locals", "Display method local variables\n"},
	{1, " Results", "Display method result stack\n"},
	{1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
	{1, " Stop", "Terminate control method\n"},
	{1, " Tree", "Display control method calling tree\n"},
	{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
#ifdef ACPI_APPLICATION
	{0, "\nFile Operations:", "\n"},
	{1, " Close", "Close debug output file\n"},
	{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
	{1, " Open <Output Filename>", "Open a file for debug output\n"},
	{1, " Unload <Namepath>",
	 "Unload an ACPI table via namespace object\n"},

	{0, "\nHardware Simulation:", "\n"},
	{1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
	{1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
	{1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
	{1, " Gpes", "Display info on all GPE devices\n"},
	{1, " Sci", "Generate an SCI\n"},
	{1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
	{1, " Interrupt <GSIV>", "Simulate an interrupt\n"},
#endif
	{0, NULL, NULL}
};

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_match_command_help
 *
 * PARAMETERS:  command             - Command string to match
 *              help                - Help table entry to attempt match
 *
 * RETURN:      TRUE if command matched, FALSE otherwise
 *
 * DESCRIPTION: Attempt to match a command in the help table in order to
 *              print help information for a single command.
 *
 ******************************************************************************/

static u8
acpi_db_match_command_help(const char *command,
			   const struct acpi_db_command_help *help)
{
	/* NOTE(review): could be const char * for const-correctness */
	char *invocation = help->invocation;
	u32 line_count;

	/* Valid commands in the help table begin with a couple of spaces */

	if (*invocation != ' ') {
		return (FALSE);
	}

	while (*invocation == ' ') {
		invocation++;
	}

	/* Match command name (full command or substring) */

	while ((*command) && (*invocation) && (*invocation != ' ')) {
		if (tolower((int)*command) != tolower((int)*invocation)) {
			return (FALSE);
		}

		invocation++;
		command++;
	}

	/* Print the appropriate number of help lines */

	line_count = help->line_count;
	while (line_count) {
		acpi_os_printf("%-38s : %s", help->invocation,
			       help->description);

		help++;
		line_count--;
	}

	return (TRUE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_display_command_info
 *
 * PARAMETERS:  command             - Command string to match
 *              display_all         - Display all matching commands, or just
 *                                    the first one (substring match)
 *
 * RETURN:      None
 *
 * DESCRIPTION: Display help information for a Debugger command.
 *
 ******************************************************************************/

static void acpi_db_display_command_info(const char *command, u8 display_all)
{
	const struct acpi_db_command_help *next;
	u8 matched;

	next = acpi_gbl_db_command_help;
	while (next->invocation) {
		matched = acpi_db_match_command_help(command, next);

		/* Stop after the first match unless display_all is set */

		if (!display_all && matched) {
			return;
		}

		next++;
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_display_help
 *
 * PARAMETERS:  command             - Optional command string to display help.
 *                                    if not specified, all debugger command
 *                                    help strings are displayed
 *
 * RETURN:      None
 *
 * DESCRIPTION: Display help for a single debugger command, or all of them.
 *
 ******************************************************************************/

static void acpi_db_display_help(char *command)
{
	const struct acpi_db_command_help *next = acpi_gbl_db_command_help;

	if (!command) {

		/* No argument to help, display help for all commands */

		acpi_os_printf("\nSummary of AML Debugger Commands\n\n");

		while (next->invocation) {
			acpi_os_printf("%-38s%s", next->invocation,
				       next->description);
			next++;
		}
		acpi_os_printf("\n");

	} else {
		/* Display help for all commands that match the substring */

		acpi_db_display_command_info(command, TRUE);
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_get_next_token
 *
 * PARAMETERS:  string          - Command buffer
 *              next            - Return value, end of next token
 *
 * RETURN:      Pointer to the start of the next token.
 *
 * DESCRIPTION: Command line parsing. Get the next token on the command line
 *              Tokens are delimited by whitespace, or bracketed by "",
 *              (), {}, or [] for string/buffer/field-unit/package arguments.
 *
 ******************************************************************************/

char *acpi_db_get_next_token(char *string,
			     char **next, acpi_object_type *return_type)
{
	char *start;
	u32 depth;
	acpi_object_type type = ACPI_TYPE_INTEGER;

	/* At end of buffer? */

	if (!string || !(*string)) {
		return (NULL);
	}

	/* Remove any spaces at the beginning, ignore blank lines */

	while (*string && isspace((int)*string)) {
		string++;
	}

	if (!(*string)) {
		return (NULL);
	}

	switch (*string) {
	case '"':

		/* This is a quoted string, scan until closing quote */

		string++;
		start = string;
		type = ACPI_TYPE_STRING;

		/* Find end of string */

		while (*string && (*string != '"')) {
			string++;
		}
		break;

	case '(':

		/* This is the start of a buffer, scan until closing paren */

		string++;
		start = string;
		type = ACPI_TYPE_BUFFER;

		/* Find end of buffer */

		while (*string && (*string != ')')) {
			string++;
		}
		break;

	case '{':

		/* This is the start of a field unit, scan until closing brace */

		string++;
		start = string;
		type = ACPI_TYPE_FIELD_UNIT;

		/* Find end of buffer */

		while (*string && (*string != '}')) {
			string++;
		}
		break;

	case '[':

		/* This is the start of a package, scan until closing bracket */

		string++;
		depth = 1;
		start = string;
		type = ACPI_TYPE_PACKAGE;

		/* Find end of package (closing bracket) */

		while (*string) {

			/* Handle String package elements */

			if (*string == '"') {
				/* Find end of string */

				string++;
				while (*string && (*string != '"')) {
					string++;
				}
				if (!(*string)) {
					break;
				}
			} else if (*string == '[') {
				depth++;	/* A nested package declaration */
			} else if (*string == ']') {
				depth--;
				if (depth == 0) {	/* Found final package closing bracket */
					break;
				}
			}

			string++;
		}
		break;

	default:

		start = string;

		/* Find end of token */

		while (*string && !isspace((int)*string)) {
			string++;
		}
		break;
	}

	/* NUL-terminate the token in place and report where to resume */

	if (!(*string)) {
		*next = NULL;
	} else {
		*string = 0;
		*next = string + 1;
	}

	*return_type = type;
	return (start);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_get_line
 *
 * PARAMETERS:  input_buffer        - Command line buffer
 *
 * RETURN:      Count of arguments to the command
 *
 * DESCRIPTION: Get the next command line from the user. Gets entire line
 *              up to the next newline. Tokenizes the line into the global
 *              acpi_gbl_db_args / acpi_gbl_db_arg_types arrays.
 *
 ******************************************************************************/

static u32 acpi_db_get_line(char *input_buffer)
{
	u32 i;
	u32 count;
	char *next;
	char *this;

	/* Copy to a work buffer; reject over-long input lines */

	if (acpi_ut_safe_strcpy
	    (acpi_gbl_db_parsed_buf, sizeof(acpi_gbl_db_parsed_buf),
	     input_buffer)) {
		acpi_os_printf
		    ("Buffer overflow while parsing input line (max %u characters)\n",
		     (u32)sizeof(acpi_gbl_db_parsed_buf));
		return (0);
	}

	this = acpi_gbl_db_parsed_buf;
	for (i = 0; i < ACPI_DEBUGGER_MAX_ARGS; i++) {
		acpi_gbl_db_args[i] = acpi_db_get_next_token(this, &next,
							     &acpi_gbl_db_arg_types
							     [i]);
		if (!acpi_gbl_db_args[i]) {
			break;
		}

		this = next;
	}

	/* Uppercase the actual command */

	acpi_ut_strupr(acpi_gbl_db_args[0]);

	count = i;
	if (count) {
		count--;	/* Number of args only */
	}

	return (count);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_match_command
 *
 * PARAMETERS:  user_command            - User command line
 *
 * RETURN:      Index into command array, -1 if not found
 *
 * DESCRIPTION: Search command array for a command match. A match is a
 *              prefix match (user_command must match the start of a
 *              table entry).
 *
 ******************************************************************************/

static u32 acpi_db_match_command(char *user_command)
{
	u32 i;

	if (!user_command || user_command[0] == 0) {
		return (CMD_NULL);
	}

	for (i = CMD_FIRST_VALID; acpi_gbl_db_commands[i].name; i++) {
		if (strstr
		    (ACPI_CAST_PTR(char, acpi_gbl_db_commands[i].name),
		     user_command) == acpi_gbl_db_commands[i].name) {
			return (i);
		}
	}

	/* Command not recognized */

	return (CMD_NOT_FOUND);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_command_dispatch
 *
 * PARAMETERS:  input_buffer        - Command line buffer
 *              walk_state          - Current walk
 *              op                  - Current (executing) parse op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Command dispatcher.
*
 ******************************************************************************/

acpi_status
acpi_db_command_dispatch(char *input_buffer,
			 struct acpi_walk_state *walk_state,
			 union acpi_parse_object *op)
{
	u32 temp;
	u64 temp64;
	u32 command_index;
	u32 param_count;
	char *command_line;
	/* AE_CTRL_TRUE means "command handled, keep the debugger loop going" */
	acpi_status status = AE_CTRL_TRUE;

	/* If acpi_terminate has been called, terminate this thread */

	if (acpi_gbl_db_terminate_loop) {
		return (AE_CTRL_TERMINATE);
	}

	/* Find command and add to the history buffer */

	param_count = acpi_db_get_line(input_buffer);
	command_index = acpi_db_match_command(acpi_gbl_db_args[0]);

	/*
	 * We don't want to add the !! command to the history buffer. It
	 * would cause an infinite loop because it would always be the
	 * previous command.
	 */
	if (command_index != CMD_HISTORY_LAST) {
		acpi_db_add_to_history(input_buffer);
	}

	/* Verify that we have the minimum number of params */

	/*
	 * NOTE(review): command_index may be CMD_NULL or CMD_NOT_FOUND here;
	 * this indexing assumes the command table has valid entries (with
	 * min_args == 0) at those indices -- TODO confirm against the table
	 * definition.
	 */
	if (param_count < acpi_gbl_db_commands[command_index].min_args) {
		acpi_os_printf
		    ("%u parameters entered, [%s] requires %u parameters\n",
		     param_count, acpi_gbl_db_commands[command_index].name,
		     acpi_gbl_db_commands[command_index].min_args);

		acpi_db_display_command_info(acpi_gbl_db_commands
					     [command_index].name, FALSE);
		return (AE_CTRL_TRUE);
	}

	/* Decode and dispatch the command */

	switch (command_index) {
	case CMD_NULL:

		if (op) {
			return (AE_OK);
		}
		break;

	case CMD_ALL:

		acpi_os_printf("Executing all objects with NameSeg: %s\n",
			       acpi_gbl_db_args[1]);
		acpi_db_execute(acpi_gbl_db_args[1], &acpi_gbl_db_args[2],
				&acpi_gbl_db_arg_types[2],
				EX_NO_SINGLE_STEP | EX_ALL);
		break;

	case CMD_ALLOCATIONS:

#ifdef ACPI_DBG_TRACK_ALLOCATIONS
		acpi_ut_dump_allocations((u32)-1, NULL);
#endif
		break;

	case CMD_ARGS:
	case CMD_ARGUMENTS:

		acpi_db_display_arguments();
		break;

	case CMD_BREAKPOINT:

		acpi_db_set_method_breakpoint(acpi_gbl_db_args[1], walk_state,
					      op);
		break;

	case CMD_BUSINFO:

		acpi_db_get_bus_info();
		break;

	case CMD_CALL:

		acpi_db_set_method_call_breakpoint(op);
		status = AE_OK;
		break;

	case CMD_DEBUG:

		acpi_db_execute(acpi_gbl_db_args[1], &acpi_gbl_db_args[2],
				&acpi_gbl_db_arg_types[2], EX_SINGLE_STEP);
		break;

	case CMD_DISASSEMBLE:
	case CMD_DISASM:

#ifdef ACPI_DISASSEMBLER
		(void)acpi_db_disassemble_method(acpi_gbl_db_args[1]);
#else
		acpi_os_printf
		    ("The AML Disassembler is not configured/present\n");
#endif
		break;

	case CMD_DUMP:

		acpi_db_decode_and_display_object(acpi_gbl_db_args[1],
						  acpi_gbl_db_args[2]);
		break;

	case CMD_EVALUATE:
	case CMD_EXECUTE:

		acpi_db_execute(acpi_gbl_db_args[1], &acpi_gbl_db_args[2],
				&acpi_gbl_db_arg_types[2], EX_NO_SINGLE_STEP);
		break;

	case CMD_FIND:

		status = acpi_db_find_name_in_namespace(acpi_gbl_db_args[1]);
		break;

	case CMD_FIELDS:

		/* Range-check the address space ID before using it */

		status = acpi_ut_strtoul64(acpi_gbl_db_args[1], &temp64);

		if (ACPI_FAILURE(status)
		    || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
			acpi_os_printf
			    ("Invalid address space ID: must be between 0 and %u inclusive\n",
			     ACPI_NUM_PREDEFINED_REGIONS - 1);
			return (AE_OK);
		}

		status = acpi_db_display_fields((u32)temp64);
		break;

	case CMD_GO:

		acpi_gbl_cm_single_step = FALSE;
		return (AE_OK);

	case CMD_HANDLERS:

		acpi_db_display_handlers();
		break;

	case CMD_HELP:
	case CMD_HELP2:

		acpi_db_display_help(acpi_gbl_db_args[1]);
		break;

	case CMD_HISTORY:

		acpi_db_display_history();
		break;

	case CMD_HISTORY_EXE:	/* ! command */

		/* Re-dispatch a previous command (single-level recursion) */

		command_line = acpi_db_get_from_history(acpi_gbl_db_args[1]);
		if (!command_line) {
			return (AE_CTRL_TRUE);
		}

		status = acpi_db_command_dispatch(command_line, walk_state, op);
		return (status);

	case CMD_HISTORY_LAST:	/* !! command */

		command_line = acpi_db_get_from_history(NULL);
		if (!command_line) {
			return (AE_CTRL_TRUE);
		}

		status = acpi_db_command_dispatch(command_line, walk_state, op);
		return (status);

	case CMD_INFORMATION:

		acpi_db_display_method_info(op);
		break;

	case CMD_INTEGRITY:

		acpi_db_check_integrity();
		break;

	case CMD_INTO:

		if (op) {
			acpi_gbl_cm_single_step = TRUE;
			return (AE_OK);
		}
		break;

	case CMD_LEVEL:

		/* No args: show levels. Two args: console level. Else: file level. */

		if (param_count == 0) {
			acpi_os_printf
			    ("Current debug level for file output is: %8.8X\n",
			     acpi_gbl_db_debug_level);
			acpi_os_printf
			    ("Current debug level for console output is: %8.8X\n",
			     acpi_gbl_db_console_debug_level);
		} else if (param_count == 2) {
			temp = acpi_gbl_db_console_debug_level;
			acpi_gbl_db_console_debug_level =
			    strtoul(acpi_gbl_db_args[1], NULL, 16);
			acpi_os_printf
			    ("Debug Level for console output was %8.8X, now %8.8X\n",
			     temp, acpi_gbl_db_console_debug_level);
		} else {
			temp = acpi_gbl_db_debug_level;
			acpi_gbl_db_debug_level =
			    strtoul(acpi_gbl_db_args[1], NULL, 16);
			acpi_os_printf
			    ("Debug Level for file output was %8.8X, now %8.8X\n",
			     temp, acpi_gbl_db_debug_level);
		}
		break;

	case CMD_LIST:

#ifdef ACPI_DISASSEMBLER
		acpi_db_disassemble_aml(acpi_gbl_db_args[1], op);
#else
		acpi_os_printf
		    ("The AML Disassembler is not configured/present\n");
#endif
		break;

	case CMD_LOCKS:

		acpi_db_display_locks();
		break;

	case CMD_LOCALS:

		acpi_db_display_locals();
		break;

	case CMD_METHODS:

		status = acpi_db_display_objects("METHOD", acpi_gbl_db_args[1]);
		break;

	case CMD_NAMESPACE:

		acpi_db_dump_namespace(acpi_gbl_db_args[1],
				       acpi_gbl_db_args[2]);
		break;

	case CMD_NOTIFY:

		temp = strtoul(acpi_gbl_db_args[2], NULL, 0);
		acpi_db_send_notify(acpi_gbl_db_args[1], temp);
		break;

	case CMD_OBJECTS:

		acpi_ut_strupr(acpi_gbl_db_args[1]);
		status =
		    acpi_db_display_objects(acpi_gbl_db_args[1],
					    acpi_gbl_db_args[2]);
		break;

	case CMD_OSI:

		acpi_db_display_interfaces(acpi_gbl_db_args[1],
					   acpi_gbl_db_args[2]);
		break;

	case CMD_OWNER:

		acpi_db_dump_namespace_by_owner(acpi_gbl_db_args[1],
						acpi_gbl_db_args[2]);
		break;

	case CMD_PATHS:

		acpi_db_dump_namespace_paths();
		break;

	case CMD_PREFIX:

		acpi_db_set_scope(acpi_gbl_db_args[1]);
		break;

	case CMD_REFERENCES:

		acpi_db_find_references(acpi_gbl_db_args[1]);
		break;

	case CMD_RESOURCES:

		acpi_db_display_resources(acpi_gbl_db_args[1]);
		break;

	case CMD_RESULTS:

		acpi_db_display_results();
		break;

	case CMD_SET:

		acpi_db_set_method_data(acpi_gbl_db_args[1],
					acpi_gbl_db_args[2],
					acpi_gbl_db_args[3]);
		break;

	case CMD_STATS:

		status = acpi_db_display_statistics(acpi_gbl_db_args[1]);
		break;

	case CMD_STOP:

		return (AE_NOT_IMPLEMENTED);

	case CMD_TABLES:

		acpi_db_display_table_info(acpi_gbl_db_args[1]);
		break;

	case CMD_TEMPLATE:

		acpi_db_display_template(acpi_gbl_db_args[1]);
		break;

	case CMD_TRACE:

		acpi_db_trace(acpi_gbl_db_args[1], acpi_gbl_db_args[2],
			      acpi_gbl_db_args[3]);
		break;

	case CMD_TREE:

		acpi_db_display_calling_tree();
		break;

	case CMD_TYPE:

		acpi_db_display_object_type(acpi_gbl_db_args[1]);
		break;

#ifdef ACPI_APPLICATION

		/* Hardware simulation commands. */

	case CMD_ENABLEACPI:
#if (!ACPI_REDUCED_HARDWARE)

		status = acpi_enable();
		if (ACPI_FAILURE(status)) {
			acpi_os_printf("AcpiEnable failed (Status=%X)\n",
				       status);
			return (status);
		}
#endif				/* !ACPI_REDUCED_HARDWARE */
		break;

	case CMD_EVENT:

		acpi_os_printf("Event command not implemented\n");
		break;

	case CMD_INTERRUPT:

		acpi_db_generate_interrupt(acpi_gbl_db_args[1]);
		break;

	case CMD_GPE:

		acpi_db_generate_gpe(acpi_gbl_db_args[1], acpi_gbl_db_args[2]);
		break;

	case CMD_GPES:

		acpi_db_display_gpes();
		break;

	case CMD_SCI:

		acpi_db_generate_sci();
		break;

	case CMD_SLEEP:

		status = acpi_db_sleep(acpi_gbl_db_args[1]);
		break;

		/* File I/O commands. */

	case CMD_CLOSE:

		acpi_db_close_debug_file();
		break;

	case CMD_LOAD:{
			/* Block scope only to declare the table list head */
			struct acpi_new_table_desc *list_head = NULL;

			status =
			    ac_get_all_tables_from_file(acpi_gbl_db_args[1],
							ACPI_GET_ALL_TABLES,
							&list_head);
			if (ACPI_SUCCESS(status)) {
				acpi_db_load_tables(list_head);
			}
		}
		break;

	case CMD_OPEN:

		acpi_db_open_debug_file(acpi_gbl_db_args[1]);
		break;

		/* User space commands. */

	case CMD_TERMINATE:

		acpi_db_set_output_destination(ACPI_DB_REDIRECTABLE_OUTPUT);
		acpi_ut_subsystem_shutdown();

		/*
		 * TBD: [Restructure] Need some way to re-initialize without
		 * re-creating the semaphores!
		 */
		acpi_gbl_db_terminate_loop = TRUE;
		/* acpi_initialize (NULL); */
		break;

	case CMD_BACKGROUND:

		acpi_db_create_execution_thread(acpi_gbl_db_args[1],
						&acpi_gbl_db_args[2],
						&acpi_gbl_db_arg_types[2]);
		break;

	case CMD_THREADS:

		acpi_db_create_execution_threads(acpi_gbl_db_args[1],
						 acpi_gbl_db_args[2],
						 acpi_gbl_db_args[3]);
		break;

		/* Debug test commands. */

	case CMD_PREDEFINED:

		acpi_db_check_predefined_names();
		break;

	case CMD_TEST:

		acpi_db_execute_test(acpi_gbl_db_args[1]);
		break;

	case CMD_UNLOAD:

		acpi_db_unload_acpi_table(acpi_gbl_db_args[1]);
		break;
#endif

	case CMD_EXIT:
	case CMD_QUIT:

		if (op) {
			acpi_os_printf("Method execution terminated\n");
			return (AE_CTRL_TERMINATE);
		}

		if (!acpi_gbl_db_output_to_file) {
			acpi_dbg_level = ACPI_DEBUG_DEFAULT;
		}
#ifdef ACPI_APPLICATION
		acpi_db_close_debug_file();
#endif
		acpi_gbl_db_terminate_loop = TRUE;
		return (AE_CTRL_TERMINATE);

	case CMD_NOT_FOUND:
	default:

		acpi_os_printf("%s: unknown command\n", acpi_gbl_db_args[0]);
		return (AE_CTRL_TRUE);
	}

	/* Map plain success back to "continue the debugger loop" */

	if (ACPI_SUCCESS(status)) {
		status = AE_CTRL_TRUE;
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_db_execute_thread
 *
 * PARAMETERS:  context         - Not used
 *
 * RETURN:      None
 *
 * DESCRIPTION: Debugger execute thread. Waits for a command line, then
 *              simply dispatches it.
* ******************************************************************************/ void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context) { (void)acpi_db_user_commands(); acpi_gbl_db_threads_terminated = TRUE; } /******************************************************************************* * * FUNCTION: acpi_db_user_commands * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Command line execution for the AML debugger. Commands are * matched and dispatched here. * ******************************************************************************/ acpi_status acpi_db_user_commands(void) { acpi_status status = AE_OK; acpi_os_printf("\n"); /* TBD: [Restructure] Need a separate command line buffer for step mode */ while (!acpi_gbl_db_terminate_loop) { /* Wait the readiness of the command */ status = acpi_os_wait_command_ready(); if (ACPI_FAILURE(status)) { break; } /* Just call to the command line interpreter */ acpi_gbl_method_executing = FALSE; acpi_gbl_step_to_next_call = FALSE; (void)acpi_db_command_dispatch(acpi_gbl_db_line_buf, NULL, NULL); /* Notify the completion of the command */ status = acpi_os_notify_command_complete(); if (ACPI_FAILURE(status)) { break; } } if (ACPI_FAILURE(status) && status != AE_CTRL_TERMINATE) { ACPI_EXCEPTION((AE_INFO, status, "While parsing command line")); } return (status); }
linux-master
drivers/acpi/acpica/dbinput.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*******************************************************************************
 *
 * Module Name: rsmisc - Miscellaneous resource descriptors
 *
 ******************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"

#define _COMPONENT          ACPI_RESOURCES
ACPI_MODULE_NAME("rsmisc")

/*
 * Aliases for the overloaded fields of acpi_rsconvert_info: the first
 * (INIT) table entry and the EXIT/compare entries reuse the same three
 * struct members with different meanings.
 */
#define INIT_RESOURCE_TYPE(i)       i->resource_offset
#define INIT_RESOURCE_LENGTH(i)     i->aml_offset
#define INIT_TABLE_LENGTH(i)        i->value
#define COMPARE_OPCODE(i)           i->resource_offset
#define COMPARE_TARGET(i)           i->aml_offset
#define COMPARE_VALUE(i)            i->value

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_convert_aml_to_resource
 *
 * PARAMETERS:  resource            - Pointer to the resource descriptor
 *              aml                 - Where the AML descriptor is returned
 *              info                - Pointer to appropriate conversion table
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert an external AML resource descriptor to the corresponding
 *              internal resource descriptor
 *
 ******************************************************************************/

acpi_status
acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
				union aml_resource *aml,
				struct acpi_rsconvert_info *info)
{
	acpi_rs_length aml_resource_length;
	void *source;
	void *destination;
	char *target;
	u8 count;
	u8 flags_mode = FALSE;
	/* item_count and temp16 carry state between consecutive opcodes
	 * (e.g. a COUNT* entry initializes item_count for a later MOVE*) */
	u16 item_count = 0;
	u16 temp16 = 0;

	ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);

	if (!info) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	if (((acpi_size)resource) & 0x3) {

		/* Each internal resource struct is expected to be 32-bit aligned */

		ACPI_WARNING((AE_INFO,
			      "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
			      resource, resource->type, resource->length));
	}

	/* Extract the resource Length field (does not include header length) */

	aml_resource_length = acpi_ut_get_resource_length(aml);

	/*
	 * First table entry must be ACPI_RSC_INITxxx and must contain the
	 * table length (# of table entries)
	 */
	count = INIT_TABLE_LENGTH(info);
	while (count) {
		target = NULL;

		/*
		 * Source is the external AML byte stream buffer,
		 * destination is the internal resource descriptor
		 */
		source = ACPI_ADD_PTR(void, aml, info->aml_offset);
		destination =
		    ACPI_ADD_PTR(void, resource, info->resource_offset);

		switch (info->opcode) {
		case ACPI_RSC_INITGET:
			/*
			 * Get the resource type and the initial (minimum) length
			 */
			memset(resource, 0, INIT_RESOURCE_LENGTH(info));
			resource->type = INIT_RESOURCE_TYPE(info);
			resource->length = INIT_RESOURCE_LENGTH(info);
			break;

		case ACPI_RSC_INITSET:
			break;

		case ACPI_RSC_FLAGINIT:

			flags_mode = TRUE;
			break;

		case ACPI_RSC_1BITFLAG:
			/*
			 * Mask and shift the flag bit
			 */
			ACPI_SET8(destination,
				  ((ACPI_GET8(source) >> info->value) & 0x01));
			break;

		case ACPI_RSC_2BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET8(destination,
				  ((ACPI_GET8(source) >> info->value) & 0x03));
			break;

		case ACPI_RSC_3BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET8(destination,
				  ((ACPI_GET8(source) >> info->value) & 0x07));
			break;

		case ACPI_RSC_6BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET8(destination,
				  ((ACPI_GET8(source) >> info->value) & 0x3F));
			break;

		case ACPI_RSC_COUNT:

			item_count = ACPI_GET8(source);
			ACPI_SET8(destination, item_count);

			resource->length = resource->length +
			    (info->value * (item_count - 1));
			break;

		case ACPI_RSC_COUNT16:

			item_count = aml_resource_length;
			ACPI_SET16(destination, item_count);

			resource->length = resource->length +
			    (info->value * (item_count - 1));
			break;

		case ACPI_RSC_COUNT_GPIO_PIN:

			target = ACPI_ADD_PTR(void, aml, info->value);
			item_count = ACPI_GET16(target) - ACPI_GET16(source);

			resource->length = resource->length + item_count;
			/* Pin table entries are 16-bit; convert bytes to pins */
			item_count = item_count / 2;
			ACPI_SET16(destination, item_count);
			break;

		case ACPI_RSC_COUNT_GPIO_VEN:

			item_count = ACPI_GET8(source);
			ACPI_SET8(destination, item_count);

			resource->length = resource->length +
			    (info->value * item_count);
			break;

		case ACPI_RSC_COUNT_GPIO_RES:
			/*
			 * Vendor data is optional (length/offset may both be zero)
			 * Examine vendor data length field first
			 */
			target = ACPI_ADD_PTR(void, aml, (info->value + 2));
			if (ACPI_GET16(target)) {

				/* Use vendor offset to get resource source length */

				target = ACPI_ADD_PTR(void, aml, info->value);
				item_count =
				    ACPI_GET16(target) - ACPI_GET16(source);
			} else {
				/* No vendor data to worry about */

				item_count = aml->large_header.resource_length +
				    sizeof(struct aml_resource_large_header) -
				    ACPI_GET16(source);
			}

			resource->length = resource->length + item_count;
			ACPI_SET16(destination, item_count);
			break;

		case ACPI_RSC_COUNT_SERIAL_VEN:

			ACPI_MOVE_16_TO_16(&temp16, source);
			item_count = temp16 - info->value;

			resource->length = resource->length + item_count;
			ACPI_SET16(destination, item_count);
			break;

		case ACPI_RSC_COUNT_SERIAL_RES:

			ACPI_MOVE_16_TO_16(&temp16, source);
			item_count = (aml_resource_length +
				      sizeof(struct aml_resource_large_header))
			    - temp16 - info->value;

			resource->length = resource->length + item_count;
			ACPI_SET16(destination, item_count);
			break;

		case ACPI_RSC_LENGTH:

			resource->length = resource->length + info->value;
			break;

		case ACPI_RSC_MOVE8:
		case ACPI_RSC_MOVE16:
		case ACPI_RSC_MOVE32:
		case ACPI_RSC_MOVE64:
			/*
			 * Raw data move. Use the Info value field unless item_count has
			 * been previously initialized via a COUNT opcode
			 */
			if (info->value) {
				item_count = info->value;
			}
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_GPIO_PIN:

			/* Generate and set the PIN data pointer */

			target = (char *)ACPI_ADD_PTR(void, resource,
						      (resource->length -
						       item_count * 2));
			*(u16 **)destination = ACPI_CAST_PTR(u16, target);

			/* Copy the PIN data */

			source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
			acpi_rs_move_data(target, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_GPIO_RES:

			/* Generate and set the resource_source string pointer */

			target = (char *)ACPI_ADD_PTR(void, resource,
						      (resource->length -
						       item_count));
			*(u8 **)destination = ACPI_CAST_PTR(u8, target);

			/* Copy the resource_source string */

			source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
			acpi_rs_move_data(target, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_SERIAL_VEN:

			/* Generate and set the Vendor Data pointer */

			target = (char *)ACPI_ADD_PTR(void, resource,
						      (resource->length -
						       item_count));
			*(u8 **)destination = ACPI_CAST_PTR(u8, target);

			/* Copy the Vendor Data */

			source = ACPI_ADD_PTR(void, aml, info->value);
			acpi_rs_move_data(target, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_SERIAL_RES:

			/* Generate and set the resource_source string pointer */

			target = (char *)ACPI_ADD_PTR(void, resource,
						      (resource->length -
						       item_count));
			*(u8 **)destination = ACPI_CAST_PTR(u8, target);

			/* Copy the resource_source string */

			ACPI_MOVE_16_TO_16(&temp16, source);
			source =
			    ACPI_ADD_PTR(void, aml, (temp16 + info->value));
			acpi_rs_move_data(target, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_SET8:

			memset(destination, info->aml_offset, info->value);
			break;

		case ACPI_RSC_DATA8:

			target = ACPI_ADD_PTR(char, resource, info->value);
			memcpy(destination, source, ACPI_GET16(target));
			break;

		case ACPI_RSC_ADDRESS:
			/*
			 * Common handler for address descriptor flags
			 */
			if (!acpi_rs_get_address_common(resource, aml)) {
				return_ACPI_STATUS
				    (AE_AML_INVALID_RESOURCE_TYPE);
			}
			break;

		case ACPI_RSC_SOURCE:
			/*
			 * Optional resource_source (Index and String)
			 */
			resource->length +=
			    acpi_rs_get_resource_source(aml_resource_length,
							info->value,
							destination, aml, NULL);
			break;

		case ACPI_RSC_SOURCEX:
			/*
			 * Optional resource_source (Index and String). This is the more
			 * complicated case used by the Interrupt() macro
			 */
			target = ACPI_ADD_PTR(char, resource,
					      info->aml_offset +
					      (item_count * 4));

			resource->length +=
			    acpi_rs_get_resource_source(aml_resource_length,
							(acpi_rs_length)
							(((item_count -
							   1) * sizeof(u32)) +
							 info->value),
							destination, aml,
							target);
			break;

		case ACPI_RSC_BITMASK:
			/*
			 * 8-bit encoded bitmask (DMA macro)
			 */
			item_count =
			    acpi_rs_decode_bitmask(ACPI_GET8(source),
						   destination);
			if (item_count) {
				resource->length += (item_count - 1);
			}

			target = ACPI_ADD_PTR(char, resource, info->value);
			ACPI_SET8(target, item_count);
			break;

		case ACPI_RSC_BITMASK16:
			/*
			 * 16-bit encoded bitmask (IRQ macro)
			 */
			ACPI_MOVE_16_TO_16(&temp16, source);

			item_count =
			    acpi_rs_decode_bitmask(temp16, destination);
			if (item_count) {
				resource->length += (item_count - 1);
			}

			target = ACPI_ADD_PTR(char, resource, info->value);
			ACPI_SET8(target, item_count);
			break;

		case ACPI_RSC_EXIT_NE:
			/*
			 * control - Exit conversion if not equal
			 */
			switch (info->resource_offset) {
			case ACPI_RSC_COMPARE_AML_LENGTH:

				if (aml_resource_length != info->value) {
					goto exit;
				}
				break;

			case ACPI_RSC_COMPARE_VALUE:

				if (ACPI_GET8(source) != info->value) {
					goto exit;
				}
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Invalid conversion sub-opcode"));
				return_ACPI_STATUS(AE_BAD_PARAMETER);
			}
			break;

		default:

			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		count--;
		info++;
	}

exit:
	if (!flags_mode) {

		/* Round the resource struct length up to the next boundary (32 or 64) */

		resource->length = (u32)
		    ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
	}
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_rs_convert_resource_to_aml
 *
 * PARAMETERS:  resource            - Pointer to the resource descriptor
 *              aml                 - Where the AML descriptor is returned
 *              info                - Pointer to appropriate conversion table
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert an internal resource descriptor to the corresponding
 *              external AML resource descriptor.
 *
 ******************************************************************************/

acpi_status
acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
				union aml_resource *aml,
				struct acpi_rsconvert_info *info)
{
	void *source = NULL;
	void *destination;
	char *target;
	acpi_rsdesc_size aml_length = 0;
	u8 count;
	u16 temp16 = 0;
	/* item_count carries state between consecutive table opcodes
	 * (a COUNT* entry sets it for a later MOVE* or EXIT_LE entry) */
	u16 item_count = 0;

	ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);

	if (!info) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * First table entry must be ACPI_RSC_INITxxx and must contain the
	 * table length (# of table entries)
	 */
	count = INIT_TABLE_LENGTH(info);
	while (count) {
		/*
		 * Source is the internal resource descriptor,
		 * destination is the external AML byte stream buffer
		 */
		source = ACPI_ADD_PTR(void, resource, info->resource_offset);
		destination = ACPI_ADD_PTR(void, aml, info->aml_offset);

		switch (info->opcode) {
		case ACPI_RSC_INITSET:

			memset(aml, 0, INIT_RESOURCE_LENGTH(info));
			aml_length = INIT_RESOURCE_LENGTH(info);
			acpi_rs_set_resource_header(INIT_RESOURCE_TYPE(info),
						    aml_length, aml);
			break;

		case ACPI_RSC_INITGET:
			break;

		case ACPI_RSC_FLAGINIT:
			/*
			 * Clear the flag byte
			 */
			ACPI_SET8(destination, 0);
			break;

		case ACPI_RSC_1BITFLAG:
			/*
			 * Mask and shift the flag bit
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x01) << info->
				      value));
			break;

		case ACPI_RSC_2BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x03) << info->
				      value));
			break;

		case ACPI_RSC_3BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x07) << info->
				      value));
			break;

		case ACPI_RSC_6BITFLAG:
			/*
			 * Mask and shift the flag bits
			 */
			ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
				     ((ACPI_GET8(source) & 0x3F) << info->
				      value));
			break;

		case ACPI_RSC_COUNT:

			item_count = ACPI_GET8(source);
			ACPI_SET8(destination, item_count);

			aml_length = (u16)
			    (aml_length + (info->value * (item_count - 1)));
			break;

		case ACPI_RSC_COUNT16:

			item_count = ACPI_GET16(source);
			aml_length = (u16)(aml_length + item_count);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_GPIO_PIN:

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, aml_length);

			aml_length = (u16)(aml_length + item_count * 2);
			target = ACPI_ADD_PTR(void, aml, info->value);
			ACPI_SET16(target, aml_length);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_GPIO_VEN:

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, item_count);

			aml_length =
			    (u16)(aml_length + (info->value * item_count));
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_GPIO_RES:

			/* Set resource source string length */

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, aml_length);

			/* Compute offset for the Vendor Data */

			aml_length = (u16)(aml_length + item_count);
			target = ACPI_ADD_PTR(void, aml, info->value);

			/* Set vendor offset only if there is vendor data */

			ACPI_SET16(target, aml_length);

			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_SERIAL_VEN:

			item_count = ACPI_GET16(source);
			ACPI_SET16(destination, item_count + info->value);
			aml_length = (u16)(aml_length + item_count);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_COUNT_SERIAL_RES:

			item_count = ACPI_GET16(source);
			aml_length = (u16)(aml_length + item_count);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_LENGTH:

			acpi_rs_set_resource_length(info->value, aml);
			break;

		case ACPI_RSC_MOVE8:
		case ACPI_RSC_MOVE16:
		case ACPI_RSC_MOVE32:
		case ACPI_RSC_MOVE64:

			/* Info value overrides a previously set item_count */

			if (info->value) {
				item_count = info->value;
			}
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_GPIO_PIN:

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   ACPI_GET16
							   (destination));
			source = *(u16 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_GPIO_RES:

			/* Used for both resource_source string and vendor_data */

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   ACPI_GET16
							   (destination));
			source = *(u8 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_SERIAL_VEN:

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   (aml_length -
							    item_count));
			source = *(u8 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_MOVE_SERIAL_RES:

			destination = (char *)ACPI_ADD_PTR(void, aml,
							   (aml_length -
							    item_count));
			source = *(u8 **)source;
			acpi_rs_move_data(destination, source, item_count,
					  info->opcode);
			break;

		case ACPI_RSC_ADDRESS:

			/* Set the Resource Type, General Flags, and Type-Specific Flags */

			acpi_rs_set_address_common(aml, resource);
			break;

		case ACPI_RSC_SOURCEX:
			/*
			 * Optional resource_source (Index and String)
			 */
			aml_length =
			    acpi_rs_set_resource_source(aml,
							(acpi_rs_length)
							aml_length, source);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_SOURCE:
			/*
			 * Optional resource_source (Index and String). This is the more
			 * complicated case used by the Interrupt() macro
			 */
			aml_length =
			    acpi_rs_set_resource_source(aml, info->value,
							source);
			acpi_rs_set_resource_length(aml_length, aml);
			break;

		case ACPI_RSC_BITMASK:
			/*
			 * 8-bit encoded bitmask (DMA macro)
			 */
			ACPI_SET8(destination,
				  acpi_rs_encode_bitmask(source,
							 *ACPI_ADD_PTR(u8,
								       resource,
								       info->
								       value)));
			break;

		case ACPI_RSC_BITMASK16:
			/*
			 * 16-bit encoded bitmask (IRQ macro)
			 */
			temp16 =
			    acpi_rs_encode_bitmask(source,
						   *ACPI_ADD_PTR(u8, resource,
								 info->value));
			ACPI_MOVE_16_TO_16(destination, &temp16);
			break;

		case ACPI_RSC_EXIT_LE:
			/*
			 * control - Exit conversion if less than or equal
			 */
			if (item_count <= info->value) {
				goto exit;
			}
			break;

		case ACPI_RSC_EXIT_NE:
			/*
			 * control - Exit conversion if not equal
			 */
			switch (COMPARE_OPCODE(info)) {
			case ACPI_RSC_COMPARE_VALUE:

				if (*ACPI_ADD_PTR(u8, resource,
						  COMPARE_TARGET(info)) !=
				    COMPARE_VALUE(info)) {
					goto exit;
				}
				break;

			default:

				ACPI_ERROR((AE_INFO,
					    "Invalid conversion sub-opcode"));
				return_ACPI_STATUS(AE_BAD_PARAMETER);
			}
			break;

		case ACPI_RSC_EXIT_EQ:
			/*
			 * control - Exit conversion if equal
			 */
			if (*ACPI_ADD_PTR(u8, resource,
					  COMPARE_TARGET(info)) ==
			    COMPARE_VALUE(info)) {
				goto exit;
			}
			break;

		default:

			ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		count--;
		info++;
	}

exit:
	return_ACPI_STATUS(AE_OK);
}

/*
 * NOTE(review): everything below is compiled out (#if 0). It is retained
 * legacy validation code only; do not expect it to build.
 */
#if 0
/* Previous resource validations */
if (aml->ext_address64.revision_ID != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
	return_ACPI_STATUS(AE_SUPPORT);
}

if (resource->data.start_dpf.performance_robustness >= 3) {
	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_VALUE);
}

if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
	/*
	 * Only [active_high, edge_sensitive] or [active_low, level_sensitive]
	 * polarity/trigger interrupts are allowed (ACPI spec, section
	 * "IRQ Format"), so 0x00 and 0x09 are illegal.
	 */
	ACPI_ERROR((AE_INFO,
		    "Invalid interrupt polarity/trigger in resource list, 0x%X",
		    aml->irq.flags));
	return_ACPI_STATUS(AE_BAD_DATA);
}

/* Disabled legacy check: reject descriptors with no IRQs at all */

resource->data.extended_irq.interrupt_count = temp8;
if (temp8 < 1) {

	/* Must have at least one IRQ */

	return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
}

/* Disabled legacy check: DMA transfer preference 3 is reserved */

if (resource->data.dma.transfer == 0x03) {
	ACPI_ERROR((AE_INFO, "Invalid DMA.Transfer preference (3)"));
	return_ACPI_STATUS(AE_BAD_DATA);
}
#endif
linux-master
drivers/acpi/acpica/rsmisc.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeinit - System GPE initialization and update
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeinit")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/*
 * Note: History of _PRW support in ACPICA
 *
 * Originally (2000 - 2010), the GPE initialization code performed a walk of
 * the entire namespace to execute the _PRW methods and detect all GPEs
 * capable of waking the system.
 *
 * As of 10/2010, the _PRW method execution has been removed since it is
 * actually unnecessary. The host OS must in fact execute all _PRW methods
 * in order to identify the device/power-resource dependencies. We now put
 * the onus on the host OS to identify the wake GPEs as part of this process
 * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
 * not only reduces the complexity of the ACPICA initialization code, but in
 * some cases (on systems with very large namespaces) it should reduce the
 * kernel boot time as well.
 */
#ifdef ACPI_GPE_USE_LOGICAL_ADDRESSES
/*
 * When logical addressing is enabled, a memory-mapped GPE block is accessed
 * through the pre-mapped logical address instead of the raw FADT physical
 * address.
 */
#define ACPI_FADT_GPE_BLOCK_ADDRESS(N)	\
	acpi_gbl_FADT.xgpe##N##_block.space_id == \
					ACPI_ADR_SPACE_SYSTEM_MEMORY ? \
		(u64)acpi_gbl_xgpe##N##_block_logical_address : \
		acpi_gbl_FADT.xgpe##N##_block.address
#else
#define ACPI_FADT_GPE_BLOCK_ADDRESS(N)	acpi_gbl_FADT.xgpe##N##_block.address
#endif				/* ACPI_GPE_USE_LOGICAL_ADDRESSES */

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
 *
 ******************************************************************************/

acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;
	u64 address;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "Initializing General Purpose Events (GPEs):\n"));

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2: From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
	 * The length of the GPE1_STS and GPE1_EN registers is equal to
	 * half the GPE1_LEN. If a generic register block is not supported
	 * then its respective block pointer and block length values in the
	 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 * to be the same size."
	 */

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	address = ACPI_FADT_GPE_BLOCK_ADDRESS(0);

	if (acpi_gbl_FADT.gpe0_block_length && address) {

		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
		gpe_number_max =
		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  address,
						  acpi_gbl_FADT.xgpe0_block.
						  space_id, register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}

	address = ACPI_FADT_GPE_BLOCK_ADDRESS(1);

	if (acpi_gbl_FADT.gpe1_block_length && address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
				    "(GPE %u to %u) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status =
			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						     address,
						     acpi_gbl_FADT.xgpe1_block.
						     space_id, register_count1,
						     acpi_gbl_FADT.gpe1_base,
						     acpi_gbl_FADT.
						     sci_interrupt,
						     &acpi_gbl_gpe_fadt_blocks
						     [1]);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI, this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		goto cleanup;
	}

	/*
	 * Note: block-creation failures above are reported but do not fail
	 * this function; AE_OK is always returned after the mutex release.
	 */
cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpes
 *
 * PARAMETERS:  table_owner_id      - ID of the newly-loaded ACPI table
 *
 * RETURN:      None
 *
 * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
 *              result of a Load() or load_table() operation. If new GPE
 *              methods have been installed, register the new methods.
 *
 ******************************************************************************/

void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;
	acpi_status status = AE_OK;

	/*
	 * Find any _Lxx/_Exx GPE methods that have just been loaded.
	 *
	 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
	 * enabled.
	 *
	 * Examine the namespace underneath each gpe_device within the
	 * gpe_block lists.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return;
	}

	walk_info.count = 0;
	walk_info.owner_id = table_owner_id;
	walk_info.execute_by_owner_id = TRUE;

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {
			walk_info.gpe_block = gpe_block;
			walk_info.gpe_device = gpe_block->node;

			/*
			 * Namespace is already locked by the caller's mutex
			 * protocol, hence ACPI_NS_WALK_NO_UNLOCK.
			 */
			status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
							walk_info.gpe_device,
							ACPI_UINT32_MAX,
							ACPI_NS_WALK_NO_UNLOCK,
							acpi_ev_match_gpe_method,
							NULL, &walk_info, NULL);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"While decoding _Lxx/_Exx methods"));
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

	if (walk_info.count) {
		ACPI_INFO(("Enabled %u new GPEs", walk_info.count));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_gpe_method
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch. Allows a
 *              per-owner_id evaluation if execute_by_owner_id is TRUE in the
 *              walk_info parameter block.
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx", where:
 *                  L      - means that the GPE is level triggered
 *                  E      - means that the GPE is edge triggered
 *                  xx     - is the GPE number [in HEX]
 *
 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
 * with that owner.
 *
 ******************************************************************************/

acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
			 u32 level, void *context, void **return_value)
{
	struct acpi_namespace_node *method_node =
	    ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
	struct acpi_gpe_walk_info *walk_info =
	    ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
	struct acpi_gpe_event_info *gpe_event_info;
	acpi_status status;
	u32 gpe_number;
	u8 temp_gpe_number;
	char name[ACPI_NAMESEG_SIZE + 1];
	u8 type;

	ACPI_FUNCTION_TRACE(ev_match_gpe_method);

	/* Check if requested owner_id matches this owner_id */

	if ((walk_info->execute_by_owner_id) &&
	    (method_node->owner_id != walk_info->owner_id)) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Match and decode the _Lxx and _Exx GPE method names
	 *
	 * 1) Extract the method name and null terminate it
	 */
	ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
	name[ACPI_NAMESEG_SIZE] = 0;

	/* 2) Name must begin with an underscore */

	if (name[0] != '_') {
		return_ACPI_STATUS(AE_OK);	/* Ignore this method */
	}

	/*
	 * 3) Edge/Level determination is based on the 2nd character
	 *    of the method name
	 */
	switch (name[1]) {
	case 'L':

		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':

		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:

		/* Unknown method type, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s "
				  "(name not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* 4) The last two characters of the name are the hex GPE Number */

	status = acpi_ut_ascii_to_hex_byte(&name[2], &temp_gpe_number);
	if (ACPI_FAILURE(status)) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s "
				  "(name is not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	gpe_number = (u32)temp_gpe_number;
	gpe_event_info =
	    acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
	if (!gpe_event_info) {
		/*
		 * This gpe_number is not valid for this GPE block, just ignore it.
		 * However, it may be valid for a different GPE block, since GPE0
		 * and GPE1 methods both appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	     ACPI_GPE_DISPATCH_HANDLER) ||
	    (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	     ACPI_GPE_DISPATCH_RAW_HANDLER)) {

		/* If there is already a handler, ignore this GPE method */

		return_ACPI_STATUS(AE_OK);
	}

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_METHOD) {
		/*
		 * If there is already a method, ignore this method. But check
		 * for a type mismatch (if both the _Lxx AND _Exx exist)
		 */
		if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
			ACPI_ERROR((AE_INFO,
				    "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
				    gpe_number, gpe_number, gpe_number));
		}

		return_ACPI_STATUS(AE_OK);
	}

	/* Disable the GPE in case it's been enabled already. */

	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);

	/*
	 * Add the GPE information from above to the gpe_event_info block for
	 * use during dispatch of this GPE.
	 */
	gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
	gpe_event_info->dispatch.method_node = method_node;

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(AE_OK);
}

#endif				/* !ACPI_REDUCED_HARDWARE */
linux-master
drivers/acpi/acpica/evgpeinit.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
 *                         Address Spaces.
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#define EXPORT_ACPI_INTERFACES

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acevents.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evxfregn")

/*******************************************************************************
 *
 * FUNCTION:    acpi_install_address_space_handler_internal
 *
 * PARAMETERS:  device          - Handle for the device
 *              space_id        - The address space ID
 *              handler         - Address of the handler
 *              setup           - Address of the setup function
 *              context         - Value passed to the handler on each access
 *              Run_reg         - Run _REG methods for this address space?
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install a handler for all op_regions of a given space_id.
 *
 * NOTE: This function should only be called after acpi_enable_subsystem has
 * been called. This is because any _REG methods associated with the Space ID
 * are executed here, and these methods can only be safely executed after
 * the default handlers have been installed and the hardware has been
 * initialized (via acpi_enable_subsystem.)
 * To avoid this problem pass FALSE for Run_Reg and later on call
 * acpi_execute_reg_methods() to execute _REG.
 *
 ******************************************************************************/
static acpi_status
acpi_install_address_space_handler_internal(acpi_handle device,
					    acpi_adr_space_type space_id,
					    acpi_adr_space_handler handler,
					    acpi_adr_space_setup setup,
					    void *context, u8 run_reg)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Traced under the public-interface name on purpose */

	ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);

	/* Parameter validation */

	if (!device) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert and validate the device handle */

	node = acpi_ns_validate_handle(device);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Install the handler for all Regions for this Space ID */

	status =
	    acpi_ev_install_space_handler(node, space_id, handler, setup,
					  context);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Run all _REG methods for this address space */

	if (run_reg) {
		acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}

/* Public wrapper: install handler and immediately run _REG methods */

acpi_status
acpi_install_address_space_handler(acpi_handle device,
				   acpi_adr_space_type space_id,
				   acpi_adr_space_handler handler,
				   acpi_adr_space_setup setup, void *context)
{
	return acpi_install_address_space_handler_internal(device, space_id,
							   handler, setup,
							   context, TRUE);
}

ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)

/*
 * Public wrapper: install handler but defer _REG execution; the caller must
 * invoke acpi_execute_reg_methods() later.
 */
acpi_status
acpi_install_address_space_handler_no_reg(acpi_handle device,
					  acpi_adr_space_type space_id,
					  acpi_adr_space_handler handler,
					  acpi_adr_space_setup setup,
					  void *context)
{
	return acpi_install_address_space_handler_internal(device, space_id,
							   handler, setup,
							   context, FALSE);
}

ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler_no_reg)

/*******************************************************************************
 *
 * FUNCTION:    acpi_remove_address_space_handler
 *
 * PARAMETERS:  device          - Handle for the device
 *              space_id        - The address space ID
 *              handler         - Address of the handler
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a previously installed handler.
 *
 ******************************************************************************/
acpi_status
acpi_remove_address_space_handler(acpi_handle device,
				  acpi_adr_space_type space_id,
				  acpi_adr_space_handler handler)
{
	union acpi_operand_object *obj_desc;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj;
	union acpi_operand_object **last_obj_ptr;
	struct acpi_namespace_node *node;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);

	/* Parameter validation */

	if (!device) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert and validate the device handle */

	node = acpi_ns_validate_handle(device);
	if (!node ||
	    ((node->type != ACPI_TYPE_DEVICE) &&
	     (node->type != ACPI_TYPE_PROCESSOR) &&
	     (node->type != ACPI_TYPE_THERMAL) &&
	     (node != acpi_gbl_root_node))) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Make sure the internal object exists */

	obj_desc = acpi_ns_get_attached_object(node);
	if (!obj_desc) {
		status = AE_NOT_EXIST;
		goto unlock_and_exit;
	}

	/* Find the address handler the user requested */

	handler_obj = obj_desc->common_notify.handler;
	last_obj_ptr = &obj_desc->common_notify.handler;
	while (handler_obj) {

		/* We have a handler, see if user requested this one */

		if (handler_obj->address_space.space_id == space_id) {

			/* Handler must be the same as the installed handler */

			if (handler_obj->address_space.handler != handler) {
				status = AE_BAD_PARAMETER;
				goto unlock_and_exit;
			}

			/* Matched space_id, first dereference this in the Regions */

			ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
					  "Removing address handler %p(%p) for region %s "
					  "on Device %p(%p)\n",
					  handler_obj, handler,
					  acpi_ut_get_region_name(space_id),
					  node, obj_desc));

			region_obj = handler_obj->address_space.region_list;

			/* Walk the handler's region list */

			while (region_obj) {
				/*
				 * First disassociate the handler from the region.
				 *
				 * NOTE: this doesn't mean that the region goes away
				 * The region is just inaccessible as indicated to
				 * the _REG method
				 */
				acpi_ev_detach_region(region_obj, TRUE);

				/*
				 * Walk the list: Just grab the head because the
				 * detach_region removed the previous head.
				 */
				region_obj =
				    handler_obj->address_space.region_list;
			}

			/* Remove this Handler object from the list */

			*last_obj_ptr = handler_obj->address_space.next;

			/* Now we can delete the handler object */

			acpi_os_release_mutex(handler_obj->address_space.
					      context_mutex);
			acpi_ut_remove_reference(handler_obj);
			goto unlock_and_exit;
		}

		/* Walk the linked list of handlers */

		last_obj_ptr = &handler_obj->address_space.next;
		handler_obj = handler_obj->address_space.next;
	}

	/* The handler does not exist */

	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
			  "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
			  handler, acpi_ut_get_region_name(space_id), space_id,
			  node, obj_desc));

	status = AE_NOT_EXIST;

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)

/*******************************************************************************
 *
 * FUNCTION:    acpi_execute_reg_methods
 *
 * PARAMETERS:  device          - Handle for the device
 *              space_id        - The address space ID
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute _REG for all op_regions of a given space_id.
 *
 ******************************************************************************/
acpi_status
acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_execute_reg_methods);

	/* Parameter validation */

	if (!device) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert and validate the device handle */

	node = acpi_ns_validate_handle(device);
	if (node) {

		/* Run all _REG methods for this address space */

		acpi_ev_execute_reg_methods(node, space_id, ACPI_REG_CONNECT);
	} else {
		status = AE_BAD_PARAMETER;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
linux-master
drivers/acpi/acpica/evxfregn.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes)
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "actables.h"
#include "acdispat.h"
#include "acevents.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exconfig")

/* Local prototypes */
static acpi_status
acpi_ex_add_table(u32 table_index, union acpi_operand_object **ddb_handle);

static acpi_status
acpi_ex_region_read(union acpi_operand_object *obj_desc,
		    u32 length, u8 *buffer);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_add_table
 *
 * PARAMETERS:  table_index         - Index of the table within the table
 *                                    manager
 *              ddb_handle          - Where to return the table handle.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Common function to Install and Load an ACPI table with a
 *              returned table handle.
 *
 ******************************************************************************/

static acpi_status
acpi_ex_add_table(u32 table_index, union acpi_operand_object **ddb_handle)
{
	union acpi_operand_object *obj_desc;

	ACPI_FUNCTION_TRACE(ex_add_table);

	/* Create an object to be the table handle */

	obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Init the table handle */

	obj_desc->common.flags |= AOPOBJ_DATA_VALID;
	obj_desc->reference.class = ACPI_REFCLASS_TABLE;
	obj_desc->reference.value = table_index;
	*ddb_handle = obj_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_load_table_op
 *
 * PARAMETERS:  walk_state          - Current state with operands
 *              return_desc         - Where to store the return object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
 *
 ******************************************************************************/

acpi_status
acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
		      union acpi_operand_object **return_desc)
{
	acpi_status status;
	union acpi_operand_object **operand = &walk_state->operands[0];
	struct acpi_namespace_node *parent_node;
	struct acpi_namespace_node *start_node;
	struct acpi_namespace_node *parameter_node = NULL;
	union acpi_operand_object *return_obj;
	union acpi_operand_object *ddb_handle;
	u32 table_index;

	ACPI_FUNCTION_TRACE(ex_load_table_op);

	/* Create the return object */

	return_obj = acpi_ut_create_integer_object((u64)0);
	if (!return_obj) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	*return_desc = return_obj;

	/*
	 * Find the ACPI table in the RSDT/XSDT.
	 * Operands 0-2 are the Signature, OEMID, and OEM Table ID strings.
	 * The interpreter lock is dropped across the table-manager call.
	 */
	acpi_ex_exit_interpreter();
	status = acpi_tb_find_table(operand[0]->string.pointer,
				    operand[1]->string.pointer,
				    operand[2]->string.pointer, &table_index);
	acpi_ex_enter_interpreter();
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			return_ACPI_STATUS(status);
		}

		/* Table not found, return an Integer=0 and AE_OK */

		return_ACPI_STATUS(AE_OK);
	}

	/* Default nodes */

	start_node = walk_state->scope_info->scope.node;
	parent_node = acpi_gbl_root_node;

	/* root_path (optional parameter) */

	if (operand[3]->string.length > 0) {
		/*
		 * Find the node referenced by the root_path_string. This is the
		 * location within the namespace where the table will be loaded.
		 */
		status = acpi_ns_get_node_unlocked(start_node,
						   operand[3]->string.pointer,
						   ACPI_NS_SEARCH_PARENT,
						   &parent_node);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* parameter_path (optional parameter) */

	if (operand[4]->string.length > 0) {
		if ((operand[4]->string.pointer[0] != AML_ROOT_PREFIX) &&
		    (operand[4]->string.pointer[0] != AML_PARENT_PREFIX)) {
			/*
			 * Path is not absolute, so it will be relative to the node
			 * referenced by the root_path_string (or the NS root if omitted)
			 */
			start_node = parent_node;
		}

		/* Find the node referenced by the parameter_path_string */

		status = acpi_ns_get_node_unlocked(start_node,
						   operand[4]->string.pointer,
						   ACPI_NS_SEARCH_PARENT,
						   &parameter_node);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* Load the table into the namespace */

	ACPI_INFO(("Dynamic OEM Table Load:"));
	acpi_ex_exit_interpreter();
	status = acpi_tb_load_table(table_index, parent_node);
	acpi_ex_enter_interpreter();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_ex_add_table(table_index, &ddb_handle);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Complete the initialization/resolution of new objects */

	acpi_ex_exit_interpreter();
	acpi_ns_initialize_objects();
	acpi_ex_enter_interpreter();

	/* Parameter Data (optional) */

	if (parameter_node) {

		/* Store the parameter data into the optional parameter object */

		status = acpi_ex_store(operand[5],
				       ACPI_CAST_PTR(union acpi_operand_object,
						     parameter_node),
				       walk_state);
		if (ACPI_FAILURE(status)) {

			/* On store failure, roll back the table load */

			(void)acpi_ex_unload_table(ddb_handle);

			acpi_ut_remove_reference(ddb_handle);
			return_ACPI_STATUS(status);
		}
	}

	/* Remove the reference to ddb_handle created by acpi_ex_add_table above */

	acpi_ut_remove_reference(ddb_handle);

	/* Return -1 (non-zero) indicates success */

	return_obj->integer.value = 0xFFFFFFFFFFFFFFFF;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_region_read
 *
 * PARAMETERS:  obj_desc        - Region descriptor
 *              length          - Number of bytes to read
 *              buffer          - Pointer to where to put the data
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Read data from an operation region. The read starts from the
 *              beginning of the region.
 *
 ******************************************************************************/

static acpi_status
acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
{
	acpi_status status;
	u64 value;
	u32 region_offset = 0;
	u32 i;

	/* Bytewise reads (8-bit access width per dispatch call) */

	for (i = 0; i < length; i++) {
		status =
		    acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
						   region_offset, 8, &value);
		if (ACPI_FAILURE(status)) {
			return (status);
		}

		*buffer = (u8)value;
		buffer++;
		region_offset++;
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_load_op
 *
 * PARAMETERS:  obj_desc        - Region or Buffer/Field where the table will be
 *                                obtained
 *              target          - Where the status of the load will be stored
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Load an ACPI table from a field or operation region
 *
 * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
 *       objects before this code is reached.
 *
 *       If source is an operation region, it must refer to system_memory, as
 *       per the ACPI specification.
 *
 ******************************************************************************/

acpi_status
acpi_ex_load_op(union acpi_operand_object *obj_desc,
		union acpi_operand_object *target,
		struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *ddb_handle;
	struct acpi_table_header *table_header;
	struct acpi_table_header *table;
	u32 table_index;
	acpi_status status;
	u32 length;

	ACPI_FUNCTION_TRACE(ex_load_op);

	/* Resolve a namespace node target to its attached object */

	if (target->common.descriptor_type == ACPI_DESC_TYPE_NAMED) {
		target =
		    acpi_ns_get_attached_object(ACPI_CAST_PTR
						(struct acpi_namespace_node,
						 target));
	}
	if (target->common.type != ACPI_TYPE_INTEGER) {
		ACPI_ERROR((AE_INFO, "Type not integer: %X",
			    target->common.type));
		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	target->integer.value = 0;

	/* Source Object can be either an op_region or a Buffer/Field */

	switch (obj_desc->common.type) {
	case ACPI_TYPE_REGION:

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Load table from Region %p\n", obj_desc));

		/* Region must be system_memory (from ACPI spec) */

		if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/*
		 * If the Region Address and Length have not been previously
		 * evaluated, evaluate them now and save the results.
		 */
		if (!(obj_desc->common.flags & AOPOBJ_DATA_VALID)) {
			status = acpi_ds_get_region_arguments(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/* Get the table header first so we can get the table length */

		table_header = ACPI_ALLOCATE(sizeof(struct acpi_table_header));
		if (!table_header) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		status =
		    acpi_ex_region_read(obj_desc,
					sizeof(struct acpi_table_header),
					ACPI_CAST_PTR(u8, table_header));

		/*
		 * NOTE(review): length is read from the header before the read
		 * status is checked; on failure the value is discarded via the
		 * early return below, but confirm the header buffer cannot be
		 * consumed uninitialized here.
		 */
		length = table_header->length;
		ACPI_FREE(table_header);

		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* Must have at least an ACPI table header */

		if (length < sizeof(struct acpi_table_header)) {
			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
		}

		/*
		 * The original implementation simply mapped the table, with no copy.
		 * However, the memory region is not guaranteed to remain stable and
		 * we must copy the table to a local buffer. For example, the memory
		 * region is corrupted after suspend on some machines. Dynamically
		 * loaded tables are usually small, so this overhead is minimal.
		 *
		 * The latest implementation (5/2009) does not use a mapping at all.
		 * We use the low-level operation region interface to read the table
		 * instead of the obvious optimization of using a direct mapping.
		 * This maintains a consistent use of operation regions across the
		 * entire subsystem. This is important if additional processing must
		 * be performed in the (possibly user-installed) operation region
		 * handler. For example, acpi_exec and ASLTS depend on this.
		 */

		/* Allocate a buffer for the table */

		table = ACPI_ALLOCATE(length);
		if (!table) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Read the entire table */

		status = acpi_ex_region_read(obj_desc, length,
					     ACPI_CAST_PTR(u8, table));
		if (ACPI_FAILURE(status)) {
			ACPI_FREE(table);
			return_ACPI_STATUS(status);
		}
		break;

	case ACPI_TYPE_BUFFER:	/* Buffer or resolved region_field */

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "Load table from Buffer or Field %p\n",
				  obj_desc));

		/* Must have at least an ACPI table header */

		if (obj_desc->buffer.length < sizeof(struct acpi_table_header)) {
			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
		}

		/* Get the actual table length from the table header */

		table_header =
		    ACPI_CAST_PTR(struct acpi_table_header,
				  obj_desc->buffer.pointer);
		length = table_header->length;

		/* Table cannot extend beyond the buffer */

		if (length > obj_desc->buffer.length) {
			return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
		}
		if (length < sizeof(struct acpi_table_header)) {
			return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
		}

		/*
		 * Copy the table from the buffer because the buffer could be
		 * modified or even deleted in the future
		 */
		table = ACPI_ALLOCATE(length);
		if (!table) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		memcpy(table, table_header, length);
		break;

	default:

		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	/* Install the new table into the local data structures */

	ACPI_INFO(("Dynamic OEM Table Load:"));
	acpi_ex_exit_interpreter();
	status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
						ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
						table, TRUE, &table_index);
	acpi_ex_enter_interpreter();
	if (ACPI_FAILURE(status)) {

		/* Delete allocated table buffer */

		ACPI_FREE(table);
		return_ACPI_STATUS(status);
	}

	/*
	 * Add the table to the namespace.
	 *
	 * Note: Load the table objects relative to the root of the namespace.
	 * This appears to go against the ACPI specification, but we do it for
	 * compatibility with other ACPI implementations.
	 */
	status = acpi_ex_add_table(table_index, &ddb_handle);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Complete the initialization/resolution of new objects */

	acpi_ex_exit_interpreter();
	acpi_ns_initialize_objects();
	acpi_ex_enter_interpreter();

	/* Remove the reference to ddb_handle created by acpi_ex_add_table above */

	acpi_ut_remove_reference(ddb_handle);

	/* Return -1 (non-zero) indicates success */

	target->integer.value = 0xFFFFFFFFFFFFFFFF;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_unload_table
 *
 * PARAMETERS:  ddb_handle          - Handle to a previously loaded table
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unload an ACPI table
 *
 ******************************************************************************/

acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *table_desc = ddb_handle;
	u32 table_index;

	ACPI_FUNCTION_TRACE(ex_unload_table);

	/*
	 * Temporarily emit a warning so that the ASL for the machine can be
	 * hopefully obtained. This is to say that the Unload() operator is
	 * extremely rare if not completely unused.
	 */
	ACPI_WARNING((AE_INFO, "Received request to unload an ACPI table"));

	/*
	 * May 2018: Unload is no longer supported for the following reasons:
	 * 1) A correct implementation on some hosts may not be possible.
	 * 2) Other ACPI implementations do not correctly/fully support it.
	 * 3) It requires host device driver support which does not exist.
	 *    (To properly support namespace unload out from underneath.)
	 * 4) This AML operator has never been seen in the field.
	 */
	ACPI_EXCEPTION((AE_INFO, AE_NOT_IMPLEMENTED,
			"AML Unload operator is not supported"));

	/*
	 * Validate the handle
	 * Although the handle is partially validated in acpi_ex_reconfiguration()
	 * when it calls acpi_ex_resolve_operands(), the handle is more completely
	 * validated here.
	 *
	 * Handle must be a valid operand object of type reference. Also, the
	 * ddb_handle must still be marked valid (table has not been previously
	 * unloaded)
	 */
	if ((!ddb_handle) ||
	    (ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
	    (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE) ||
	    (!(ddb_handle->common.flags & AOPOBJ_DATA_VALID))) {
		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	/* Get the table index from the ddb_handle */

	table_index = table_desc->reference.value;

	/*
	 * Release the interpreter lock so that the table lock won't have
	 * strict order requirement against it.
	 */
	acpi_ex_exit_interpreter();
	status = acpi_tb_unload_table(table_index);
	acpi_ex_enter_interpreter();

	/*
	 * Invalidate the handle. We do this because the handle may be stored
	 * in a named object and may not be actually deleted until much later.
	 */
	if (ACPI_SUCCESS(status)) {
		ddb_handle->common.flags &= ~AOPOBJ_DATA_VALID;
	}
	return_ACPI_STATUS(status);
}
linux-master
drivers/acpi/acpica/exconfig.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: utalloc - local memory allocation routines
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdebug.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utalloc")
#if !defined (USE_NATIVE_ALLOCATE_ZEROED)
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_allocate_zeroed
 *
 * PARAMETERS:  size                - Size of the allocation
 *
 * RETURN:      Address of the allocated memory on success, NULL on failure.
 *
 * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
 *              This is the default implementation. Can be overridden via the
 *              USE_NATIVE_ALLOCATE_ZEROED flag.
 *
 ******************************************************************************/
void *acpi_os_allocate_zeroed(acpi_size size)
{
	void *allocation;

	ACPI_FUNCTION_ENTRY();

	allocation = acpi_os_allocate(size);
	if (allocation) {

		/* Clear the memory block */

		memset(allocation, 0, size);
	}

	return (allocation);
}
#endif				/* !USE_NATIVE_ALLOCATE_ZEROED */

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_create_caches
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create all local caches. On the first failure the function
 *              returns immediately; caches created so far are torn down
 *              later by acpi_ut_delete_caches.
 *
 ******************************************************************************/

acpi_status acpi_ut_create_caches(void)
{
	acpi_status status;

	/* Object Caches, for frequently used objects */

	status =
	    acpi_os_create_cache("Acpi-Namespace",
				 sizeof(struct acpi_namespace_node),
				 ACPI_MAX_NAMESPACE_CACHE_DEPTH,
				 &acpi_gbl_namespace_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status =
	    acpi_os_create_cache("Acpi-State", sizeof(union acpi_generic_state),
				 ACPI_MAX_STATE_CACHE_DEPTH,
				 &acpi_gbl_state_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status =
	    acpi_os_create_cache("Acpi-Parse",
				 sizeof(struct acpi_parse_obj_common),
				 ACPI_MAX_PARSE_CACHE_DEPTH,
				 &acpi_gbl_ps_node_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status =
	    acpi_os_create_cache("Acpi-ParseExt",
				 sizeof(struct acpi_parse_obj_named),
				 ACPI_MAX_EXTPARSE_CACHE_DEPTH,
				 &acpi_gbl_ps_node_ext_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status =
	    acpi_os_create_cache("Acpi-Operand",
				 sizeof(union acpi_operand_object),
				 ACPI_MAX_OBJECT_CACHE_DEPTH,
				 &acpi_gbl_operand_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

#ifdef ACPI_ASL_COMPILER
	/*
	 * For use with the ASL-/ASL+ option. This cache keeps track of regular
	 * 0xA9 0x01 comments.
	 */
	status =
	    acpi_os_create_cache("Acpi-Comment",
				 sizeof(struct acpi_comment_node),
				 ACPI_MAX_COMMENT_CACHE_DEPTH,
				 &acpi_gbl_reg_comment_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/*
	 * This cache keeps track of the starting addresses of where the comments
	 * lie. This helps prevent duplication of comments.
	 */
	status =
	    acpi_os_create_cache("Acpi-Comment-Addr",
				 sizeof(struct acpi_comment_addr_node),
				 ACPI_MAX_COMMENT_CACHE_DEPTH,
				 &acpi_gbl_comment_addr_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/*
	 * This cache will be used for nodes that represent files.
	 */
	status =
	    acpi_os_create_cache("Acpi-File", sizeof(struct acpi_file_node),
				 ACPI_MAX_COMMENT_CACHE_DEPTH,
				 &acpi_gbl_file_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}
#endif

#ifdef ACPI_DBG_TRACK_ALLOCATIONS

	/* Memory allocation lists */

	status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	status =
	    acpi_ut_create_list("Acpi-Namespace",
				sizeof(struct acpi_namespace_node),
				&acpi_gbl_ns_node_list);
	if (ACPI_FAILURE(status)) {
		return (status);
	}
#endif

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_delete_caches
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Purge and delete all local caches. Each cache pointer is
 *              NULLed after deletion so a second call is harmless.
 *
 ******************************************************************************/

acpi_status acpi_ut_delete_caches(void)
{
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
	char buffer[7];

	if (acpi_gbl_display_final_mem_stats) {
		strcpy(buffer, "MEMORY");
		(void)acpi_db_display_statistics(buffer);
	}
#endif

	(void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
	acpi_gbl_namespace_cache = NULL;

	(void)acpi_os_delete_cache(acpi_gbl_state_cache);
	acpi_gbl_state_cache = NULL;

	(void)acpi_os_delete_cache(acpi_gbl_operand_cache);
	acpi_gbl_operand_cache = NULL;

	(void)acpi_os_delete_cache(acpi_gbl_ps_node_cache);
	acpi_gbl_ps_node_cache = NULL;

	(void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
	acpi_gbl_ps_node_ext_cache = NULL;

#ifdef ACPI_ASL_COMPILER
	(void)acpi_os_delete_cache(acpi_gbl_reg_comment_cache);
	acpi_gbl_reg_comment_cache = NULL;

	(void)acpi_os_delete_cache(acpi_gbl_comment_addr_cache);
	acpi_gbl_comment_addr_cache = NULL;

	(void)acpi_os_delete_cache(acpi_gbl_file_cache);
	acpi_gbl_file_cache = NULL;
#endif

#ifdef ACPI_DBG_TRACK_ALLOCATIONS

	/* Debug only - display leftover memory allocation, if any */

	acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);

	/* Free memory lists */

	acpi_os_free(acpi_gbl_global_list);
	acpi_gbl_global_list = NULL;

	acpi_os_free(acpi_gbl_ns_node_list);
	acpi_gbl_ns_node_list = NULL;
#endif

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_validate_buffer
 *
 * PARAMETERS:  buffer              - Buffer descriptor to be validated
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Perform parameter validation checks on an struct acpi_buffer
 *
 ******************************************************************************/

acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer)
{

	/* Obviously, the structure pointer must be valid */

	if (!buffer) {
		return (AE_BAD_PARAMETER);
	}

	/* Special semantics for the length: these sentinels request allocation */

	if ((buffer->length == ACPI_NO_BUFFER) ||
	    (buffer->length == ACPI_ALLOCATE_BUFFER) ||
	    (buffer->length == ACPI_ALLOCATE_LOCAL_BUFFER)) {
		return (AE_OK);
	}

	/* Length is valid, the buffer pointer must be also */

	if (!buffer->pointer) {
		return (AE_BAD_PARAMETER);
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_initialize_buffer
 *
 * PARAMETERS:  buffer              - Buffer to be validated
 *              required_length     - Length needed
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Validate that the buffer is of the required length or
 *              allocate a new buffer. Returned buffer is always zeroed.
 *
 ******************************************************************************/

acpi_status
acpi_ut_initialize_buffer(struct acpi_buffer *buffer, acpi_size required_length)
{
	acpi_size input_buffer_length;

	/* Parameter validation */

	if (!buffer || !required_length) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Buffer->Length is used as both an input and output parameter. Get the
	 * input actual length and set the output required buffer length.
	 */
	input_buffer_length = buffer->length;
	buffer->length = required_length;

	/*
	 * The input buffer length contains the actual buffer length, or the type
	 * of buffer to be allocated by this routine.
	 */
	switch (input_buffer_length) {
	case ACPI_NO_BUFFER:

		/* Return the exception (and the required buffer length) */

		return (AE_BUFFER_OVERFLOW);

	case ACPI_ALLOCATE_BUFFER:
		/*
		 * Allocate a new buffer. We directly call acpi_os_allocate here to
		 * purposefully bypass the (optionally enabled) internal allocation
		 * tracking mechanism since we only want to track internal
		 * allocations. Note: The caller should use acpi_os_free to free this
		 * buffer created via ACPI_ALLOCATE_BUFFER.
		 */
		buffer->pointer = acpi_os_allocate(required_length);
		break;

	case ACPI_ALLOCATE_LOCAL_BUFFER:

		/* Allocate a new buffer with local interface to allow tracking */

		buffer->pointer = ACPI_ALLOCATE(required_length);
		break;

	default:

		/* Existing buffer: Validate the size of the buffer */

		if (input_buffer_length < required_length) {
			return (AE_BUFFER_OVERFLOW);
		}
		break;
	}

	/* Validate allocation from above or input buffer pointer */

	if (!buffer->pointer) {
		return (AE_NO_MEMORY);
	}

	/* Have a valid buffer, clear it */

	memset(buffer->pointer, 0, required_length);
	return (AE_OK);
}
linux-master
drivers/acpi/acpica/utalloc.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: uthex -- Hex/ASCII support functions * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_COMPILER ACPI_MODULE_NAME("uthex") /* Hex to ASCII conversion table */ static const char acpi_gbl_hex_to_ascii[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; /******************************************************************************* * * FUNCTION: acpi_ut_hex_to_ascii_char * * PARAMETERS: integer - Contains the hex digit * position - bit position of the digit within the * integer (multiple of 4) * * RETURN: The converted Ascii character * * DESCRIPTION: Convert a hex digit to an Ascii character * ******************************************************************************/ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position) { u64 index; acpi_ut_short_shift_right(integer, position, &index); return (acpi_gbl_hex_to_ascii[index & 0xF]); } /******************************************************************************* * * FUNCTION: acpi_ut_ascii_to_hex_byte * * PARAMETERS: two_ascii_chars - Pointer to two ASCII characters * return_byte - Where converted byte is returned * * RETURN: Status and converted hex byte * * DESCRIPTION: Perform ascii-to-hex translation, exactly two ASCII characters * to a single converted byte value. 
* ******************************************************************************/ acpi_status acpi_ut_ascii_to_hex_byte(char *two_ascii_chars, u8 *return_byte) { /* Both ASCII characters must be valid hex digits */ if (!isxdigit((int)two_ascii_chars[0]) || !isxdigit((int)two_ascii_chars[1])) { return (AE_BAD_HEX_CONSTANT); } *return_byte = acpi_ut_ascii_char_to_hex(two_ascii_chars[1]) | (acpi_ut_ascii_char_to_hex(two_ascii_chars[0]) << 4); return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_ascii_char_to_hex * * PARAMETERS: hex_char - Hex character in Ascii. Must be: * 0-9 or A-F or a-f * * RETURN: The binary value of the ascii/hex character * * DESCRIPTION: Perform ascii-to-hex translation * ******************************************************************************/ u8 acpi_ut_ascii_char_to_hex(int hex_char) { /* Values 0-9 */ if (hex_char <= '9') { return ((u8)(hex_char - '0')); } /* Upper case A-F */ if (hex_char <= 'F') { return ((u8)(hex_char - 0x37)); } /* Lower case a-f */ return ((u8)(hex_char - 0x57)); }
linux-master
drivers/acpi/acpica/uthex.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: utcopy - Internal to external object translation utilities
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utcopy")

/* Local prototypes */
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
				union acpi_object *external_object,
				u8 *data_space, acpi_size *buffer_space_used);

static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
				  union acpi_operand_object *source_object,
				  union acpi_generic_state *state,
				  void *context);

static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
				  u8 *buffer, acpi_size *space_used);

static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
				union acpi_operand_object **return_obj);

static acpi_status
acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
				  union acpi_operand_object **internal_object);

static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
			   union acpi_operand_object *dest_desc);

static acpi_status
acpi_ut_copy_ielement_to_eelement(u8 object_type,
				  union acpi_operand_object *source_object,
				  union acpi_generic_state *state,
				  void *context);

static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
				  union acpi_operand_object *dest_obj,
				  struct acpi_walk_state *walk_state);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_isimple_to_esimple
 *
 * PARAMETERS:  internal_object     - Source object to be copied
 *              external_object     - Where to return the copied object
 *              data_space          - Where object data is returned (such as
 *                                    buffer and string data)
 *              buffer_space_used   - Length of data_space that was used
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to copy a simple internal object to
 *              an external object.
 *
 *              The data_space buffer is assumed to have sufficient space for
 *              the object.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
				union acpi_object *external_object,
				u8 *data_space, acpi_size *buffer_space_used)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);

	*buffer_space_used = 0;

	/*
	 * Check for NULL object case (could be an uninitialized
	 * package element)
	 */
	if (!internal_object) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Always clear the external object */

	memset(external_object, 0, sizeof(union acpi_object));

	/*
	 * In general, the external object will be the same type as
	 * the internal object
	 */
	external_object->type = internal_object->common.type;

	/* However, only a limited number of external types are supported */

	switch (internal_object->common.type) {
	case ACPI_TYPE_STRING:

		/* String data is copied out-of-line into data_space (plus NUL) */

		external_object->string.pointer = (char *)data_space;
		external_object->string.length = internal_object->string.length;
		*buffer_space_used =
		    ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
						 internal_object->string.
						 length + 1);

		memcpy((void *)data_space,
		       (void *)internal_object->string.pointer,
		       (acpi_size)internal_object->string.length + 1);
		break;

	case ACPI_TYPE_BUFFER:

		external_object->buffer.pointer = data_space;
		external_object->buffer.length = internal_object->buffer.length;
		/*
		 * NOTE(review): string.length is read here for a buffer object;
		 * presumably string.length and buffer.length alias within the
		 * operand-object union — confirm against acobject.h.
		 */
		*buffer_space_used =
		    ACPI_ROUND_UP_TO_NATIVE_WORD(internal_object->string.
						 length);

		memcpy((void *)data_space,
		       (void *)internal_object->buffer.pointer,
		       internal_object->buffer.length);
		break;

	case ACPI_TYPE_INTEGER:

		external_object->integer.value = internal_object->integer.value;
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:

		/* This is an object reference. */

		switch (internal_object->reference.class) {
		case ACPI_REFCLASS_NAME:
			/*
			 * For namepath, return the object handle ("reference")
			 * We are referring to the namespace node
			 */
			external_object->reference.handle =
			    internal_object->reference.node;
			external_object->reference.actual_type =
			    acpi_ns_get_type(internal_object->reference.node);
			break;

		default:

			/* All other reference types are unsupported */

			return_ACPI_STATUS(AE_TYPE);
		}
		break;

	case ACPI_TYPE_PROCESSOR:

		external_object->processor.proc_id =
		    internal_object->processor.proc_id;
		external_object->processor.pblk_address =
		    internal_object->processor.address;
		external_object->processor.pblk_length =
		    internal_object->processor.length;
		break;

	case ACPI_TYPE_POWER:

		external_object->power_resource.system_level =
		    internal_object->power_resource.system_level;
		external_object->power_resource.resource_order =
		    internal_object->power_resource.resource_order;
		break;

	default:
		/*
		 * There is no corresponding external object type
		 */
		ACPI_ERROR((AE_INFO,
			    "Unsupported object type, cannot convert to external object: %s",
			    acpi_ut_get_type_name(internal_object->common.
						  type)));

		return_ACPI_STATUS(AE_SUPPORT);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_ielement_to_eelement
 *
 * PARAMETERS:  acpi_pkg_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy one package element to another package element. Invoked
 *              by the package-tree walker; advances the free_space/length
 *              counters in the acpi_pkg_info context.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_ielement_to_eelement(u8 object_type,
				  union acpi_operand_object *source_object,
				  union acpi_generic_state *state,
				  void *context)
{
	acpi_status status = AE_OK;
	struct acpi_pkg_info *info = (struct acpi_pkg_info *)context;
	acpi_size object_space;
	u32 this_index;
	union acpi_object *target_object;

	ACPI_FUNCTION_ENTRY();

	this_index = state->pkg.index;
	target_object = (union acpi_object *)&((union acpi_object *)
					       (state->pkg.dest_object))->
	    package.elements[this_index];

	switch (object_type) {
	case ACPI_COPY_TYPE_SIMPLE:
		/*
		 * This is a simple or null object
		 */
		status = acpi_ut_copy_isimple_to_esimple(source_object,
							 target_object,
							 info->free_space,
							 &object_space);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;

	case ACPI_COPY_TYPE_PACKAGE:
		/*
		 * Build the package object
		 */
		target_object->type = ACPI_TYPE_PACKAGE;
		target_object->package.count = source_object->package.count;
		target_object->package.elements =
		    ACPI_CAST_PTR(union acpi_object, info->free_space);

		/*
		 * Pass the new package object back to the package walk routine
		 */
		state->pkg.this_target_obj = target_object;

		/*
		 * Save space for the array of objects (Package elements)
		 * update the buffer length counter
		 */
		object_space = ACPI_ROUND_UP_TO_NATIVE_WORD((acpi_size)
							    target_object->
							    package.count *
							    sizeof(union
								   acpi_object));
		break;

	default:

		return (AE_BAD_PARAMETER);
	}

	info->free_space += object_space;
	info->length += object_space;
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_ipackage_to_epackage
 *
 * PARAMETERS:  internal_object     - Pointer to the object we are returning
 *              buffer              - Where the object is returned
 *              space_used          - Where the object length is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to place a package object in a user
 *              buffer. A package object by definition contains other objects.
 *
 *              The buffer is assumed to have sufficient space for the object.
 *              The caller must have verified the buffer length needed using
 *              the acpi_ut_get_object_size function before calling this
 *              function.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
				  u8 *buffer, acpi_size *space_used)
{
	union acpi_object *external_object;
	acpi_status status;
	struct acpi_pkg_info info;

	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);

	/*
	 * First package at head of the buffer
	 */
	external_object = ACPI_CAST_PTR(union acpi_object, buffer);

	/*
	 * Free space begins right after the first package
	 */
	info.length = ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
	info.free_space = buffer +
	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
	info.object_space = 0;
	info.num_packages = 1;

	external_object->type = internal_object->common.type;
	external_object->package.count = internal_object->package.count;
	external_object->package.elements =
	    ACPI_CAST_PTR(union acpi_object, info.free_space);

	/*
	 * Leave room for an array of ACPI_OBJECTS in the buffer
	 * and move the free space past it
	 */
	info.length += (acpi_size)external_object->package.count *
	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
	info.free_space += external_object->package.count *
	    ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));

	status = acpi_ut_walk_package_tree(internal_object, external_object,
					   acpi_ut_copy_ielement_to_eelement,
					   &info);

	*space_used = info.length;
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_iobject_to_eobject
 *
 * PARAMETERS:  internal_object     - The internal object to be converted
 *              ret_buffer          - Where the object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to build an API object to be returned
 *              to the caller.
 *
 ******************************************************************************/

acpi_status
acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
				struct acpi_buffer *ret_buffer)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);

	if (internal_object->common.type == ACPI_TYPE_PACKAGE) {
		/*
		 * Package object:  Copy all subobjects (including
		 * nested packages)
		 */
		status = acpi_ut_copy_ipackage_to_epackage(internal_object,
							   ret_buffer->pointer,
							   &ret_buffer->length);
	} else {
		/*
		 * Build a simple object (no nested objects). Out-of-line data
		 * (string/buffer contents) starts just past the acpi_object
		 * header, rounded to a native word boundary.
		 */
		status = acpi_ut_copy_isimple_to_esimple(internal_object,
							 ACPI_CAST_PTR(union
								       acpi_object,
								       ret_buffer->
								       pointer),
							 ACPI_ADD_PTR(u8,
								      ret_buffer->
								      pointer,
								      ACPI_ROUND_UP_TO_NATIVE_WORD
								      (sizeof
								       (union
									acpi_object))),
							 &ret_buffer->length);
		/*
		 * build simple does not include the object size in the length
		 * so we add it in here
		 */
		ret_buffer->length += sizeof(union acpi_object);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_esimple_to_isimple
 *
 * PARAMETERS:  external_object     - The external object to be converted
 *              ret_internal_object - Where the internal object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function copies an external object to an internal one.
 *              NOTE: Pointers can be copied, we don't need to copy data.
 *              (The pointers have to be valid in our address space no matter
 *              what we do with them!)
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
				union acpi_operand_object **ret_internal_object)
{
	union acpi_operand_object *internal_object;

	ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);

	/*
	 * Simple types supported are: String, Buffer, Integer
	 */
	switch (external_object->type) {
	case ACPI_TYPE_STRING:
	case ACPI_TYPE_BUFFER:
	case ACPI_TYPE_INTEGER:
	case ACPI_TYPE_LOCAL_REFERENCE:

		internal_object = acpi_ut_create_internal_object((u8)
								 external_object->
								 type);
		if (!internal_object) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}
		break;

	case ACPI_TYPE_ANY:	/* This is the case for a NULL object */

		*ret_internal_object = NULL;
		return_ACPI_STATUS(AE_OK);

	default:

		/* All other types are not supported */

		ACPI_ERROR((AE_INFO,
			    "Unsupported object type, cannot convert to internal object: %s",
			    acpi_ut_get_type_name(external_object->type)));

		return_ACPI_STATUS(AE_SUPPORT);
	}

	/* Must COPY string and buffer contents */

	switch (external_object->type) {
	case ACPI_TYPE_STRING:

		internal_object->string.pointer =
		    ACPI_ALLOCATE_ZEROED((acpi_size)
					 external_object->string.length + 1);

		if (!internal_object->string.pointer) {
			goto error_exit;
		}

		memcpy(internal_object->string.pointer,
		       external_object->string.pointer,
		       external_object->string.length);

		internal_object->string.length = external_object->string.length;
		break;

	case ACPI_TYPE_BUFFER:

		internal_object->buffer.pointer =
		    ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
		if (!internal_object->buffer.pointer) {
			goto error_exit;
		}

		memcpy(internal_object->buffer.pointer,
		       external_object->buffer.pointer,
		       external_object->buffer.length);

		internal_object->buffer.length = external_object->buffer.length;

		/* Mark buffer data valid */

		internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
		break;

	case ACPI_TYPE_INTEGER:

		internal_object->integer.value = external_object->integer.value;
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:

		/* An incoming reference is defined to be a namespace node */

		internal_object->reference.class = ACPI_REFCLASS_REFOF;
		internal_object->reference.object =
		    external_object->reference.handle;
		break;

	default:

		/* Other types can't get here */

		break;
	}

	*ret_internal_object = internal_object;
	return_ACPI_STATUS(AE_OK);

error_exit:
	/* On allocation failure, drop the half-built internal object */
	acpi_ut_remove_reference(internal_object);
	return_ACPI_STATUS(AE_NO_MEMORY);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_epackage_to_ipackage
 *
 * PARAMETERS:  external_object     - The external object to be converted
 *              internal_object     - Where the internal object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy an external package object to an internal package.
 *              Handles nested packages.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
				  union acpi_operand_object **internal_object)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *package_object;
	union acpi_operand_object **package_elements;
	u32 i;

	ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);

	/* Create the package object */

	package_object =
	    acpi_ut_create_package_object(external_object->package.count);
	if (!package_object) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	package_elements = package_object->package.elements;

	/*
	 * Recursive implementation. Probably ok, since nested external
	 * packages as parameters should be very rare.
	 */
	for (i = 0; i < external_object->package.count; i++) {
		status =
		    acpi_ut_copy_eobject_to_iobject(&external_object->package.
						    elements[i],
						    &package_elements[i]);
		if (ACPI_FAILURE(status)) {

			/* Truncate package and delete it */

			package_object->package.count = i;
			package_elements[i] = NULL;
			acpi_ut_remove_reference(package_object);
			return_ACPI_STATUS(status);
		}
	}

	/* Mark package data valid */

	package_object->package.flags |= AOPOBJ_DATA_VALID;

	*internal_object = package_object;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_eobject_to_iobject
 *
 * PARAMETERS:  external_object     - The external object to be converted
 *              internal_object     - Where the internal object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Converts an external object to an internal object.
 *
 ******************************************************************************/

acpi_status
acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
				union acpi_operand_object **internal_object)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);

	if (external_object->type == ACPI_TYPE_PACKAGE) {
		status =
		    acpi_ut_copy_epackage_to_ipackage(external_object,
						      internal_object);
	} else {
		/*
		 * Build a simple object (no nested objects)
		 */
		status = acpi_ut_copy_esimple_to_isimple(external_object,
							 internal_object);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_simple_object
 *
 * PARAMETERS:  source_desc         - The internal object to be copied
 *              dest_desc           - New target object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Simple copy of one internal object to another. Reference count
 *              of the destination object is preserved.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
			   union acpi_operand_object *dest_desc)
{
	u16 reference_count;
	union acpi_operand_object *next_object;
	acpi_status status;
	acpi_size copy_size;

	/* Save fields from destination that we don't want to overwrite */

	reference_count = dest_desc->common.reference_count;
	next_object = dest_desc->common.next_object;

	/*
	 * Copy the entire source object over the destination object.
	 * Note: Source can be either an operand object or namespace node.
	 */
	copy_size = sizeof(union acpi_operand_object);
	if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
		copy_size = sizeof(struct acpi_namespace_node);
	}

	memcpy(ACPI_CAST_PTR(char, dest_desc),
	       ACPI_CAST_PTR(char, source_desc), copy_size);

	/* Restore the saved fields */

	dest_desc->common.reference_count = reference_count;
	dest_desc->common.next_object = next_object;

	/* New object is not static, regardless of source */

	dest_desc->common.flags &= ~AOPOBJ_STATIC_POINTER;

	/* Handle the objects with extra data */

	switch (dest_desc->common.type) {
	case ACPI_TYPE_BUFFER:
		/*
		 * Allocate and copy the actual buffer if and only if:
		 * 1) There is a valid buffer pointer
		 * 2) The buffer has a length > 0
		 */
		if ((source_desc->buffer.pointer) &&
		    (source_desc->buffer.length)) {
			dest_desc->buffer.pointer =
			    ACPI_ALLOCATE(source_desc->buffer.length);
			if (!dest_desc->buffer.pointer) {
				return (AE_NO_MEMORY);
			}

			/* Copy the actual buffer data */

			memcpy(dest_desc->buffer.pointer,
			       source_desc->buffer.pointer,
			       source_desc->buffer.length);
		}
		break;

	case ACPI_TYPE_STRING:
		/*
		 * Allocate and copy the actual string if and only if:
		 * 1) There is a valid string pointer
		 * (Pointer to a NULL string is allowed)
		 */
		if (source_desc->string.pointer) {
			dest_desc->string.pointer =
			    ACPI_ALLOCATE((acpi_size)source_desc->string.
					  length + 1);
			if (!dest_desc->string.pointer) {
				return (AE_NO_MEMORY);
			}

			/* Copy the actual string data */

			memcpy(dest_desc->string.pointer,
			       source_desc->string.pointer,
			       (acpi_size)source_desc->string.length + 1);
		}
		break;

	case ACPI_TYPE_LOCAL_REFERENCE:
		/*
		 * We copied the reference object, so we now must add a reference
		 * to the object pointed to by the reference
		 *
		 * DDBHandle reference (from Load/load_table) is a special reference,
		 * it does not have a Reference.Object, so does not need to
		 * increase the reference count
		 */
		if (source_desc->reference.class == ACPI_REFCLASS_TABLE) {
			break;
		}

		acpi_ut_add_reference(source_desc->reference.object);
		break;

	case ACPI_TYPE_REGION:
		/*
		 * We copied the Region Handler, so we now must add a reference
		 */
		if (dest_desc->region.handler) {
			acpi_ut_add_reference(dest_desc->region.handler);
		}
		break;

		/*
		 * For Mutex and Event objects, we cannot simply copy the underlying
		 * OS object. We must create a new one.
		 */
	case ACPI_TYPE_MUTEX:

		status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;

	case ACPI_TYPE_EVENT:

		status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
						  &dest_desc->event.
						  os_semaphore);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;

	default:

		/* Nothing to do for other simple objects */

		break;
	}

	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_ielement_to_ielement
 *
 * PARAMETERS:  acpi_pkg_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy one package element to another package element. Invoked
 *              by the package-tree walker for internal-to-internal copies.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
				  union acpi_operand_object *source_object,
				  union acpi_generic_state *state,
				  void *context)
{
	acpi_status status = AE_OK;
	u32 this_index;
	union acpi_operand_object **this_target_ptr;
	union acpi_operand_object *target_object;

	ACPI_FUNCTION_ENTRY();

	this_index = state->pkg.index;
	this_target_ptr = (union acpi_operand_object **)
	    &state->pkg.dest_object->package.elements[this_index];

	switch (object_type) {
	case ACPI_COPY_TYPE_SIMPLE:

		/* A null source object indicates a (legal) null package element */

		if (source_object) {
			/*
			 * This is a simple object, just copy it
			 */
			target_object =
			    acpi_ut_create_internal_object(source_object->
							   common.type);
			if (!target_object) {
				return (AE_NO_MEMORY);
			}

			status =
			    acpi_ut_copy_simple_object(source_object,
						       target_object);
			if (ACPI_FAILURE(status)) {
				goto error_exit;
			}

			*this_target_ptr = target_object;
		} else {
			/* Pass through a null element */

			*this_target_ptr = NULL;
		}
		break;

	case ACPI_COPY_TYPE_PACKAGE:
		/*
		 * This object is a package - go down another nesting level
		 * Create and build the package object
		 */
		target_object =
		    acpi_ut_create_package_object(source_object->package.count);
		if (!target_object) {
			return (AE_NO_MEMORY);
		}

		target_object->common.flags = source_object->common.flags;

		/* Pass the new package object back to the package walk routine */

		state->pkg.this_target_obj = target_object;

		/* Store the object pointer in the parent package object */

		*this_target_ptr = target_object;
		break;

	default:

		return (AE_BAD_PARAMETER);
	}

	return (status);

error_exit:
	acpi_ut_remove_reference(target_object);
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_ipackage_to_ipackage
 *
 * PARAMETERS:  source_obj          - Pointer to the source package object
 *              dest_obj            - Where the internal object is returned
 *              walk_state          - Current Walk state descriptor
 *
 * RETURN:      Status
 *
 * DESCRIPTION: This function is called to copy an internal package object
 *              into another internal package object.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
				  union acpi_operand_object *dest_obj,
				  struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);

	dest_obj->common.type = source_obj->common.type;
	dest_obj->common.flags = source_obj->common.flags;
	dest_obj->package.count = source_obj->package.count;

	/*
	 * Create the object array and walk the source package tree
	 * (count + 1 entries: the element array is NULL-terminated)
	 */
	dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
							   source_obj->package.
							   count +
							   1) * sizeof(void *));
	if (!dest_obj->package.elements) {
		ACPI_ERROR((AE_INFO, "Package allocation failure"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Copy the package element-by-element by walking the package "tree".
	 * This handles nested packages of arbitrary depth.
	 */
	status = acpi_ut_walk_package_tree(source_obj, dest_obj,
					   acpi_ut_copy_ielement_to_ielement,
					   walk_state);

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_copy_iobject_to_iobject
 *
 * PARAMETERS:  source_desc         - The internal object to be copied
 *              dest_desc           - Where the copied object is returned
 *              walk_state          - Current walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Copy an internal object to a new internal object
 *
 ******************************************************************************/

acpi_status
acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
				union acpi_operand_object **dest_desc,
				struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);

	/* Create the top level object */

	*dest_desc = acpi_ut_create_internal_object(source_desc->common.type);
	if (!*dest_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Copy the object and possible subobjects */

	if (source_desc->common.type == ACPI_TYPE_PACKAGE) {
		status =
		    acpi_ut_copy_ipackage_to_ipackage(source_desc, *dest_desc,
						      walk_state);
	} else {
		status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
	}

	/* Delete the allocated object if copy failed */

	if (ACPI_FAILURE(status)) {
		acpi_ut_remove_reference(*dest_desc);
	}

	return_ACPI_STATUS(status);
}
linux-master
drivers/acpi/acpica/utcopy.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NFIT - Machine Check Handler
 *
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/notifier.h>
#include <linux/acpi.h>
#include <linux/nd.h>
#include <asm/mce.h>
#include "nfit.h"

/*
 * MCE decode-chain callback: when an uncorrectable memory error lands
 * inside a persistent-memory SPA range, record the bad range with
 * libnvdimm, notify the region, and optionally kick off an ARS rescan.
 * Always returns NOTIFY_DONE so other decoders still run.
 */
static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct mce *mce = (struct mce *)data;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_spa *nfit_spa;

	/* We only care about uncorrectable memory errors */
	if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
		return NOTIFY_DONE;

	/* Verify the address reported in the MCE is valid. */
	if (!mce_usable_address(mce))
		return NOTIFY_DONE;

	/*
	 * mce->addr contains the physical addr accessed that caused the
	 * machine check. We need to walk through the list of NFITs, and see
	 * if any of them matches that address, and only then start a scrub.
	 */
	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		/* blast radius implied by the MCI_MISC recoverable-address LSB */
		unsigned int align = 1UL << MCI_MISC_ADDR_LSB(mce->misc);
		struct device *dev = acpi_desc->dev;
		int found_match = 0;

		/* init_mutex nests inside acpi_desc_lock */
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
			struct acpi_nfit_system_address *spa = nfit_spa->spa;

			if (nfit_spa_type(spa) != NFIT_SPA_PM)
				continue;
			/* find the spa that covers the mce addr */
			if (spa->address > mce->addr)
				continue;
			if ((spa->address + spa->length - 1) < mce->addr)
				continue;
			found_match = 1;
			dev_dbg(dev, "addr in SPA %d (0x%llx, 0x%llx)\n",
				spa->range_index, spa->address, spa->length);
			/*
			 * We can break at the first match because we're going
			 * to rescan all the SPA ranges. There shouldn't be any
			 * aliasing anyway.
			 */
			break;
		}
		mutex_unlock(&acpi_desc->init_mutex);

		if (!found_match)
			continue;

		/* If this fails due to an -ENOMEM, there is little we can do */
		nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus,
				ALIGN_DOWN(mce->addr, align), align);
		/* nfit_spa is the matched entry from the loop above */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);

		if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) {
			/*
			 * We can ignore an -EBUSY here because if an ARS is
			 * already in progress, just let that be the last
			 * authoritative one
			 */
			acpi_nfit_ars_rescan(acpi_desc, 0);
		}
		mce->kflags |= MCE_HANDLED_NFIT;
		break;
	}

	mutex_unlock(&acpi_desc_lock);
	return NOTIFY_DONE;
}

static struct notifier_block nfit_mce_dec = {
	.notifier_call	= nfit_handle_mce,
	.priority	= MCE_PRIO_NFIT,
};

/* Hook the NFIT handler into the machine-check decode chain. */
void nfit_mce_register(void)
{
	mce_register_decode_chain(&nfit_mce_dec);
}

/* Remove the NFIT handler from the machine-check decode chain. */
void nfit_mce_unregister(void)
{
	mce_unregister_decode_chain(&nfit_mce_dec);
}
linux-master
drivers/acpi/nfit/mce.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <linux/ndctl.h>
#include <linux/acpi.h>
#include <linux/memregion.h>
#include <asm/smp.h>
#include "intel.h"
#include "nfit.h"

/* sysfs: show whether firmware-activate skips forcing devices idle */
static ssize_t firmware_activate_noidle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
}

/*
 * sysfs: set the noidle policy; toggling it invalidates the cached
 * capability so it is re-read from platform firmware.
 */
static ssize_t firmware_activate_noidle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	ssize_t rc;
	bool val;

	rc = kstrtobool(buf, &val);
	if (rc)
		return rc;
	if (val != acpi_desc->fwa_noidle)
		acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
	acpi_desc->fwa_noidle = val;
	return size;
}
DEVICE_ATTR_RW(firmware_activate_noidle);

/*
 * Firmware activation is supported only when the bus advertises the
 * Intel bus family with the complete fw-activate command mask.
 */
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	unsigned long *mask;

	if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
		return false;

	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}

/*
 * Query DIMM security state via the GET_SECURITY_STATE DSM and translate
 * it into libnvdimm NVDIMM_SECURITY_* flag bits; returns 0 on any failure
 * (treated as "no security info").
 */
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		/* master passphrase state lives in extended_state */
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}

/* Issue the FREEZE_LOCK DSM: prevent further security state changes. */
static int intel_security_freeze(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_freeze_lock cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	if (nd_cmd.cmd.status)
		return -EIO;
	return 0;
}

/*
 * Change the user or master passphrase via the matching SET_*PASSPHRASE
 * DSM; both the old and new passphrase travel in the input payload.
 */
static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}

/* Unlock the DIMM with the user passphrase (UNLOCK_UNIT DSM). */
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	return 0;
}

/* Disable the user passphrase (DISABLE_PASSPHRASE DSM). */
static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

/*
 * Secure-erase the DIMM with either the user or master passphrase,
 * selecting the corresponding SECURE_ERASE DSM.
 */
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}

/* Poll whether a previously started overwrite has finished. */
static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	return 0;
}

/* Start a media overwrite (OVERWRITE DSM) using the given passphrase. */
static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}

/*
 * Unlock/erase/overwrite are only wired up on x86 (CONFIG_X86); elsewhere
 * the __maybe_unused implementations above are compiled out of the ops.
 */
static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;

/* Fetch bus-level firmware-activate info from platform firmware. */
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_intel_bus_fw_activate_businfo *info)
{
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate_businfo cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate_businfo),
		},
	};
	int rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);
	*info = nd_cmd.cmd;
	return rc;
}

/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		/* armed, but activation would exceed the quiesce budget */
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}

/* Return the cached capability, refreshing state first if needed. */
static enum nvdimm_fwa_capability intel_bus_fwa_capability(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
		return acpi_desc->fwa_cap;

	if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
		return acpi_desc->fwa_cap;

	return NVDIMM_FWA_CAP_INVALID;
}

/*
 * Trigger bus-level firmware activation; only valid from the armed
 * states. Invalidates cached state afterwards regardless of outcome.
 */
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate),
		},
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default.  Let a module
		 * parameter override that policy.
		 */
		.cmd = {
			.iodev_state = acpi_desc->fwa_noidle
				? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
				: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
		},
	};
	int rc;

	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);

	/*
	 * Whether the command succeeded, or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context)
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}

static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;

/* Fetch per-DIMM firmware-activate info from the DIMM's DSM. */
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
		struct nd_intel_fw_activate_dimminfo *info)
{
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_dimminfo cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_dimminfo),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_dimminfo),
		},
	};
	int rc;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	*info = nd_cmd.cmd;
	return rc;
}

/*
 * Per-DIMM firmware-activate state, cached until the bus-level
 * activation count (acpi_desc->fwa_count) moves on.
 */
static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nd_intel_fw_activate_dimminfo info;
	int rc;

	/*
	 * Similar to the bus state, since activate is synchronous the
	 * busy state should resolve within the context of 'activate'.
	 */
	switch (nfit_mem->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* If no activations occurred the old state is still valid */
		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
			return nfit_mem->fwa_state;
	}

	rc = intel_fwa_dimminfo(nvdimm, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
		break;
	default:
		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
		break;
	}

	/* map the firmware result codes onto the libnvdimm result enum */
	switch (info.result) {
	case ND_INTEL_DIMM_FWA_NONE:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
		break;
	case ND_INTEL_DIMM_FWA_SUCCESS:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
		break;
	case ND_INTEL_DIMM_FWA_NOTSTAGED:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
		break;
	case ND_INTEL_DIMM_FWA_NEEDRESET:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
		break;
	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
	case ND_INTEL_DIMM_FWA_ABORT:
	case ND_INTEL_DIMM_FWA_NOTSUPP:
	case ND_INTEL_DIMM_FWA_ERROR:
	default:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
		break;
	}

	nfit_mem->fwa_count = acpi_desc->fwa_count;

	return nfit_mem->fwa_state;
}

/* Return the cached per-DIMM activation result, refreshing if stale. */
static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;

	if (nfit_mem->fwa_count == acpi_desc->fwa_count
			&& nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
		return nfit_mem->fwa_result;

	if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
		return nfit_mem->fwa_result;

	return NVDIMM_FWA_RESULT_INVALID;
}

/*
 * Arm or disarm a DIMM for firmware activation. Arming an already-armed
 * DIMM (or disarming an idle one) is a no-op success.
 */
static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_arm cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_arm),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_arm),
		},
		.cmd = {
			.activate_arm = arm == NVDIMM_FWA_ARM
				? ND_INTEL_DIMM_FWA_ARM
				: ND_INTEL_DIMM_FWA_DISARM,
		},
	};
	int rc;

	switch (intel_fwa_state(nvdimm)) {
	case NVDIMM_FWA_INVALID:
		return -ENXIO;
	case NVDIMM_FWA_BUSY:
		return -EBUSY;
	case NVDIMM_FWA_IDLE:
		if (arm == NVDIMM_FWA_DISARM)
			return 0;
		break;
	case NVDIMM_FWA_ARMED:
		if (arm == NVDIMM_FWA_ARM)
			return 0;
		break;
	default:
		return -ENXIO;
	}

	/*
	 * Invalidate the bus-level state, now that we're committed to
	 * changing the 'arm' state.
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
			? "arm" : "disarm", rc);
	return rc;
}

static const struct nvdimm_fw_ops __intel_fw_ops = {
	.activate_state = intel_fwa_state,
	.activate_result = intel_fwa_result,
	.arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
linux-master
drivers/acpi/nfit/intel.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

/* global registry of NFIT descriptors; guarded by acpi_desc_lock */
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

/* snapshot of previously-seen NFIT sub-tables, used across re-enumeration */
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

/* Look up the GUID registered for the given NFIT UUID id. */
const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

/* Translate a secondary bus family to its GUID (not valid for NFIT itself). */
static const guid_t *to_nfit_bus_uuid(int family)
{
	if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
			"only secondary bus families can be translated\n"))
		return NULL;

	/*
	 * The index of bus UUIDs starts immediately following the last
	 * NVDIMM/leaf family.
	 */
	return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

/*
 * Translate bus-level DSM status words (low 16 bits: status, high 16:
 * extended status) into Linux error codes for each bus command.
 */
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		/* partial clear: report how much was actually cleared */
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

/* firmware status value meaning "labels locked" for the _LS* methods */
#define ACPI_LABELS_LOCKED 3

/* Translate per-DIMM label-command status into Linux error codes. */
static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf,
		unsigned int cmd, u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

/* Dispatch status translation to the bus or DIMM variant. */
static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	/* first pass: integers flatten to 4 bytes, buffers to their length */
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	/* single allocation: header object followed by the flattened payload */
	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	/* ownership of the input package ends here, success or not */
	ACPI_FREE(pkg);
	return buf;
}

/* Wrap a _LSW integer result into the buffer object acpi_nfit_ctl expects. */
static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = NULL;
	void *dst = NULL;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

/* Evaluate _LSW (label storage write) for [offset, offset+len) of @data. */
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

/* Evaluate _LSR (label storage read) for [offset, offset+len). */
static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

/* Evaluate _LSI (label storage information). */
static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

/*
 * DSM revision to use for a given family/function; Intel-family security
 * and fw-activate functions require revision 2, everything else defaults
 * to revision 1.
 */
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES ...
				NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > NVDIMM_CMD_MAX)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

/*
 * Suppress hex-dumping of Intel security-command payloads (they carry
 * passphrases) unless the security-debug config is enabled.
 */
static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}

/*
 * Resolve the DSM function number (and family) for a command; negative
 * return is an errno, 0 means "force validation failure" for non-Intel
 * families without a call package.
 */
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg, int *family)
{
	if (call_pkg) {
		int i;

		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
		*family = call_pkg->nd_family;
		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}

/*
 * Central command dispatcher for bus- and DIMM-level NFIT commands:
 * validates the command against the advertised masks, marshals the input
 * into an ACPI buffer, and routes to the _LS* named methods or _DSM.
 * NOTE(review): this function continues past the end of this chunk; the
 * body below is the visible portion only.
 */
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;
	int family = 0;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
	if (func < 0)
		return func;

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
			family = call_pkg->nd_family;
			if (family > NVDIMM_BUS_FAMILY_MAX ||
			    !test_bit(family, &nd_desc->bus_family_mask))
				return -EINVAL;
			/* bounds-checked above; harden the array index */
			family = array_index_nospec(family,
						    NVDIMM_BUS_FAMILY_MAX + 1);
			dsm_mask = acpi_desc->family_dsm_mask[family];
			guid = to_nfit_bus_uuid(family);
		} else {
			dsm_mask = acpi_desc->bus_dsm_mask;
			guid = to_nfit_uuid(NFIT_DEV_BUS);
		}
		desc = nd_cmd_bus_desc(cmd);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command.  For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL &&
	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
		dimm_name, cmd, family, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output 
length: %d\n", dimm_name, cmd_name, out_obj->buffer.length); print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, out_obj->buffer.pointer, min_t(u32, 128, out_obj->buffer.length), true); if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, out_obj->buffer.pointer, min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); ACPI_FREE(out_obj); /* * Need to support FW function w/o known size in advance. * Caller can determine required size based upon nd_fw_size. * If we return an error (like elsewhere) then caller wouldn't * be able to rely upon data returned to make calculation. */ if (cmd_rc) *cmd_rc = 0; return 0; } for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, (u32 *) out_obj->buffer.pointer, out_obj->buffer.length - offset); if (offset + out_size > out_obj->buffer.length) { dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n", dimm_name, cmd_name, i); break; } if (in_buf.buffer.length + offset + out_size > buf_len) { dev_dbg(dev, "%s output overrun cmd: %s field: %d\n", dimm_name, cmd_name, i); rc = -ENXIO; goto out; } memcpy(buf + in_buf.buffer.length + offset, out_obj->buffer.pointer + offset, out_size); offset += out_size; } /* * Set fw_status for all the commands with a known format to be * later interpreted by xlat_status(). 
*/ if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR) || (nvdimm && cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR))) fw_status = *(u32 *) out_obj->buffer.pointer; if (offset + in_buf.buffer.length < buf_len) { if (i >= 1) { /* * status valid, return the number of bytes left * unfilled in the output buffer */ rc = buf_len - offset - in_buf.buffer.length; if (cmd_rc) *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status); } else { dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", __func__, dimm_name, cmd_name, buf_len, offset); rc = -ENXIO; } } else { rc = 0; if (cmd_rc) *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status); } out: ACPI_FREE(out_obj); return rc; } EXPORT_SYMBOL_GPL(acpi_nfit_ctl); static const char *spa_type_name(u16 type) { static const char *to_name[] = { [NFIT_SPA_VOLATILE] = "volatile", [NFIT_SPA_PM] = "pmem", [NFIT_SPA_DCR] = "dimm-control-region", [NFIT_SPA_BDW] = "block-data-window", [NFIT_SPA_VDISK] = "volatile-disk", [NFIT_SPA_VCD] = "volatile-cd", [NFIT_SPA_PDISK] = "persistent-disk", [NFIT_SPA_PCD] = "persistent-cd", }; if (type > NFIT_SPA_PCD) return "unknown"; return to_name[type]; } int nfit_spa_type(struct acpi_nfit_system_address *spa) { guid_t guid; int i; import_guid(&guid, spa->range_guid); for (i = 0; i < NFIT_UUID_MAX; i++) if (guid_equal(to_nfit_uuid(i), &guid)) return i; return -1; } static size_t sizeof_spa(struct acpi_nfit_system_address *spa) { if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID) return sizeof(*spa); return sizeof(*spa) - 8; } static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) { struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; if (spa->header.length != sizeof_spa(spa)) return false; list_for_each_entry(nfit_spa, &prev->spas, list) { if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } } nfit_spa = devm_kzalloc(dev, 
		sizeof(*nfit_spa) + sizeof_spa(spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	/* deep-copy the SPA table into the trailing flexible storage */
	memcpy(nfit_spa->spa, spa, sizeof_spa(spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n", spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

/*
 * Add a Memory Device (memdev) sub-table to @acpi_desc->memdevs.
 * An identical entry left over from the previous scan (in @prev) is
 * moved back instead of re-allocated.  Returns false on a malformed
 * table length or allocation failure.
 */
static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	/* memdev sub-tables are fixed-size; reject any other length */
	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			/* unchanged since last scan: recycle the old entry */
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

/*
 * nfit_get_smbios_id - look up the SMBIOS physical id for a DIMM.
 * @device_handle: NFIT device handle to search for.
 * @flags: output, receives the matching memdev's flags on success.
 *
 * Walks every registered NFIT descriptor under acpi_desc_lock, taking
 * each descriptor's init_mutex while scanning its dimms list.
 * Returns the memdev's physical_id (>= 0) on success, -ENODEV if no
 * DIMM with @device_handle exists.
 */
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;
	u16 physical_id;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				*flags = memdev->flags;
				physical_id = memdev->physical_id;
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				return physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
*/ static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) { if (dcr->header.length < offsetof(struct acpi_nfit_control_region, window_size)) return 0; if (dcr->windows) return sizeof(*dcr); return offsetof(struct acpi_nfit_control_region, window_size); } static bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr) { struct device *dev = acpi_desc->dev; struct nfit_dcr *nfit_dcr; if (!sizeof_dcr(dcr)) return false; list_for_each_entry(nfit_dcr, &prev->dcrs, list) if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); return true; } nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), GFP_KERNEL); if (!nfit_dcr) return false; INIT_LIST_HEAD(&nfit_dcr->list); memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); dev_dbg(dev, "dcr index: %d windows: %d\n", dcr->region_index, dcr->windows); return true; } static bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw) { struct device *dev = acpi_desc->dev; struct nfit_bdw *nfit_bdw; if (bdw->header.length != sizeof(*bdw)) return false; list_for_each_entry(nfit_bdw, &prev->bdws, list) if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); return true; } nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), GFP_KERNEL); if (!nfit_bdw) return false; INIT_LIST_HEAD(&nfit_bdw->list); memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); dev_dbg(dev, "bdw dcr: %d windows: %d\n", bdw->region_index, bdw->windows); return true; } static size_t sizeof_idt(struct acpi_nfit_interleave *idt) { if (idt->header.length < sizeof(*idt)) return 0; return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); } static bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_interleave 
*idt) { struct device *dev = acpi_desc->dev; struct nfit_idt *nfit_idt; if (!sizeof_idt(idt)) return false; list_for_each_entry(nfit_idt, &prev->idts, list) { if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) continue; if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { list_move_tail(&nfit_idt->list, &acpi_desc->idts); return true; } } nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), GFP_KERNEL); if (!nfit_idt) return false; INIT_LIST_HEAD(&nfit_idt->list); memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); list_add_tail(&nfit_idt->list, &acpi_desc->idts); dev_dbg(dev, "idt index: %d num_lines: %d\n", idt->interleave_index, idt->line_count); return true; } static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) { if (flush->header.length < sizeof(*flush)) return 0; return struct_size(flush, hint_address, flush->hint_count); } static bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush) { struct device *dev = acpi_desc->dev; struct nfit_flush *nfit_flush; if (!sizeof_flush(flush)) return false; list_for_each_entry(nfit_flush, &prev->flushes, list) { if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) continue; if (memcmp(nfit_flush->flush, flush, sizeof_flush(flush)) == 0) { list_move_tail(&nfit_flush->list, &acpi_desc->flushes); return true; } } nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) + sizeof_flush(flush), GFP_KERNEL); if (!nfit_flush) return false; INIT_LIST_HEAD(&nfit_flush->list); memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); list_add_tail(&nfit_flush->list, &acpi_desc->flushes); dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n", flush->device_handle, flush->hint_count); return true; } static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_capabilities *pcap) { struct device *dev = acpi_desc->dev; u32 mask; mask = (1 << (pcap->highest_capability + 1)) - 1; acpi_desc->platform_cap = pcap->capabilities & mask; 
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

/*
 * Parse one NFIT sub-table at @table and route it to the matching
 * add_* helper.  Returns the address of the next sub-table, NULL when
 * the end of the buffer or a zero-length table is reached (a
 * zero-length table cannot be stepped over, so parsing stops with a
 * warning), or ERR_PTR(-ENOMEM) when an add_* helper fails.
 */
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		/* SMBIOS management information: noted, not consumed here */
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

/*
 * Associate memdevs with a DIMM (nfit_mem).  With a @spa argument only
 * SPA-DCR and SPA-PM ranges are processed; with @spa == NULL the
 * unmapped-DIMM sweep described in the comment below runs instead.
 */
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
*/ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct nfit_flush *nfit_flush; struct nfit_dcr *nfit_dcr; u32 device_handle; u16 dcr; if (spa && nfit_memdev->memdev->range_index != spa->range_index) continue; if (!spa && nfit_memdev->memdev->range_index) continue; found = NULL; dcr = nfit_memdev->memdev->region_index; device_handle = nfit_memdev->memdev->device_handle; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) { found = nfit_mem; break; } if (found) nfit_mem = found; else { nfit_mem = devm_kzalloc(acpi_desc->dev, sizeof(*nfit_mem), GFP_KERNEL); if (!nfit_mem) return -ENOMEM; INIT_LIST_HEAD(&nfit_mem->list); nfit_mem->acpi_desc = acpi_desc; list_add(&nfit_mem->list, &acpi_desc->dimms); } list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { if (nfit_dcr->dcr->region_index != dcr) continue; /* * Record the control region for the dimm. For * the ACPI 6.1 case, where there are separate * control regions for the pmem vs blk * interfaces, be sure to record the extended * blk details. 
*/ if (!nfit_mem->dcr) nfit_mem->dcr = nfit_dcr->dcr; else if (nfit_mem->dcr->windows == 0 && nfit_dcr->dcr->windows) nfit_mem->dcr = nfit_dcr->dcr; break; } list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { struct acpi_nfit_flush_address *flush; u16 i; if (nfit_flush->flush->device_handle != device_handle) continue; nfit_mem->nfit_flush = nfit_flush; flush = nfit_flush->flush; nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev, flush->hint_count, sizeof(struct resource), GFP_KERNEL); if (!nfit_mem->flush_wpq) return -ENOMEM; for (i = 0; i < flush->hint_count; i++) { struct resource *res = &nfit_mem->flush_wpq[i]; res->start = flush->hint_address[i]; res->end = res->start + 8 - 1; } break; } if (dcr && !nfit_mem->dcr) { dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", spa->range_index, dcr); return -ENODEV; } if (type == NFIT_SPA_DCR) { struct nfit_idt *nfit_idt; u16 idt_idx; /* multiple dimms may share a SPA when interleaved */ nfit_mem->spa_dcr = spa; nfit_mem->memdev_dcr = nfit_memdev->memdev; idt_idx = nfit_memdev->memdev->interleave_index; list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { if (nfit_idt->idt->interleave_index != idt_idx) continue; nfit_mem->idt_dcr = nfit_idt->idt; break; } } else if (type == NFIT_SPA_PM) { /* * A single dimm may belong to multiple SPA-PM * ranges, record at least one in addition to * any SPA-DCR range. 
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

/*
 * list_sort() comparator: order DIMMs by ascending NFIT device handle.
 */
static int nfit_mem_cmp(void *priv, const struct list_head *_a,
		const struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

/*
 * Build the per-DIMM (nfit_mem) objects from the parsed sub-table
 * lists: one __nfit_mem_init() pass per SPA range, then a final
 * NULL-spa pass for DIMMs that never got mapped.  Returns 0 on
 * success or the first negative error from __nfit_mem_init().
 */
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
*/ rc = __nfit_mem_init(acpi_desc, NULL); if (rc) return rc; list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); return 0; } static ssize_t bus_dsm_mask_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); } static struct device_attribute dev_attr_bus_dsm_mask = __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); } static DEVICE_ATTR_RO(revision); static ssize_t hw_error_scrub_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); return sprintf(buf, "%d\n", acpi_desc->scrub_mode); } /* * The 'hw_error_scrub' attribute can have the following values written to it: * '0': Switch to the default mode where an exception will only insert * the address of the memory error into the poison and badblocks lists. * '1': Enable a full scrub to happen if an exception for a memory error is * received. 
*/ static ssize_t hw_error_scrub_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct nvdimm_bus_descriptor *nd_desc; ssize_t rc; long val; rc = kstrtol(buf, 0, &val); if (rc) return rc; device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); switch (val) { case HW_ERROR_SCRUB_ON: acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON; break; case HW_ERROR_SCRUB_OFF: acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF; break; default: rc = -EINVAL; break; } } device_unlock(dev); if (rc) return rc; return size; } static DEVICE_ATTR_RW(hw_error_scrub); /* * This shows the number of full Address Range Scrubs that have been * completed since driver load time. Userspace can wait on this using * select/poll etc. A '+' at the end indicates an ARS is in progress */ static ssize_t scrub_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus_descriptor *nd_desc; struct acpi_nfit_desc *acpi_desc; ssize_t rc = -ENXIO; bool busy; device_lock(dev); nd_desc = dev_get_drvdata(dev); if (!nd_desc) { device_unlock(dev); return rc; } acpi_desc = to_acpi_desc(nd_desc); mutex_lock(&acpi_desc->init_mutex); busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? 
"+\n" : "\n"); /* Allow an admin to poll the busy state at a higher rate */ if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, &acpi_desc->scrub_flags)) { acpi_desc->scrub_tmo = 1; mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); } mutex_unlock(&acpi_desc->init_mutex); device_unlock(dev); return rc; } static ssize_t scrub_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct nvdimm_bus_descriptor *nd_desc; ssize_t rc; long val; rc = kstrtol(buf, 0, &val); if (rc) return rc; if (val != 1) return -EINVAL; device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); } device_unlock(dev); if (rc) return rc; return size; } static DEVICE_ATTR_RW(scrub); static bool ars_supported(struct nvdimm_bus *nvdimm_bus) { struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START | 1 << ND_CMD_ARS_STATUS; return (nd_desc->cmd_mask & mask) == mask; } static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); if (a == &dev_attr_scrub.attr) return ars_supported(nvdimm_bus) ? a->mode : 0; if (a == &dev_attr_firmware_activate_noidle.attr) return intel_fwa_supported(nvdimm_bus) ? 
a->mode : 0; return a->mode; } static struct attribute *acpi_nfit_attributes[] = { &dev_attr_revision.attr, &dev_attr_scrub.attr, &dev_attr_hw_error_scrub.attr, &dev_attr_bus_dsm_mask.attr, &dev_attr_firmware_activate_noidle.attr, NULL, }; static const struct attribute_group acpi_nfit_attribute_group = { .name = "nfit", .attrs = acpi_nfit_attributes, .is_visible = nfit_visible, }; static const struct attribute_group *acpi_nfit_attribute_groups[] = { &acpi_nfit_attribute_group, NULL, }; static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); return __to_nfit_memdev(nfit_mem); } static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); return nfit_mem->dcr; } static ssize_t handle_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); return sprintf(buf, "%#x\n", memdev->device_handle); } static DEVICE_ATTR_RO(handle); static ssize_t phys_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); return sprintf(buf, "%#x\n", memdev->physical_id); } static DEVICE_ATTR_RO(phys_id); static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); } static DEVICE_ATTR_RO(vendor); static ssize_t rev_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); } static DEVICE_ATTR_RO(rev_id); static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = 
to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); } static DEVICE_ATTR_RO(device); static ssize_t subsystem_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); } static DEVICE_ATTR_RO(subsystem_vendor); static ssize_t subsystem_rev_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_revision_id)); } static DEVICE_ATTR_RO(subsystem_rev_id); static ssize_t subsystem_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); } static DEVICE_ATTR_RO(subsystem_device); static int num_nvdimm_formats(struct nvdimm *nvdimm) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); int formats = 0; if (nfit_mem->memdev_pmem) formats++; return formats; } static ssize_t format_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); } static DEVICE_ATTR_RO(format); static ssize_t format1_show(struct device *dev, struct device_attribute *attr, char *buf) { u32 handle; ssize_t rc = -ENXIO; struct nfit_mem *nfit_mem; struct nfit_memdev *nfit_memdev; struct acpi_nfit_desc *acpi_desc; struct nvdimm *nvdimm = to_nvdimm(dev); struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); nfit_mem = nvdimm_provider_data(nvdimm); acpi_desc = nfit_mem->acpi_desc; handle = to_nfit_memdev(dev)->device_handle; /* assumes DIMMs have at most 2 published interface codes */ mutex_lock(&acpi_desc->init_mutex); list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct acpi_nfit_memory_map *memdev = 
nfit_memdev->memdev; struct nfit_dcr *nfit_dcr; if (memdev->device_handle != handle) continue; list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { if (nfit_dcr->dcr->region_index != memdev->region_index) continue; if (nfit_dcr->dcr->code == dcr->code) continue; rc = sprintf(buf, "0x%04x\n", le16_to_cpu(nfit_dcr->dcr->code)); break; } if (rc != -ENXIO) break; } mutex_unlock(&acpi_desc->init_mutex); return rc; } static DEVICE_ATTR_RO(format1); static ssize_t formats_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); } static DEVICE_ATTR_RO(formats); static ssize_t serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); } static DEVICE_ATTR_RO(serial); static ssize_t family_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); if (nfit_mem->family < 0) return -ENXIO; return sprintf(buf, "%d\n", nfit_mem->family); } static DEVICE_ATTR_RO(family); static ssize_t dsm_mask_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); if (nfit_mem->family < 0) return -ENXIO; return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); } static DEVICE_ATTR_RO(dsm_mask); static ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); u16 flags = __to_nfit_memdev(nfit_mem)->flags; if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags)) flags |= ACPI_NFIT_MEM_FLUSH_FAILED; return sprintf(buf, "%s%s%s%s%s%s%s\n", flags & ACPI_NFIT_MEM_SAVE_FAILED ? 
"save_fail " : "", flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : ""); } static DEVICE_ATTR_RO(flags); static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); return sprintf(buf, "%s\n", nfit_mem->id); } static DEVICE_ATTR_RO(id); static ssize_t dirty_shutdown_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown); } static DEVICE_ATTR_RO(dirty_shutdown); static struct attribute *acpi_nfit_dimm_attributes[] = { &dev_attr_handle.attr, &dev_attr_phys_id.attr, &dev_attr_vendor.attr, &dev_attr_device.attr, &dev_attr_rev_id.attr, &dev_attr_subsystem_vendor.attr, &dev_attr_subsystem_device.attr, &dev_attr_subsystem_rev_id.attr, &dev_attr_format.attr, &dev_attr_formats.attr, &dev_attr_format1.attr, &dev_attr_serial.attr, &dev_attr_flags.attr, &dev_attr_id.attr, &dev_attr_family.attr, &dev_attr_dsm_mask.attr, &dev_attr_dirty_shutdown.attr, NULL, }; static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); if (!to_nfit_dcr(dev)) { /* Without a dcr only the memdev attributes can be surfaced */ if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr || a == &dev_attr_flags.attr || a == &dev_attr_family.attr || a == &dev_attr_dsm_mask.attr) return a->mode; return 0; } if (a == &dev_attr_format1.attr && 
num_nvdimm_formats(nvdimm) <= 1) return 0; if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags) && a == &dev_attr_dirty_shutdown.attr) return 0; return a->mode; } static const struct attribute_group acpi_nfit_dimm_attribute_group = { .name = "nfit", .attrs = acpi_nfit_dimm_attributes, .is_visible = acpi_nfit_dimm_attr_visible, }; static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { &acpi_nfit_dimm_attribute_group, NULL, }; static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, u32 device_handle) { struct nfit_mem *nfit_mem; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) return nfit_mem->nvdimm; return NULL; } void __acpi_nvdimm_notify(struct device *dev, u32 event) { struct nfit_mem *nfit_mem; struct acpi_nfit_desc *acpi_desc; dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev), event); if (event != NFIT_NOTIFY_DIMM_HEALTH) { dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev), event); return; } acpi_desc = dev_get_drvdata(dev->parent); if (!acpi_desc) return; /* * If we successfully retrieved acpi_desc, then we know nfit_mem data * is still valid. 
*/ nfit_mem = dev_get_drvdata(dev); if (nfit_mem && nfit_mem->flags_attr) sysfs_notify_dirent(nfit_mem->flags_attr); } EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify); static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) { struct acpi_device *adev = data; struct device *dev = &adev->dev; device_lock(dev->parent); __acpi_nvdimm_notify(dev, event); device_unlock(dev->parent); } static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) { acpi_handle handle; acpi_status status; status = acpi_get_handle(adev->handle, method, &handle); if (ACPI_SUCCESS(status)) return true; return false; } __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) { struct device *dev = &nfit_mem->adev->dev; struct nd_intel_smart smart = { 0 }; union acpi_object in_buf = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 0, }; union acpi_object in_obj = { .package.type = ACPI_TYPE_PACKAGE, .package.count = 1, .package.elements = &in_buf, }; const u8 func = ND_INTEL_SMART; const guid_t *guid = to_nfit_uuid(nfit_mem->family); u8 revid = nfit_dsm_revid(nfit_mem->family, func); struct acpi_device *adev = nfit_mem->adev; acpi_handle handle = adev->handle; union acpi_object *out_obj; if ((nfit_mem->dsm_mask & (1 << func)) == 0) return; out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER || out_obj->buffer.length < sizeof(smart)) { dev_dbg(dev->parent, "%s: failed to retrieve initial health\n", dev_name(dev)); ACPI_FREE(out_obj); return; } memcpy(&smart, out_obj->buffer.pointer, sizeof(smart)); ACPI_FREE(out_obj); if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { if (smart.shutdown_state) set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags); } if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) { set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags); nfit_mem->dirty_shutdown = smart.shutdown_count; } } static void populate_shutdown_status(struct nfit_mem *nfit_mem) { /* * For DIMMs that provide a dynamic 
facility to retrieve a * dirty-shutdown status and/or a dirty-shutdown count, cache * these values in nfit_mem. */ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) nfit_intel_shutdown_status(nfit_mem); } static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; unsigned long dsm_mask, label_mask; const guid_t *guid; int i; int family = -1; struct acpi_nfit_control_region *dcr = nfit_mem->dcr; /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; nfit_mem->family = NVDIMM_FAMILY_INTEL; set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", be16_to_cpu(dcr->vendor_id), dcr->manufacturing_location, be16_to_cpu(dcr->manufacturing_date), be32_to_cpu(dcr->serial_number)); else sprintf(nfit_mem->id, "%04x-%08x", be16_to_cpu(dcr->vendor_id), be32_to_cpu(dcr->serial_number)); adev = to_acpi_dev(acpi_desc); if (!adev) { /* unit test case */ populate_shutdown_status(nfit_mem); return 0; } adev_dimm = acpi_find_child_device(adev, device_handle, false); nfit_mem->adev = adev_dimm; if (!adev_dimm) { dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", device_handle); return force_enable_dimms ? 0 : -ENODEV; } if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle, ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) { dev_err(dev, "%s: notification registration failed\n", dev_name(&adev_dimm->dev)); return -ENXIO; } /* * Record nfit_mem for the notification path to track back to * the nfit sysfs attributes for this dimm device object. 
*/ dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* * There are 4 "legacy" NVDIMM command sets * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before * an EFI working group was established to constrain this * proliferation. The nfit driver probes for the supported command * set by GUID. Note, if you're a platform developer looking to add * a new command set to this probe, consider using an existing set, * or otherwise seek approval to publish the command set at * http://www.uefi.org/RFIC_LIST. * * Note, that checking for function0 (bit0) tells us if any commands * are reachable through this GUID. */ clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) { set_bit(i, &nd_desc->dimm_family_mask); if (family < 0 || i == default_dsm_family) family = i; } /* limit the supported commands to those that are publicly documented */ nfit_mem->family = family; if (override_dsm_mask && !disable_vendor_specific) dsm_mask = override_dsm_mask; else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { dsm_mask = NVDIMM_INTEL_CMDMASK; if (disable_vendor_specific) dsm_mask &= ~(1 << ND_CMD_VENDOR); } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { dsm_mask = 0x1c3c76; } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { dsm_mask = 0x1fe; if (disable_vendor_specific) dsm_mask &= ~(1 << 8); } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { dsm_mask = 0xffffffff; } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) { dsm_mask = 0x1f; } else { dev_dbg(dev, "unknown dimm command family\n"); nfit_mem->family = -1; /* DSMs are optional, continue loading the driver... */ return 0; } /* * Function 0 is the command interrogation function, don't * export it to potential userspace use, and enable it to be * used as an error value in acpi_nfit_ctl(). 
*/ dsm_mask &= ~1UL; guid = to_nfit_uuid(nfit_mem->family); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev_dimm->handle, guid, nfit_dsm_revid(nfit_mem->family, i), 1ULL << i)) set_bit(i, &nfit_mem->dsm_mask); /* * Prefer the NVDIMM_FAMILY_INTEL label read commands if present * due to their better semantics handling locked capacity. */ label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA | 1 << ND_CMD_SET_CONFIG_DATA; if (family == NVDIMM_FAMILY_INTEL && (dsm_mask & label_mask) == label_mask) /* skip _LS{I,R,W} enabling */; else { if (acpi_nvdimm_has_method(adev_dimm, "_LSI") && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); set_bit(NFIT_MEM_LSR, &nfit_mem->flags); } if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); set_bit(NFIT_MEM_LSW, &nfit_mem->flags); } /* * Quirk read-only label configurations to preserve * access to label-less namespaces by default. */ if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags) && !force_labels) { dev_dbg(dev, "%s: No _LSW, disable labels\n", dev_name(&adev_dimm->dev)); clear_bit(NFIT_MEM_LSR, &nfit_mem->flags); } else dev_dbg(dev, "%s: Force enable labels\n", dev_name(&adev_dimm->dev)); } populate_shutdown_status(nfit_mem); return 0; } static void shutdown_dimm_notify(void *data) { struct acpi_nfit_desc *acpi_desc = data; struct nfit_mem *nfit_mem; mutex_lock(&acpi_desc->init_mutex); /* * Clear out the nfit_mem->flags_attr and shut down dimm event * notifications. 
 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm) {
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
			dev_set_drvdata(&adev_dimm->dev, NULL);
		}
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

/* Per-family security ops; only the Intel family implements them here */
static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
{
	switch (family) {
	case NVDIMM_FAMILY_INTEL:
		return intel_security_ops;
	default:
		return NULL;
	}
}

/*
 * Offer firmware-activate ops only when the bus supports fw_ops, the
 * DIMM speaks the Intel command set, and every DSM in
 * NVDIMM_INTEL_FW_ACTIVATE_CMDMASK is implemented.
 */
static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
		struct nfit_mem *nfit_mem)
{
	unsigned long mask;
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	if (!nd_desc->fw_ops)
		return NULL;

	if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
		return NULL;

	mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
	if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
		return NULL;

	return intel_fw_ops;
}

/*
 * Walk the parsed nfit_mem list and register one nvdimm device per
 * unique device handle, collating memdev flags and command masks.
 */
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		/* already registered on a prior pass? just count it */
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc =
acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); if (rc) continue; /* * TODO: provide translation for non-NVDIMM_FAMILY_INTEL * devices (i.e. from nd_cmd to acpi_dsm) to standardize the * userspace interface. */ cmd_mask = 1UL << ND_CMD_CALL; if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { /* * These commands have a 1:1 correspondence * between DSM payload and libnvdimm ioctl * payload format. */ cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; } if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); } if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush : NULL; nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, acpi_nfit_dimm_attribute_groups, flags, cmd_mask, flush ? flush->hint_count : 0, nfit_mem->flush_wpq, &nfit_mem->id[0], acpi_nfit_get_security_ops(nfit_mem->family), acpi_nfit_get_fw_ops(nfit_mem)); if (!nvdimm) return -ENOMEM; nfit_mem->nvdimm = nvdimm; dimm_count++; if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) continue; dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n", nvdimm_name(nvdimm), mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : ""); } rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); if (rc) return rc; /* * Now that dimms are successfully registered, and async registration * is flushed, attempt to enable event notification. 
*/ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { struct kernfs_node *nfit_kernfs; nvdimm = nfit_mem->nvdimm; if (!nvdimm) continue; nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); if (nfit_kernfs) nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, "flags"); sysfs_put(nfit_kernfs); if (!nfit_mem->flags_attr) dev_warn(acpi_desc->dev, "%s: notifications disabled\n", nvdimm_name(nvdimm)); } return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify, acpi_desc); } /* * These constants are private because there are no kernel consumers of * these commands. */ enum nfit_aux_cmds { NFIT_CMD_TRANSLATE_SPA = 5, NFIT_CMD_ARS_INJECT_SET = 7, NFIT_CMD_ARS_INJECT_CLEAR = 8, NFIT_CMD_ARS_INJECT_GET = 9, }; static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); unsigned long dsm_mask, *mask; struct acpi_device *adev; int i; set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); /* enable nfit_test to inject bus command emulation */ if (acpi_desc->bus_cmd_force_en) { nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; mask = &nd_desc->bus_family_mask; if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); nd_desc->fw_ops = intel_bus_fw_ops; } } adev = to_acpi_dev(acpi_desc); if (!adev) return; for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &nd_desc->cmd_mask); dsm_mask = (1 << ND_CMD_ARS_CAP) | (1 << ND_CMD_ARS_START) | (1 << ND_CMD_ARS_STATUS) | (1 << ND_CMD_CLEAR_ERROR) | (1 << NFIT_CMD_TRANSLATE_SPA) | (1 << NFIT_CMD_ARS_INJECT_SET) | (1 << NFIT_CMD_ARS_INJECT_CLEAR) | (1 << NFIT_CMD_ARS_INJECT_GET); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &acpi_desc->bus_dsm_mask); /* Enumerate allowed 
NVDIMM_BUS_FAMILY_INTEL commands */ dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL); mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, mask); if (*mask == dsm_mask) { set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask); nd_desc->fw_ops = intel_bus_fw_ops; } } static ssize_t range_index_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); return sprintf(buf, "%d\n", nfit_spa->spa->range_index); } static DEVICE_ATTR_RO(range_index); static struct attribute *acpi_nfit_region_attributes[] = { &dev_attr_range_index.attr, NULL, }; static const struct attribute_group acpi_nfit_region_attribute_group = { .name = "nfit", .attrs = acpi_nfit_region_attributes, }; static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { &acpi_nfit_region_attribute_group, NULL, }; /* enough info to uniquely specify an interleave set */ struct nfit_set_info { u64 region_offset; u32 serial_number; u32 pad; }; struct nfit_set_info2 { u64 region_offset; u32 serial_number; u16 vendor_id; u16 manufacturing_date; u8 manufacturing_location; u8 reserved[31]; }; static int cmp_map_compat(const void *m0, const void *m1) { const struct nfit_set_info *map0 = m0; const struct nfit_set_info *map1 = m1; return memcmp(&map0->region_offset, &map1->region_offset, sizeof(u64)); } static int cmp_map(const void *m0, const void *m1) { const struct nfit_set_info *map0 = m0; const struct nfit_set_info *map1 = m1; if (map0->region_offset < map1->region_offset) return -1; else if (map0->region_offset > map1->region_offset) return 1; return 0; } static int cmp_map2(const void *m0, const void *m1) { const struct nfit_set_info2 *map0 = m0; const struct nfit_set_info2 *map1 = m1; if 
	(map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

/*
 * Build the interleave-set cookies for @spa (v1.2, v1.1, and the compat
 * variant for historically mis-sorted v1.1 namespaces) and record each
 * mapping's position within the sorted set.
 */
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	import_guid(&nd_set->type_guid, spa->range_guid);

	/* devm-managed scratch arrays, freed at the end of this function */
	info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct nfit_set_info *map = &info[i];
		struct nfit_set_info2 *map2 = &info2[i];
		struct acpi_nfit_memory_map *memdev =
			memdev_from_spa(acpi_desc, spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(info, nr, sizeof(*info),
cmp_map, NULL); nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0); /* v1.2 namespaces */ sort(info2, nr, sizeof(*info2), cmp_map2, NULL); nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0); /* support v1.1 namespaces created with the wrong sort order */ sort(info, nr, sizeof(*info), cmp_map_compat, NULL); nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0); /* record the result of the sort for the mapping position */ for (i = 0; i < nr; i++) { struct nfit_set_info2 *map2 = &info2[i]; int j; for (j = 0; j < nr; j++) { struct nd_mapping_desc *mapping = &ndr_desc->mapping[j]; struct nvdimm *nvdimm = mapping->nvdimm; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); struct acpi_nfit_control_region *dcr = nfit_mem->dcr; if (map2->serial_number == dcr->serial_number && map2->vendor_id == dcr->vendor_id && map2->manufacturing_date == dcr->manufacturing_date && map2->manufacturing_location == dcr->manufacturing_location) { mapping->position = i; break; } } } ndr_desc->nd_set = nd_set; devm_kfree(dev, info); devm_kfree(dev, info2); return 0; } static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_nfit_system_address *spa = nfit_spa->spa; int cmd_rc, rc; cmd->address = spa->address; cmd->length = spa->length; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, sizeof(*cmd), &cmd_rc); if (rc < 0) return rc; return cmd_rc; } static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa, enum nfit_ars_state req_type) { int rc; int cmd_rc; struct nd_cmd_ars_start ars_start; struct acpi_nfit_system_address *spa = nfit_spa->spa; struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; memset(&ars_start, 0, sizeof(ars_start)); ars_start.address = spa->address; ars_start.length = spa->length; if (req_type == ARS_REQ_SHORT) ars_start.flags = ND_ARS_RETURN_PREV_DATA; if 
	(nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	/* results reported after this point are no longer stale */
	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	return 0;
}

/* Resume an in-progress ARS at the restart point from the status payload */
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	ars_start = (struct nd_cmd_ars_start) {
		.address = ars_status->restart_address,
		.length = ars_status->restart_length,
		.type = ars_status->type,
	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

/* Fetch the current ARS status payload into acpi_desc->ars_status */
static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->max_ars, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
*/ if (acpi_desc->scrub_spa != nfit_spa) return; if ((ars_status->address >= spa->address && ars_status->address < spa->address + spa->length) || (ars_status->address < spa->address)) { /* * Assume that if a scrub starts at an offset from the * start of nfit_spa that we are in the continuation * case. * * Otherwise, if the scrub covers the spa range, mark * any pending request complete. */ if (ars_status->address + ars_status->length >= spa->address + spa->length) /* complete */; else return; } else return; acpi_desc->scrub_spa = NULL; if (nd_region) { dev = nd_region_dev(nd_region); nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON); } else dev = acpi_desc->dev; dev_dbg(dev, "ARS: range %d complete\n", spa->range_index); } static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus; struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; int rc; u32 i; /* * First record starts at 44 byte offset from the start of the * payload. */ if (ars_status->out_length < 44) return 0; /* * Ignore potentially stale results that are only refreshed * after a start-ARS event. 
 */
	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
				ars_status->num_records);
		return 0;
	}

	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}

/* devm action: undo acpi_nfit_insert_resource() */
static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

/*
 * Advertise the SPA range in the iomem tree as "Persistent Memory",
 * unless a platform resource already covers it as PMEM.
 */
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource, res);
	if (ret)
		return ret;

	return 0;
}

/* Resolve one memdev to its nvdimm and fill in the region mapping */
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	}

	return 0;
}

/* "virtual" SPA ranges: disk/CD image types with no ARS support */
static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
			nfit_spa_type(spa) == NFIT_SPA_VCD ||
			nfit_spa_type(spa) == NFIT_SPA_PDISK ||
			nfit_spa_type(spa) == NFIT_SPA_PCD);
}

/* SPA range types registered as volatile regions on the nvdimm bus */
static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
			nfit_spa_type(spa) == NFIT_SPA_VCD ||
			nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}

/*
 * Translate one SPA range into an nd_region: build the mapping list,
 * resolve NUMA placement, compute interleave-set cookies, then create a
 * pmem or volatile region on the nvdimm bus.
 */
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region_desc *ndr_desc, _ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&_ndr_desc, 0, sizeof(_ndr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &_ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
		ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
		ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
	} else {
		ndr_desc->numa_node = NUMA_NO_NODE;
		ndr_desc->target_node = NUMA_NO_NODE;
	}

	/* Fallback to address based numa information if node lookup failed */
	if (ndr_desc->numa_node == NUMA_NO_NODE) {
		ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
		dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
			NUMA_NO_NODE, ndr_desc->numa_node, &res.start,
			&res.end);
	}
	if
(ndr_desc->target_node == NUMA_NO_NODE) { ndr_desc->target_node = phys_to_target_node(spa->address); dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); } /* * Persistence domain bits are hierarchical, if * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. */ if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct nd_mapping_desc *mapping; /* range index 0 == unmapped in SPA or invalid-SPA */ if (memdev->range_index == 0 || spa->range_index == 0) continue; if (memdev->range_index != spa->range_index) continue; if (count >= ND_MAX_MAPPINGS) { dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", spa->range_index, ND_MAX_MAPPINGS); return -ENXIO; } mapping = &mappings[count++]; rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc, memdev, nfit_spa); if (rc) goto out; } ndr_desc->mapping = mappings; ndr_desc->num_mappings = count; rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); if (rc) goto out; nvdimm_bus = acpi_desc->nvdimm_bus; if (nfit_spa_type(spa) == NFIT_SPA_PM) { rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); if (rc) { dev_warn(acpi_desc->dev, "failed to insert pmem resource to iomem: %d\n", rc); goto out; } nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; } else if (nfit_spa_is_volatile(spa)) { nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; } else if (nfit_spa_is_virtual(spa)) { nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, ndr_desc); if 
(!nfit_spa->nd_region) rc = -ENOMEM; } out: if (rc) dev_err(acpi_desc->dev, "failed to register spa range %d\n", nfit_spa->spa->range_index); return rc; } static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc) { struct device *dev = acpi_desc->dev; struct nd_cmd_ars_status *ars_status; if (acpi_desc->ars_status) { memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); return 0; } ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL); if (!ars_status) return -ENOMEM; acpi_desc->ars_status = ars_status; return 0; } static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc) { int rc; if (ars_status_alloc(acpi_desc)) return -ENOMEM; rc = ars_get_status(acpi_desc); if (rc < 0 && rc != -ENOSPC) return rc; if (ars_status_process_records(acpi_desc)) dev_err(acpi_desc->dev, "Failed to process ARS records\n"); return rc; } static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { int rc; if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) return acpi_nfit_register_region(acpi_desc, nfit_spa); set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); if (!no_init_ars) set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); switch (acpi_nfit_query_poison(acpi_desc)) { case 0: case -ENOSPC: case -EAGAIN: rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); /* shouldn't happen, try again later */ if (rc == -EBUSY) break; if (rc) { set_bit(ARS_FAILED, &nfit_spa->ars_state); break; } clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); rc = acpi_nfit_query_poison(acpi_desc); if (rc) break; acpi_desc->scrub_spa = nfit_spa; ars_complete(acpi_desc, nfit_spa); /* * If ars_complete() says we didn't complete the * short scrub, we'll try again with a long * request. */ acpi_desc->scrub_spa = NULL; break; case -EBUSY: case -ENOMEM: /* * BIOS was using ARS, wait for it to complete (or * resources to become available) and then perform our * own scrubs. 
*/ break; default: set_bit(ARS_FAILED, &nfit_spa->ars_state); break; } return acpi_nfit_register_region(acpi_desc, nfit_spa); } static void ars_complete_all(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; ars_complete(acpi_desc, nfit_spa); } } static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, int query_rc) { unsigned int tmo = acpi_desc->scrub_tmo; struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; lockdep_assert_held(&acpi_desc->init_mutex); if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) return 0; if (query_rc == -EBUSY) { dev_dbg(dev, "ARS: ARS busy\n"); return min(30U * 60U, tmo * 2); } if (query_rc == -ENOSPC) { dev_dbg(dev, "ARS: ARS continue\n"); ars_continue(acpi_desc); return 1; } if (query_rc && query_rc != -EAGAIN) { unsigned long long addr, end; addr = acpi_desc->ars_status->address; end = addr + acpi_desc->ars_status->length; dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end, query_rc); } ars_complete_all(acpi_desc); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { enum nfit_ars_state req_type; int rc; if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) continue; /* prefer short ARS requests first */ if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)) req_type = ARS_REQ_SHORT; else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) req_type = ARS_REQ_LONG; else continue; rc = ars_start(acpi_desc, nfit_spa, req_type); dev = nd_region_dev(nfit_spa->nd_region); dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n", nfit_spa->spa->range_index, req_type == ARS_REQ_SHORT ? "short" : "long", rc); /* * Hmm, we raced someone else starting ARS? Try again in * a bit. 
*/ if (rc == -EBUSY) return 1; if (rc == 0) { dev_WARN_ONCE(dev, acpi_desc->scrub_spa, "scrub start while range %d active\n", acpi_desc->scrub_spa->spa->range_index); clear_bit(req_type, &nfit_spa->ars_state); acpi_desc->scrub_spa = nfit_spa; /* * Consider this spa last for future scrub * requests */ list_move_tail(&nfit_spa->list, &acpi_desc->spas); return 1; } dev_err(dev, "ARS: range %d ARS failed (%d)\n", nfit_spa->spa->range_index, rc); set_bit(ARS_FAILED, &nfit_spa->ars_state); } return 0; } static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) { lockdep_assert_held(&acpi_desc->init_mutex); set_bit(ARS_BUSY, &acpi_desc->scrub_flags); /* note this should only be set from within the workqueue */ if (tmo) acpi_desc->scrub_tmo = tmo; queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ); } static void sched_ars(struct acpi_nfit_desc *acpi_desc) { __sched_ars(acpi_desc, 0); } static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) { lockdep_assert_held(&acpi_desc->init_mutex); clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); acpi_desc->scrub_count++; if (acpi_desc->scrub_count_state) sysfs_notify_dirent(acpi_desc->scrub_count_state); } static void acpi_nfit_scrub(struct work_struct *work) { struct acpi_nfit_desc *acpi_desc; unsigned int tmo; int query_rc; acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work); mutex_lock(&acpi_desc->init_mutex); query_rc = acpi_nfit_query_poison(acpi_desc); tmo = __acpi_nfit_scrub(acpi_desc, query_rc); if (tmo) __sched_ars(acpi_desc, tmo); else notify_ars_done(acpi_desc); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); clear_bit(ARS_POLL, &acpi_desc->scrub_flags); mutex_unlock(&acpi_desc->init_mutex); } static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { int type = nfit_spa_type(nfit_spa->spa); struct nd_cmd_ars_cap ars_cap; int rc; set_bit(ARS_FAILED, &nfit_spa->ars_state); memset(&ars_cap, 0, sizeof(ars_cap)); rc = ars_get_cap(acpi_desc, 
/* NOTE(review): continuation of acpi_nfit_init_ars() — the argument list
 * below completes a call (presumably ars_get_cap(acpi_desc, ...)) whose
 * start lies above this chunk; confirm against the full file. */
&ars_cap, nfit_spa);
	if (rc < 0)
		return;

	/* check that the supported scrub types match the spa type */
	if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) &
				ND_ARS_VOLATILE) == 0)
		return;

	if (type == NFIT_SPA_PM && ((ars_cap.status >> 16) &
				ND_ARS_PERSISTENT) == 0)
		return;

	nfit_spa->max_ars = ars_cap.max_ars_out;
	nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
	acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
	clear_bit(ARS_FAILED, &nfit_spa->ars_state);
}

/*
 * Probe ARS capabilities for all PMEM/volatile SPA ranges, then register
 * every known region type with the nvdimm core.  Background ARS is only
 * scheduled if at least one region registered ARS successfully.
 */
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc, do_sched_ars = 0;

	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			acpi_nfit_init_ars(acpi_desc, nfit_spa);
			break;
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			/* register regions and kick off initial ARS run */
			rc = ars_register(acpi_desc, nfit_spa);
			if (rc)
				return rc;

			/*
			 * Kick off background ARS if at least one
			 * region successfully registered ARS
			 */
			if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
				do_sched_ars++;
			break;
		case NFIT_SPA_BDW:
			/* nothing to register */
			break;
		case NFIT_SPA_DCR:
		case NFIT_SPA_VDISK:
		case NFIT_SPA_VCD:
		case NFIT_SPA_PDISK:
		case NFIT_SPA_PCD:
			/* register known regions that don't support ARS */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		default:
			/* don't register unknown regions */
			break;
		}
	}

	if (do_sched_ars)
		sched_ars(acpi_desc);
	return 0;
}

/*
 * A _FIT update may only add entries relative to the tables seen at the
 * previous parse; if any previously-seen table is now missing (i.e. the
 * "prev" lists are non-empty after re-parsing), reject the update.
 */
static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

/*
 * Cache a kernfs reference to the bus-level nfit/scrub sysfs attribute.
 * NOTE(review): presumably used later to notify pollers of scrub-count
 * changes — the consumer is outside this chunk, confirm in the full file.
 */
static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}

/* devm teardown action: unregister the nvdimm bus created in acpi_nfit_init() */
static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}

/*
 * Parse an NFIT (or _FIT) buffer of @sz bytes at @data.  On the first call
 * the nvdimm bus is registered and the descriptor is added to the global
 * mce-notification list; on every call the sub-tables are (re)parsed under
 * init_mutex, deletions are rejected, and dimms/regions are registered.
 */
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	/* move the previously-parsed tables aside so add_table() can
	 * re-match them against the new buffer */
	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

/* nvdimm bus callback: wait for in-flight probe/notify work to settle */
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

/* Refuse a userspace ARS start while the kernel's scrub worker is busy */
static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ARS it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->dwork.work))
		return -EBUSY;

	return 0;
}

/*
 * Prevent security and firmware activate commands from being issued via
 * ioctl.
 */
static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
	struct nd_cmd_pkg *call_pkg = buf;
	unsigned int func;

	if (nvdimm && cmd == ND_CMD_CALL &&
			call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
		func = call_pkg->nd_command;
		if (func > NVDIMM_CMD_MAX ||
		    (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
			return -EOPNOTSUPP;
	}

	/* block all non-nfit bus commands */
	if (!nvdimm && cmd == ND_CMD_CALL &&
			call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
		return -EOPNOTSUPP;

	return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}

/*
 * Request an ARS rescan of type @req_type on every scrubbable SPA range.
 * Returns 0 if at least one scrub was scheduled, -EBUSY if all candidate
 * ranges were already pending, -ENOTTY if nothing was eligible.
 */
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
		enum nfit_ars_state req_type)
{
	struct device *dev = acpi_desc->dev;
	int scheduled = 0, busy = 0;
	struct nfit_spa *nfit_spa;

	mutex_lock(&acpi_desc->init_mutex);
	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int type = nfit_spa_type(nfit_spa->spa);

		if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
			continue;
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		if (test_and_set_bit(req_type, &nfit_spa->ars_state))
			busy++;
		else
			scheduled++;
	}
	if (scheduled) {
		sched_ars(acpi_desc);
		dev_dbg(dev, "ars_scan triggered\n");
	}
	mutex_unlock(&acpi_desc->init_mutex);

	if (scheduled)
		return 0;
	if (busy)
		return -EBUSY;
	return -ENOTTY;
}

/* Initialize an acpi_nfit_desc and wire up its nvdimm bus descriptor */
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	acpi_desc->scrub_tmo = 1;
	INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

/* devm teardown action: drop the acpi_get_table() reference */
static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}

/* ACPI notify handler: dispatch under the device lock */
static void acpi_nfit_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;

	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, handle, event);
	device_unlock(&adev->dev);
}

/* devm teardown action: remove the ACPI notify handler */
static void acpi_nfit_remove_notify_handler(void *data)
{
	struct acpi_device *adev = data;

	acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY,
				       acpi_nfit_notify);
}

/* Cancel scrubbing and quiesce all workqueue activity for this descriptor */
void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
	cancel_delayed_work_sync(&acpi_desc->dwork);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);

/*
 * Driver probe: locate the static NFIT (optional), prefer a _FIT
 * evaluation when present, and parse whichever buffer was found.
 */
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* The NVDIMM root device allows OS to trigger enumeration of
		 * NVDIMMs through NFIT at boot time and re-enumeration at
		 * root level via the _FIT method during runtime.
		 * This is ok to return 0 here, we could have an nvdimm
		 * hotplugged later and evaluate _FIT method which returns
		 * data in the format of a series of NFIT Structures.
		 */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
				(int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
	if (rc)
		return rc;

	rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
					     acpi_nfit_notify);
	if (rc)
		return rc;

	return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
					adev);
}

/* Handle a NFIT_NOTIFY_UPDATE event: re-evaluate _FIT and merge it in */
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "no driver found for dev\n");
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}

/* Handle an uncorrectable-memory-error event by kicking off a rescan */
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);

	if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	else
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}

/* Dispatch ACPI notifications; caller holds the device lock */
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
	},
};

static __init int nfit_init(void)
{
	int ret;

	/* struct layouts below are fixed by the ACPI specification */
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 16);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 8);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
	guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
linux-master
drivers/acpi/nfit/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * APEI Boot Error Record Table (BERT) support
 *
 * Copyright 2011 Intel Corp.
 *   Author: Huang Ying <[email protected]>
 *
 * Under normal circumstances, when a hardware error occurs, the error
 * handler receives control and processes the error. This gives OSPM a
 * chance to process the error condition, report it, and optionally attempt
 * recovery. In some cases, the system is unable to process an error.
 * For example, system firmware or a management controller may choose to
 * reset the system or the system might experience an uncontrolled crash
 * or reset.The boot error source is used to report unhandled errors that
 * occurred in a previous boot. This mechanism is described in the BERT
 * table.
 *
 * For more information about BERT, please refer to ACPI Specification
 * version 4.0, section 17.3.1
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cper.h>
#include <linux/io.h>

#include "apei-internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "BERT: " fmt

/* Console-spam limits; the full data remains available via sysfs */
#define ACPI_BERT_PRINT_MAX_RECORDS 5
#define ACPI_BERT_PRINT_MAX_LEN 1024

static int bert_disable __initdata;

/*
 * Print "all" the error records in the BERT table, but avoid huge spam to
 * the console if the BIOS included oversize records, or too many records.
 * Skipping some records here does not lose anything because the full
 * data is available to user tools in:
 *	/sys/firmware/acpi/tables/data/BERT
 */
static void __init bert_print_all(struct acpi_bert_region *region,
				  unsigned int region_len)
{
	struct acpi_hest_generic_status *estatus =
		(struct acpi_hest_generic_status *)region;
	int remain = region_len;
	int printed = 0, skipped = 0;
	u32 estatus_len;

	while (remain >= sizeof(struct acpi_bert_region)) {
		estatus_len = cper_estatus_len(estatus);
		if (remain < estatus_len) {
			pr_err(FW_BUG "Truncated status block (length: %u).\n",
			       estatus_len);
			break;
		}

		/* No more error records. */
		if (!estatus->block_status)
			break;

		if (cper_estatus_check(estatus)) {
			pr_err(FW_BUG "Invalid error record.\n");
			break;
		}

		if (estatus_len < ACPI_BERT_PRINT_MAX_LEN &&
		    printed < ACPI_BERT_PRINT_MAX_RECORDS) {
			pr_info_once("Error records from previous boot:\n");
			cper_estatus_print(KERN_INFO HW_ERR, estatus);
			printed++;
		} else {
			skipped++;
		}

		/*
		 * Because the boot error source is "one-time polled" type,
		 * clear Block Status of current Generic Error Status Block,
		 * once it's printed.
		 */
		estatus->block_status = 0;

		estatus = (void *)estatus + estatus_len;
		remain -= estatus_len;
	}

	if (skipped)
		pr_info(HW_ERR "Skipped %d error records\n", skipped);

	if (printed + skipped)
		pr_info("Total records found: %d\n", printed + skipped);
}

/* Kernel parameter "bert_disable" turns BERT processing off */
static int __init setup_bert_disable(char *str)
{
	bert_disable = 1;

	return 1;
}
__setup("bert_disable", setup_bert_disable);

/* Sanity-check the firmware-provided table and region lengths */
static int __init bert_check_table(struct acpi_table_bert *bert_tab)
{
	if (bert_tab->header.length < sizeof(struct acpi_table_bert) ||
	    bert_tab->region_length < sizeof(struct acpi_bert_region))
		return -EINVAL;

	return 0;
}

/*
 * Locate the BERT table, claim and map its boot-error region, print the
 * records, then release everything.  Absence of the table is not an error.
 */
static int __init bert_init(void)
{
	struct apei_resources bert_resources;
	struct acpi_bert_region *boot_error_region;
	struct acpi_table_bert *bert_tab;
	unsigned int region_len;
	acpi_status status;
	int rc = 0;

	if (acpi_disabled)
		return 0;

	if (bert_disable) {
		pr_info("Boot Error Record Table support is disabled.\n");
		return 0;
	}

	status = acpi_get_table(ACPI_SIG_BERT, 0,
				(struct acpi_table_header **)&bert_tab);
	if (status == AE_NOT_FOUND)
		return 0;

	if (ACPI_FAILURE(status)) {
		pr_err("get table failed, %s.\n",
		       acpi_format_exception(status));
		return -EINVAL;
	}

	rc = bert_check_table(bert_tab);
	if (rc) {
		pr_err(FW_BUG "table invalid.\n");
		goto out_put_bert_tab;
	}

	region_len = bert_tab->region_length;
	apei_resources_init(&bert_resources);
	rc = apei_resources_add(&bert_resources, bert_tab->address,
				region_len, true);
	if (rc)
		goto out_put_bert_tab;
	rc = apei_resources_request(&bert_resources, "APEI BERT");
	if (rc)
		goto out_fini;
	boot_error_region = ioremap_cache(bert_tab->address, region_len);
	if (boot_error_region) {
		bert_print_all(boot_error_region, region_len);
		iounmap(boot_error_region);
	} else {
		rc = -ENOMEM;
	}

	apei_resources_release(&bert_resources);
out_fini:
	apei_resources_fini(&bert_resources);
out_put_bert_tab:
	acpi_put_table((struct acpi_table_header *)bert_tab);

	return rc;
}

late_initcall(bert_init);
linux-master
drivers/acpi/apei/bert.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows to report errors (for example from the chipset) to
 * the operating system. This improves NMI handling especially. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file has Common functions used by more than one APEI table,
 * including framework of interpreter for ERST and EINJ; resource
 * management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 *	Author: Huang Ying <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <acpi/apei.h>
#include <asm/unaligned.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */

/* Entry flag: preserve the untouched bits when writing the register */
#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

/* Read the GAR of @entry, shift and mask per the entry's description */
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

/* Like read_register, but leave a boolean "matched entry->value" in ctx */
int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

/* Write @val to the GAR of @entry, honoring mask/offset and PRESERVE flag */
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;

		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	ctx->value = entry->value;

	return apei_exec_write_register(ctx, entry);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);

/*
 * Interpret the specified action. Go through whole action table,
 * execute all instructions belong to the action.
 */
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of current instruction,
	 * "ctx->ip" specifies the next instruction to executed,
	 * instruction "run" function may change the "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warn(FW_WARN APEI_PFX
					"Invalid action table, unknown instruction type: %d\n",
					entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		/* a backward "goto" forces a rescan from the table start */
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);

typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);

/*
 * Apply @func to every action-table entry; on failure *end holds the
 * index of the last entry visited so callers can unwind partial work.
 */
static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
				    apei_exec_entry_func_t func,
				    void *data,
				    int *end)
{
	u8 ins;
	int i, rc;
	struct acpi_whea_header *entry;
	struct apei_exec_ins_type *ins_table = ctx->ins_table;

	for (i = 0; i < ctx->entries; i++) {
		entry = ctx->action_table + i;
		ins = entry->instruction;
		if (end)
			*end = i;
		if (ins >= ctx->instructions || !ins_table[ins].run) {
			pr_warn(FW_WARN APEI_PFX
				"Invalid action table, unknown instruction type: %d\n",
				ins);
			return -EINVAL;
		}
		rc = func(ctx, entry, data);
		if (rc)
			return rc;
	}

	return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return apei_map_generic_address(&entry->register_region);

	return 0;
}

/*
 * Pre-map all GARs in action table to make it possible to access them
 * in NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;

		/* unmap only the entries that were mapped before failure */
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		apei_unmap_generic_address(&entry->register_region);

	return 0;
}

/* Post-unmap all GAR in action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);

/*
 * Resource management for GARs in APEI
 */
/* [start, end) interval on one of the iomem/ioport lists */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
static struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

/*
 * Insert [start, start+size) into @res_list, merging with any overlapping
 * or adjacent intervals so the list stays a set of disjoint ranges.
 */
static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		/* overlapping: absorb the existing node and re-scan,
		 * since the widened interval may now touch others */
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res_ins), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}

/*
 * Subtract every interval in @res_list2 from @res_list1, splitting
 * intervals in place where a subtrahend falls strictly inside one.
 */
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;

	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				/* fully covered: drop res1 */
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				/* res2 strictly inside res1: split in two */
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				/* partial overlap: trim one side */
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

/* Union resources2 into resources1 */
static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

int apei_resources_add(struct apei_resources *resources,
		       unsigned long start, unsigned long size,
		       bool iomem)
{
	if (iomem)
		return apei_res_add(&resources->iomem, start, size);
	else
		return apei_res_add(&resources->ioport, start, size);
}
EXPORT_SYMBOL_GPL(apei_resources_add);

/*
 * EINJ has two groups of GARs (EINJ table entry and trigger table
 * entry), so common resources are subtracted from the trigger table
 * resources before the second requesting.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;

	return apei_res_add(&resources->iomem, start, size);
}

static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}

/* Optional arch hook to enumerate regions APEI must not claim */
int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
					 void *data), void *data);
static int apei_get_arch_resources(struct apei_resources *resources)
{
	return arch_apei_filter_addr(apei_get_res_callback, resources);
}

/*
 * IO memory/port resource management mechanism is used to check
 * whether memory/port area used by GARs conflicts with normal memory
 * or IO memory/port of devices.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses ACPI NVS region, that has been marked as
	 * busy, so exclude it from APEI resources to avoid false
	 * conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [io  %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	goto arch_res_fini;

err_unmap_ioport:
	/* release only the regions requested before res_bak */
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);

/*
 * Validate a Generic Address Structure and derive its physical address
 * and access width in bits; also works around common BIOS width bugs.
 */
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
				u32 *access_bit_width)
{
	u32 bit_width, bit_offset, access_size_code, space_id;

	bit_width = reg->bit_width;
	bit_offset = reg->bit_offset;
	access_size_code = reg->access_width;
	space_id = reg->space_id;
	*paddr = get_unaligned(&reg->address);
	if (!*paddr) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (access_size_code < 1 || access_size_code > 4) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}
	*access_bit_width = 1UL << (access_size_code + 2);

	/* Fixup common BIOS bug */
	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
	    *access_bit_width < 32)
		*access_bit_width = 32;
	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
	    *access_bit_width < 64)
		*access_bit_width = 64;

	if ((bit_width + bit_offset) > *access_bit_width) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warn(FW_BUG APEI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
			*paddr, bit_width, bit_offset, access_size_code,
			space_id);
		return -EINVAL;
	}

	return 0;
}

int apei_map_generic_address(struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	/* IO space doesn't need mapping */
	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		return 0;

	if (!acpi_os_map_generic_address(reg))
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_map_generic_address);

/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					       val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);

/* write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address) address,
						val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u32 access_bit_width;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    access_bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    access_bit_width / 8);
	default:
		return -EINVAL;
	}
}

/*
 * Same register may be used by multiple instructions in GARs, so
 * resources are collected before requesting.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);

/* Lazily create and share the "apei" debugfs directory */
struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);

int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);

void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);

/* Negotiate WHEA _OSC with firmware to grant the OS APEI control */
int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= whea_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 1;
	capbuf[OSC_CONTROL_DWORD] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) ||
	    ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;
	else {
		kfree(context.ret.pointer);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(apei_osc_setup);
linux-master
drivers/acpi/apei/apei-base.c
// SPDX-License-Identifier: GPL-2.0-only /* * APEI Error INJection support * * EINJ provides a hardware error injection mechanism, this is useful * for debugging and testing of other APEI and RAS features. * * For more information about EINJ, please refer to ACPI Specification * version 4.0, section 17.5. * * Copyright 2009-2010 Intel Corp. * Author: Huang Ying <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/nmi.h> #include <linux/delay.h> #include <linux/mm.h> #include <asm/unaligned.h> #include "apei-internal.h" #undef pr_fmt #define pr_fmt(fmt) "EINJ: " fmt #define SLEEP_UNIT_MIN 1000 /* 1ms */ #define SLEEP_UNIT_MAX 5000 /* 5ms */ /* Firmware should respond within 1 seconds */ #define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC) #define ACPI5_VENDOR_BIT BIT(31) #define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \ ACPI_EINJ_MEMORY_UNCORRECTABLE | \ ACPI_EINJ_MEMORY_FATAL) /* * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action. */ static int acpi5; struct set_error_type_with_address { u32 type; u32 vendor_extension; u32 flags; u32 apicid; u64 memory_address; u64 memory_address_range; u32 pcie_sbdf; }; enum { SETWA_FLAGS_APICID = 1, SETWA_FLAGS_MEM = 2, SETWA_FLAGS_PCIE_SBDF = 4, }; /* * Vendor extensions for platform specific operations */ struct vendor_error_type_extension { u32 length; u32 pcie_sbdf; u16 vendor_id; u16 device_id; u8 rev_id; u8 reserved[3]; }; static u32 notrigger; static u32 vendor_flags; static struct debugfs_blob_wrapper vendor_blob; static char vendor_dev[64]; /* * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the * EINJ table through an unpublished extension. Use with caution as * most will ignore the parameter and make their own choice of address * for error injection. This extension is used only if * param_extension module parameter is specified. 
*/ struct einj_parameter { u64 type; u64 reserved1; u64 reserved2; u64 param1; u64 param2; }; #define EINJ_OP_BUSY 0x1 #define EINJ_STATUS_SUCCESS 0x0 #define EINJ_STATUS_FAIL 0x1 #define EINJ_STATUS_INVAL 0x2 #define EINJ_TAB_ENTRY(tab) \ ((struct acpi_whea_header *)((char *)(tab) + \ sizeof(struct acpi_table_einj))) static bool param_extension; module_param(param_extension, bool, 0); static struct acpi_table_einj *einj_tab; static struct apei_resources einj_resources; static struct apei_exec_ins_type einj_ins_type[] = { [ACPI_EINJ_READ_REGISTER] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_read_register, }, [ACPI_EINJ_READ_REGISTER_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_read_register_value, }, [ACPI_EINJ_WRITE_REGISTER] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_write_register, }, [ACPI_EINJ_WRITE_REGISTER_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_write_register_value, }, [ACPI_EINJ_NOOP] = { .flags = 0, .run = apei_exec_noop, }, }; /* * Prevent EINJ interpreter to run simultaneously, because the * corresponding firmware implementation may not work properly when * invoked simultaneously. 
*/ static DEFINE_MUTEX(einj_mutex); static void *einj_param; static void einj_exec_ctx_init(struct apei_exec_context *ctx) { apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); } static int __einj_get_available_error_type(u32 *type) { struct apei_exec_context ctx; int rc; einj_exec_ctx_init(&ctx); rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); if (rc) return rc; *type = apei_exec_ctx_get_output(&ctx); return 0; } /* Get error injection capabilities of the platform */ static int einj_get_available_error_type(u32 *type) { int rc; mutex_lock(&einj_mutex); rc = __einj_get_available_error_type(type); mutex_unlock(&einj_mutex); return rc; } static int einj_timedout(u64 *t) { if ((s64)*t < SLEEP_UNIT_MIN) { pr_warn(FW_WARN "Firmware does not respond in time\n"); return 1; } *t -= SLEEP_UNIT_MIN; usleep_range(SLEEP_UNIT_MIN, SLEEP_UNIT_MAX); return 0; } static void check_vendor_extension(u64 paddr, struct set_error_type_with_address *v5param) { int offset = v5param->vendor_extension; struct vendor_error_type_extension *v; u32 sbdf; if (!offset) return; v = acpi_os_map_iomem(paddr + offset, sizeof(*v)); if (!v) return; sbdf = v->pcie_sbdf; sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", sbdf >> 24, (sbdf >> 16) & 0xff, (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, v->vendor_id, v->device_id, v->rev_id); acpi_os_unmap_iomem(v, sizeof(*v)); } static void *einj_get_parameter_address(void) { int i; u64 pa_v4 = 0, pa_v5 = 0; struct acpi_whea_header *entry; entry = EINJ_TAB_ENTRY(einj_tab); for (i = 0; i < einj_tab->entries; i++) { if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) pa_v4 = get_unaligned(&entry->register_region.address); if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS && entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == 
ACPI_ADR_SPACE_SYSTEM_MEMORY) pa_v5 = get_unaligned(&entry->register_region.address); entry++; } if (pa_v5) { struct set_error_type_with_address *v5param; v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param)); if (v5param) { acpi5 = 1; check_vendor_extension(pa_v5, v5param); return v5param; } } if (param_extension && pa_v4) { struct einj_parameter *v4param; v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param)); if (!v4param) return NULL; if (v4param->reserved1 || v4param->reserved2) { acpi_os_unmap_iomem(v4param, sizeof(*v4param)); return NULL; } return v4param; } return NULL; } /* do sanity check to trigger table */ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) { if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) return -EINVAL; if (trigger_tab->table_size > PAGE_SIZE || trigger_tab->table_size < trigger_tab->header_size) return -EINVAL; if (trigger_tab->entry_count != (trigger_tab->table_size - trigger_tab->header_size) / sizeof(struct acpi_einj_entry)) return -EINVAL; return 0; } static struct acpi_generic_address *einj_get_trigger_parameter_region( struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2) { int i; struct acpi_whea_header *entry; entry = (struct acpi_whea_header *) ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); for (i = 0; i < trigger_tab->entry_count; i++) { if (entry->action == ACPI_EINJ_TRIGGER_ERROR && entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && (entry->register_region.address & param2) == (param1 & param2)) return &entry->register_region; entry++; } return NULL; } /* Execute instructions in trigger error action table */ static int __einj_error_trigger(u64 trigger_paddr, u32 type, u64 param1, u64 param2) { struct acpi_einj_trigger *trigger_tab = NULL; struct apei_exec_context trigger_ctx; struct apei_resources trigger_resources; struct acpi_whea_header *trigger_entry; struct resource *r; u32 table_size; 
int rc = -EIO; struct acpi_generic_address *trigger_param_region = NULL; r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n", (unsigned long long)trigger_paddr, (unsigned long long)trigger_paddr + sizeof(*trigger_tab) - 1); goto out; } trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); if (!trigger_tab) { pr_err("Failed to map trigger table!\n"); goto out_rel_header; } rc = einj_check_trigger_header(trigger_tab); if (rc) { pr_warn(FW_BUG "Invalid trigger error action table.\n"); goto out_rel_header; } /* No action structures in the TRIGGER_ERROR table, nothing to do */ if (!trigger_tab->entry_count) goto out_rel_header; rc = -EIO; table_size = trigger_tab->table_size; r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), table_size - sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", (unsigned long long)trigger_paddr + sizeof(*trigger_tab), (unsigned long long)trigger_paddr + table_size - 1); goto out_rel_header; } iounmap(trigger_tab); trigger_tab = ioremap_cache(trigger_paddr, table_size); if (!trigger_tab) { pr_err("Failed to map trigger table!\n"); goto out_rel_entry; } trigger_entry = (struct acpi_whea_header *) ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); apei_resources_init(&trigger_resources); apei_exec_ctx_init(&trigger_ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), trigger_entry, trigger_tab->entry_count); rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); if (rc) goto out_fini; rc = apei_resources_sub(&trigger_resources, &einj_resources); if (rc) goto out_fini; /* * Some firmware will access target address specified in * param1 to trigger the error when injecting memory error. * This will cause resource conflict with regular memory. So * remove it from trigger table resources. 
*/ if ((param_extension || acpi5) && (type & MEM_ERROR_MASK) && param2) { struct apei_resources addr_resources; apei_resources_init(&addr_resources); trigger_param_region = einj_get_trigger_parameter_region( trigger_tab, param1, param2); if (trigger_param_region) { rc = apei_resources_add(&addr_resources, trigger_param_region->address, trigger_param_region->bit_width/8, true); if (rc) goto out_fini; rc = apei_resources_sub(&trigger_resources, &addr_resources); } apei_resources_fini(&addr_resources); if (rc) goto out_fini; } rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); if (rc) goto out_fini; rc = apei_exec_pre_map_gars(&trigger_ctx); if (rc) goto out_release; rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); apei_exec_post_unmap_gars(&trigger_ctx); out_release: apei_resources_release(&trigger_resources); out_fini: apei_resources_fini(&trigger_resources); out_rel_entry: release_mem_region(trigger_paddr + sizeof(*trigger_tab), table_size - sizeof(*trigger_tab)); out_rel_header: release_mem_region(trigger_paddr, sizeof(*trigger_tab)); out: if (trigger_tab) iounmap(trigger_tab); return rc; } static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, u64 param4) { struct apei_exec_context ctx; u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; int rc; einj_exec_ctx_init(&ctx); rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION); if (rc) return rc; apei_exec_ctx_set_input(&ctx, type); if (acpi5) { struct set_error_type_with_address *v5param = einj_param; v5param->type = type; if (type & ACPI5_VENDOR_BIT) { switch (vendor_flags) { case SETWA_FLAGS_APICID: v5param->apicid = param1; break; case SETWA_FLAGS_MEM: v5param->memory_address = param1; v5param->memory_address_range = param2; break; case SETWA_FLAGS_PCIE_SBDF: v5param->pcie_sbdf = param1; break; } v5param->flags = vendor_flags; } else if (flags) { v5param->flags = flags; v5param->memory_address = param1; v5param->memory_address_range = param2; 
v5param->apicid = param3; v5param->pcie_sbdf = param4; } else { switch (type) { case ACPI_EINJ_PROCESSOR_CORRECTABLE: case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: case ACPI_EINJ_PROCESSOR_FATAL: v5param->apicid = param1; v5param->flags = SETWA_FLAGS_APICID; break; case ACPI_EINJ_MEMORY_CORRECTABLE: case ACPI_EINJ_MEMORY_UNCORRECTABLE: case ACPI_EINJ_MEMORY_FATAL: v5param->memory_address = param1; v5param->memory_address_range = param2; v5param->flags = SETWA_FLAGS_MEM; break; case ACPI_EINJ_PCIX_CORRECTABLE: case ACPI_EINJ_PCIX_UNCORRECTABLE: case ACPI_EINJ_PCIX_FATAL: v5param->pcie_sbdf = param1; v5param->flags = SETWA_FLAGS_PCIE_SBDF; break; } } } else { rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); if (rc) return rc; if (einj_param) { struct einj_parameter *v4param = einj_param; v4param->param1 = param1; v4param->param2 = param2; } } rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); if (rc) return rc; for (;;) { rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); if (!(val & EINJ_OP_BUSY)) break; if (einj_timedout(&timeout)) return -EIO; } rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); if (val == EINJ_STATUS_FAIL) return -EBUSY; else if (val == EINJ_STATUS_INVAL) return -EINVAL; /* * The error is injected into the platform successfully, then it needs * to trigger the error. 
*/ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); if (rc) return rc; trigger_paddr = apei_exec_ctx_get_output(&ctx); if (notrigger == 0) { rc = __einj_error_trigger(trigger_paddr, type, param1, param2); if (rc) return rc; } rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); return rc; } /* Inject the specified hardware error */ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3, u64 param4) { int rc; u64 base_addr, size; /* If user manually set "flags", make sure it is legal */ if (flags && (flags & ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF))) return -EINVAL; /* * We need extra sanity checks for memory errors. * Other types leap directly to injection. */ /* ensure param1/param2 existed */ if (!(param_extension || acpi5)) goto inject; /* ensure injection is memory related */ if (type & ACPI5_VENDOR_BIT) { if (vendor_flags != SETWA_FLAGS_MEM) goto inject; } else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM)) goto inject; /* * Disallow crazy address masks that give BIOS leeway to pick * injection address almost anywhere. Insist on page or * better granularity and that target address is normal RAM or * NVDIMM. 
*/ base_addr = param1 & param2; size = ~param2 + 1; if (((param2 & PAGE_MASK) != PAGE_MASK) || ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) != REGION_INTERSECTS) && (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) != REGION_INTERSECTS) && (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED) != REGION_INTERSECTS) && !arch_is_platform_page(base_addr))) return -EINVAL; if (is_zero_pfn(base_addr >> PAGE_SHIFT)) return -EADDRINUSE; inject: mutex_lock(&einj_mutex); rc = __einj_error_inject(type, flags, param1, param2, param3, param4); mutex_unlock(&einj_mutex); return rc; } static u32 error_type; static u32 error_flags; static u64 error_param1; static u64 error_param2; static u64 error_param3; static u64 error_param4; static struct dentry *einj_debug_dir; static const char * const einj_error_type_string[] = { "0x00000001\tProcessor Correctable\n", "0x00000002\tProcessor Uncorrectable non-fatal\n", "0x00000004\tProcessor Uncorrectable fatal\n", "0x00000008\tMemory Correctable\n", "0x00000010\tMemory Uncorrectable non-fatal\n", "0x00000020\tMemory Uncorrectable fatal\n", "0x00000040\tPCI Express Correctable\n", "0x00000080\tPCI Express Uncorrectable non-fatal\n", "0x00000100\tPCI Express Uncorrectable fatal\n", "0x00000200\tPlatform Correctable\n", "0x00000400\tPlatform Uncorrectable non-fatal\n", "0x00000800\tPlatform Uncorrectable fatal\n", "0x00001000\tCXL.cache Protocol Correctable\n", "0x00002000\tCXL.cache Protocol Uncorrectable non-fatal\n", "0x00004000\tCXL.cache Protocol Uncorrectable fatal\n", "0x00008000\tCXL.mem Protocol Correctable\n", "0x00010000\tCXL.mem Protocol Uncorrectable non-fatal\n", "0x00020000\tCXL.mem Protocol Uncorrectable fatal\n", }; static int available_error_type_show(struct seq_file *m, void *v) { int rc; u32 available_error_type = 0; rc = einj_get_available_error_type(&available_error_type); if (rc) return rc; for (int pos = 0; pos < 
ARRAY_SIZE(einj_error_type_string); pos++) if (available_error_type & BIT(pos)) seq_puts(m, einj_error_type_string[pos]); return 0; } DEFINE_SHOW_ATTRIBUTE(available_error_type); static int error_type_get(void *data, u64 *val) { *val = error_type; return 0; } static int error_type_set(void *data, u64 val) { int rc; u32 available_error_type = 0; u32 tval, vendor; /* Only low 32 bits for error type are valid */ if (val & GENMASK_ULL(63, 32)) return -EINVAL; /* * Vendor defined types have 0x80000000 bit set, and * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE */ vendor = val & ACPI5_VENDOR_BIT; tval = val & 0x7fffffff; /* Only one error type can be specified */ if (tval & (tval - 1)) return -EINVAL; if (!vendor) { rc = einj_get_available_error_type(&available_error_type); if (rc) return rc; if (!(val & available_error_type)) return -EINVAL; } error_type = val; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set, "0x%llx\n"); static int error_inject_set(void *data, u64 val) { if (!error_type) return -EINVAL; return einj_error_inject(error_type, error_flags, error_param1, error_param2, error_param3, error_param4); } DEFINE_DEBUGFS_ATTRIBUTE(error_inject_fops, NULL, error_inject_set, "%llu\n"); static int einj_check_table(struct acpi_table_einj *einj_tab) { if ((einj_tab->header_length != (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) && (einj_tab->header_length != sizeof(struct acpi_table_einj))) return -EINVAL; if (einj_tab->header.length < sizeof(struct acpi_table_einj)) return -EINVAL; if (einj_tab->entries != (einj_tab->header.length - sizeof(struct acpi_table_einj)) / sizeof(struct acpi_einj_entry)) return -EINVAL; return 0; } static int __init einj_init(void) { int rc; acpi_status status; struct apei_exec_context ctx; if (acpi_disabled) { pr_info("ACPI disabled.\n"); return -ENODEV; } status = acpi_get_table(ACPI_SIG_EINJ, 0, (struct acpi_table_header **)&einj_tab); if (status == AE_NOT_FOUND) { pr_warn("EINJ table 
not found.\n"); return -ENODEV; } else if (ACPI_FAILURE(status)) { pr_err("Failed to get EINJ table: %s\n", acpi_format_exception(status)); return -EINVAL; } rc = einj_check_table(einj_tab); if (rc) { pr_warn(FW_BUG "Invalid EINJ table.\n"); goto err_put_table; } rc = -ENOMEM; einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); debugfs_create_file("available_error_type", S_IRUSR, einj_debug_dir, NULL, &available_error_type_fops); debugfs_create_file_unsafe("error_type", 0600, einj_debug_dir, NULL, &error_type_fops); debugfs_create_file_unsafe("error_inject", 0200, einj_debug_dir, NULL, &error_inject_fops); apei_resources_init(&einj_resources); einj_exec_ctx_init(&ctx); rc = apei_exec_collect_resources(&ctx, &einj_resources); if (rc) { pr_err("Error collecting EINJ resources.\n"); goto err_fini; } rc = apei_resources_request(&einj_resources, "APEI EINJ"); if (rc) { pr_err("Error requesting memory/port resources.\n"); goto err_fini; } rc = apei_exec_pre_map_gars(&ctx); if (rc) { pr_err("Error pre-mapping GARs.\n"); goto err_release; } einj_param = einj_get_parameter_address(); if ((param_extension || acpi5) && einj_param) { debugfs_create_x32("flags", S_IRUSR | S_IWUSR, einj_debug_dir, &error_flags); debugfs_create_x64("param1", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param1); debugfs_create_x64("param2", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param2); debugfs_create_x64("param3", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param3); debugfs_create_x64("param4", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param4); debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, einj_debug_dir, &notrigger); } if (vendor_dev[0]) { vendor_blob.data = vendor_dev; vendor_blob.size = strlen(vendor_dev); debugfs_create_blob("vendor", S_IRUSR, einj_debug_dir, &vendor_blob); debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, einj_debug_dir, &vendor_flags); } pr_info("Error INJection is initialized.\n"); return 0; err_release: apei_resources_release(&einj_resources); 
err_fini: apei_resources_fini(&einj_resources); debugfs_remove_recursive(einj_debug_dir); err_put_table: acpi_put_table((struct acpi_table_header *)einj_tab); return rc; } static void __exit einj_exit(void) { struct apei_exec_context ctx; if (einj_param) { acpi_size size = (acpi5) ? sizeof(struct set_error_type_with_address) : sizeof(struct einj_parameter); acpi_os_unmap_iomem(einj_param, size); } einj_exec_ctx_init(&ctx); apei_exec_post_unmap_gars(&ctx); apei_resources_release(&einj_resources); apei_resources_fini(&einj_resources); debugfs_remove_recursive(einj_debug_dir); acpi_put_table((struct acpi_table_header *)einj_tab); } module_init(einj_init); module_exit(einj_exit); MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("APEI Error INJection support"); MODULE_LICENSE("GPL");
linux-master
drivers/acpi/apei/einj.c
// SPDX-License-Identifier: GPL-2.0-only /* * APEI Error Record Serialization Table support * * ERST is a way provided by APEI to save and retrieve hardware error * information to and from a persistent store. * * For more information about ERST, please refer to ACPI Specification * version 4.0, section 17.4. * * Copyright 2010 Intel Corp. * Author: Huang Ying <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/acpi.h> #include <linux/uaccess.h> #include <linux/cper.h> #include <linux/nmi.h> #include <linux/hardirq.h> #include <linux/pstore.h> #include <linux/vmalloc.h> #include <linux/mm.h> /* kvfree() */ #include <acpi/apei.h> #include "apei-internal.h" #undef pr_fmt #define pr_fmt(fmt) "ERST: " fmt /* ERST command status */ #define ERST_STATUS_SUCCESS 0x0 #define ERST_STATUS_NOT_ENOUGH_SPACE 0x1 #define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2 #define ERST_STATUS_FAILED 0x3 #define ERST_STATUS_RECORD_STORE_EMPTY 0x4 #define ERST_STATUS_RECORD_NOT_FOUND 0x5 #define ERST_TAB_ENTRY(tab) \ ((struct acpi_whea_header *)((char *)(tab) + \ sizeof(struct acpi_table_erst))) #define SPIN_UNIT 100 /* 100ns */ /* Firmware should respond within 1 milliseconds */ #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) #define FIRMWARE_MAX_STALL 50 /* 50us */ int erst_disable; EXPORT_SYMBOL_GPL(erst_disable); static struct acpi_table_erst *erst_tab; /* ERST Error Log Address Range attributes */ #define ERST_RANGE_RESERVED 0x0001 #define ERST_RANGE_NVRAM 0x0002 #define ERST_RANGE_SLOW 0x0004 /* * ERST Error Log Address Range, used as buffer for reading/writing * error records. */ static struct erst_erange { u64 base; u64 size; void __iomem *vaddr; u32 attr; } erst_erange; /* * Prevent ERST interpreter to run simultaneously, because the * corresponding firmware implementation may not work properly when * invoked simultaneously. 
* * It is used to provide exclusive accessing for ERST Error Log * Address Range too. */ static DEFINE_RAW_SPINLOCK(erst_lock); static inline int erst_errno(int command_status) { switch (command_status) { case ERST_STATUS_SUCCESS: return 0; case ERST_STATUS_HARDWARE_NOT_AVAILABLE: return -ENODEV; case ERST_STATUS_NOT_ENOUGH_SPACE: return -ENOSPC; case ERST_STATUS_RECORD_STORE_EMPTY: case ERST_STATUS_RECORD_NOT_FOUND: return -ENOENT; default: return -EINVAL; } } static int erst_timedout(u64 *t, u64 spin_unit) { if ((s64)*t < spin_unit) { pr_warn(FW_WARN "Firmware does not respond in time.\n"); return 1; } *t -= spin_unit; ndelay(spin_unit); touch_nmi_watchdog(); return 0; } static int erst_exec_load_var1(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { return __apei_exec_read_register(entry, &ctx->var1); } static int erst_exec_load_var2(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { return __apei_exec_read_register(entry, &ctx->var2); } static int erst_exec_store_var1(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { return __apei_exec_write_register(entry, ctx->var1); } static int erst_exec_add(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { ctx->var1 += ctx->var2; return 0; } static int erst_exec_subtract(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { ctx->var1 -= ctx->var2; return 0; } static int erst_exec_add_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { int rc; u64 val; rc = __apei_exec_read_register(entry, &val); if (rc) return rc; val += ctx->value; rc = __apei_exec_write_register(entry, val); return rc; } static int erst_exec_subtract_value(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { int rc; u64 val; rc = __apei_exec_read_register(entry, &val); if (rc) return rc; val -= ctx->value; rc = __apei_exec_write_register(entry, val); return rc; } static int erst_exec_stall(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { 
u64 stall_time; if (ctx->value > FIRMWARE_MAX_STALL) { if (!in_nmi()) pr_warn(FW_WARN "Too long stall time for stall instruction: 0x%llx.\n", ctx->value); stall_time = FIRMWARE_MAX_STALL; } else stall_time = ctx->value; udelay(stall_time); return 0; } static int erst_exec_stall_while_true(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { int rc; u64 val; u64 timeout = FIRMWARE_TIMEOUT; u64 stall_time; if (ctx->var1 > FIRMWARE_MAX_STALL) { if (!in_nmi()) pr_warn(FW_WARN "Too long stall time for stall while true instruction: 0x%llx.\n", ctx->var1); stall_time = FIRMWARE_MAX_STALL; } else stall_time = ctx->var1; for (;;) { rc = __apei_exec_read_register(entry, &val); if (rc) return rc; if (val != ctx->value) break; if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC)) return -EIO; } return 0; } static int erst_exec_skip_next_instruction_if_true( struct apei_exec_context *ctx, struct acpi_whea_header *entry) { int rc; u64 val; rc = __apei_exec_read_register(entry, &val); if (rc) return rc; if (val == ctx->value) { ctx->ip += 2; return APEI_EXEC_SET_IP; } return 0; } static int erst_exec_goto(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { ctx->ip = ctx->value; return APEI_EXEC_SET_IP; } static int erst_exec_set_src_address_base(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { return __apei_exec_read_register(entry, &ctx->src_base); } static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { return __apei_exec_read_register(entry, &ctx->dst_base); } static int erst_exec_move_data(struct apei_exec_context *ctx, struct acpi_whea_header *entry) { int rc; u64 offset; void *src, *dst; /* ioremap does not work in interrupt context */ if (in_interrupt()) { pr_warn("MOVE_DATA can not be used in interrupt context.\n"); return -EBUSY; } rc = __apei_exec_read_register(entry, &offset); if (rc) return rc; src = ioremap(ctx->src_base + offset, ctx->var2); if (!src) return -ENOMEM; dst 
= ioremap(ctx->dst_base + offset, ctx->var2); if (!dst) { iounmap(src); return -ENOMEM; } memmove(dst, src, ctx->var2); iounmap(src); iounmap(dst); return 0; } static struct apei_exec_ins_type erst_ins_type[] = { [ACPI_ERST_READ_REGISTER] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_read_register, }, [ACPI_ERST_READ_REGISTER_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_read_register_value, }, [ACPI_ERST_WRITE_REGISTER] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_write_register, }, [ACPI_ERST_WRITE_REGISTER_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_write_register_value, }, [ACPI_ERST_NOOP] = { .flags = 0, .run = apei_exec_noop, }, [ACPI_ERST_LOAD_VAR1] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_load_var1, }, [ACPI_ERST_LOAD_VAR2] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_load_var2, }, [ACPI_ERST_STORE_VAR1] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_store_var1, }, [ACPI_ERST_ADD] = { .flags = 0, .run = erst_exec_add, }, [ACPI_ERST_SUBTRACT] = { .flags = 0, .run = erst_exec_subtract, }, [ACPI_ERST_ADD_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_add_value, }, [ACPI_ERST_SUBTRACT_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_subtract_value, }, [ACPI_ERST_STALL] = { .flags = 0, .run = erst_exec_stall, }, [ACPI_ERST_STALL_WHILE_TRUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_stall_while_true, }, [ACPI_ERST_SKIP_NEXT_IF_TRUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_skip_next_instruction_if_true, }, [ACPI_ERST_GOTO] = { .flags = 0, .run = erst_exec_goto, }, [ACPI_ERST_SET_SRC_ADDRESS_BASE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_set_src_address_base, }, [ACPI_ERST_SET_DST_ADDRESS_BASE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_set_dst_address_base, }, [ACPI_ERST_MOVE_DATA] = { .flags = 
APEI_EXEC_INS_ACCESS_REGISTER, .run = erst_exec_move_data, }, };

/* Bind the ERST instruction table and the ERST ACPI table's action
 * entries to a fresh APEI execution context. */
static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
{
	apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
			   ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
}

/* Query firmware for the Error Log Address Range: base, size and
 * attributes, via the three GET_ERROR_* serialization actions. */
static int erst_get_erange(struct erst_erange *range)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
	if (rc)
		return rc;
	range->base = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
	if (rc)
		return rc;
	range->size = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
	if (rc)
		return rc;
	range->attr = apei_exec_ctx_get_output(&ctx);

	return 0;
}

/* Run the GET_RECORD_COUNT action; caller must hold erst_lock. */
static ssize_t __erst_get_record_count(void)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
	if (rc)
		return rc;
	return apei_exec_ctx_get_output(&ctx);
}

/* Locked wrapper around __erst_get_record_count(); returns the number
 * of records in persistent storage, or -ENODEV if ERST is disabled. */
ssize_t erst_get_record_count(void)
{
	ssize_t count;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	raw_spin_lock_irqsave(&erst_lock, flags);
	count = __erst_get_record_count();
	raw_spin_unlock_irqrestore(&erst_lock, flags);

	return count;
}
EXPORT_SYMBOL_GPL(erst_get_record_count);

/* Bounds for the growable record-ID cache below. */
#define ERST_RECORD_ID_CACHE_SIZE_MIN	16
#define ERST_RECORD_ID_CACHE_SIZE_MAX	1024

/*
 * Cache of record IDs read from firmware, used to iterate records
 * between erst_get_record_id_begin()/end().  refcount counts open
 * iterations; entries are only compacted when it drops to zero.
 */
struct erst_record_id_cache {
	struct mutex lock;	/* protects all fields below */
	u64 *entries;		/* kvmalloc'ed array of record IDs */
	int len;		/* number of valid entries */
	int size;		/* allocated capacity of entries[] */
	int refcount;		/* open begin/end iteration sections */
};

static struct erst_record_id_cache erst_record_id_cache = {
	.lock = __MUTEX_INITIALIZER(erst_record_id_cache.lock),
	.refcount = 0,
};

/* Run the GET_RECORD_ID action; caller must hold erst_lock. */
static int __erst_get_next_record_id(u64 *record_id)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
	if (rc)
		return rc;
	*record_id = apei_exec_ctx_get_output(&ctx);

	return 0;
}

/*
 * Open a record-ID iteration: pin the ID cache (bump refcount so it is
 * not compacted under us) and reset the caller's cursor to 0.
 * Must be paired with erst_get_record_id_end().
 */
int erst_get_record_id_begin(int *pos)
{
	int rc;

	if (erst_disable)
		return -ENODEV;

	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
	if (rc)
		return rc;
	erst_record_id_cache.refcount++;
	mutex_unlock(&erst_record_id_cache.lock);

	*pos = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(erst_get_record_id_begin);

/* erst_record_id_cache.lock must be held by caller */
/*
 * Ask firmware for one more record ID and append it to the cache.
 * Returns 1 if a new ID was added, 0 if there are no more new IDs
 * (end of enumeration, duplicate wrap-around, or cache full), or a
 * negative errno on failure.
 */
static int __erst_record_id_cache_add_one(void)
{
	u64 id, prev_id, first_id;
	int i, rc;
	u64 *entries;
	unsigned long flags;

	id = prev_id = first_id = APEI_ERST_INVALID_RECORD_ID;
retry:
	raw_spin_lock_irqsave(&erst_lock, flags);
	rc = __erst_get_next_record_id(&id);
	raw_spin_unlock_irqrestore(&erst_lock, flags);
	if (rc == -ENOENT)
		return 0;
	if (rc)
		return rc;
	if (id == APEI_ERST_INVALID_RECORD_ID)
		return 0;
	/* can not skip current ID, or loop back to first ID */
	if (id == prev_id || id == first_id)
		return 0;
	if (first_id == APEI_ERST_INVALID_RECORD_ID)
		first_id = id;
	prev_id = id;

	entries = erst_record_id_cache.entries;
	for (i = 0; i < erst_record_id_cache.len; i++) {
		if (entries[i] == id)
			break;
	}
	/* record id already in cache, try next */
	if (i < erst_record_id_cache.len)
		goto retry;
	if (erst_record_id_cache.len >= erst_record_id_cache.size) {
		/* grow the cache geometrically, capped at SIZE_MAX */
		int new_size;
		u64 *new_entries;

		new_size = erst_record_id_cache.size * 2;
		new_size = clamp_val(new_size, ERST_RECORD_ID_CACHE_SIZE_MIN,
				     ERST_RECORD_ID_CACHE_SIZE_MAX);
		if (new_size <= erst_record_id_cache.size) {
			if (printk_ratelimit())
				pr_warn(FW_WARN "too many record IDs!\n");
			return 0;
		}
		new_entries = kvmalloc_array(new_size, sizeof(entries[0]),
					     GFP_KERNEL);
		if (!new_entries)
			return -ENOMEM;
		memcpy(new_entries, entries,
		       erst_record_id_cache.len * sizeof(entries[0]));
		kvfree(entries);
		erst_record_id_cache.entries = entries = new_entries;
		erst_record_id_cache.size = new_size;
	}
	entries[i] = id;
	erst_record_id_cache.len++;

	return 1;
}

/*
 * Get the record ID of an existing error record on the persistent
 * storage. If there is no error record on the persistent storage, the
 * returned record_id is APEI_ERST_INVALID_RECORD_ID.
 */
int erst_get_record_id_next(int *pos, u64 *record_id)
{
	int rc = 0;
	u64 *entries;

	if (erst_disable)
		return -ENODEV;

	/* must be enclosed by erst_get_record_id_begin/end */
	BUG_ON(!erst_record_id_cache.refcount);
	BUG_ON(*pos < 0 || *pos > erst_record_id_cache.len);

	mutex_lock(&erst_record_id_cache.lock);
	entries = erst_record_id_cache.entries;
	/* skip slots invalidated by erst_clear()/erst_clear_cache() */
	for (; *pos < erst_record_id_cache.len; (*pos)++)
		if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
			break;
	/* found next record id in cache */
	if (*pos < erst_record_id_cache.len) {
		*record_id = entries[*pos];
		(*pos)++;
		goto out_unlock;
	}

	/* Try to add one more record ID to cache */
	rc = __erst_record_id_cache_add_one();
	if (rc < 0)
		goto out_unlock;
	/* successfully add one new ID */
	if (rc == 1) {
		*record_id = erst_record_id_cache.entries[*pos];
		(*pos)++;
		rc = 0;
	} else {
		*pos = -1;
		*record_id = APEI_ERST_INVALID_RECORD_ID;
	}
out_unlock:
	mutex_unlock(&erst_record_id_cache.lock);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_get_record_id_next);

/* erst_record_id_cache.lock must be held by caller */
/* Squeeze out invalidated slots; no-op while an iteration is open. */
static void __erst_record_id_cache_compact(void)
{
	int i, wpos = 0;
	u64 *entries;

	if (erst_record_id_cache.refcount)
		return;

	entries = erst_record_id_cache.entries;
	for (i = 0; i < erst_record_id_cache.len; i++) {
		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
			continue;
		if (wpos != i)
			entries[wpos] = entries[i];
		wpos++;
	}
	erst_record_id_cache.len = wpos;
}

/* Close a record-ID iteration opened by erst_get_record_id_begin(). */
void erst_get_record_id_end(void)
{
	/*
	 * erst_disable != 0 should be detected by invoker via the
	 * return value of erst_get_record_id_begin/next, so this
	 * function should not be called for erst_disable != 0.
	 */
	BUG_ON(erst_disable);

	mutex_lock(&erst_record_id_cache.lock);
	erst_record_id_cache.refcount--;
	BUG_ON(erst_record_id_cache.refcount < 0);
	__erst_record_id_cache_compact();
	mutex_unlock(&erst_record_id_cache.lock);
}
EXPORT_SYMBOL_GPL(erst_get_record_id_end);

/*
 * Drive the firmware WRITE action sequence: BEGIN_WRITE (optional),
 * SET_RECORD_OFFSET, EXECUTE_OPERATION, then poll BUSY_STATUS with a
 * timeout before fetching COMMAND_STATUS.  Caller holds erst_lock.
 */
static int __erst_write_to_storage(u64 offset)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, offset);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}

/* Same busy-poll pattern as __erst_write_to_storage(), for READ:
 * additionally sets the record ID.  Caller holds erst_lock. */
static int __erst_read_from_storage(u64 record_id, u64 offset)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, offset);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, record_id);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}

/* Same busy-poll pattern, for CLEAR of one record.  Caller holds
 * erst_lock. */
static int __erst_clear_from_storage(u64 record_id)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, record_id);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}

/* NVRAM ERST Error Log Address Range is not supported yet */
static void pr_unimpl_nvram(void)
{
	if (printk_ratelimit())
		pr_warn("NVRAM ERST Log Address Range not implemented yet.\n");
}

static int __erst_write_to_nvram(const struct cper_record_header *record)
{
	/* do not print message, because printk is not safe for NMI */
	return -ENOSYS;
}

static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
{
	pr_unimpl_nvram();
	return -ENOSYS;
}

static int __erst_clear_from_nvram(u64 record_id)
{
	pr_unimpl_nvram();
	return -ENOSYS;
}

/*
 * Write a CPER record to persistent storage.  Copies the record into
 * the error range, tags it with the "ER" serialization signature, and
 * triggers the firmware write.  Uses trylock so it is safe to call
 * from contexts that must not sleep or spin (returns -EBUSY instead).
 */
int erst_write(const struct cper_record_header *record)
{
	int rc;
	unsigned long flags;
	struct cper_record_header *rcd_erange;

	if (erst_disable)
		return -ENODEV;

	if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
		return -EINVAL;

	if (erst_erange.attr & ERST_RANGE_NVRAM) {
		if (!raw_spin_trylock_irqsave(&erst_lock, flags))
			return -EBUSY;
		rc = __erst_write_to_nvram(record);
		raw_spin_unlock_irqrestore(&erst_lock, flags);
		return rc;
	}

	if (record->record_length > erst_erange.size)
		return -EINVAL;

	if (!raw_spin_trylock_irqsave(&erst_lock, flags))
		return -EBUSY;
	memcpy(erst_erange.vaddr, record, record->record_length);
	rcd_erange = erst_erange.vaddr;
	/* signature for serialization system */
	memcpy(&rcd_erange->persistence_information, "ER", 2);

	rc = __erst_write_to_storage(0);
	raw_spin_unlock_irqrestore(&erst_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_write);

/* Pull a record into the error range and report its offset there. */
static int __erst_read_to_erange(u64 record_id, u64 *offset)
{
	int rc;

	if (erst_erange.attr & ERST_RANGE_NVRAM)
		return __erst_read_to_erange_from_nvram(
			record_id, offset);

	rc = __erst_read_from_storage(record_id, 0);
	if (rc)
		return rc;
	*offset = 0;

	return 0;
}

/*
 * Read one record into *record.  Returns the record's full length;
 * the copy is done only when it fits in buflen, so a return value
 * larger than buflen means "buffer too small, try again bigger".
 * Caller holds erst_lock.
 */
static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
			   size_t buflen)
{
	int rc;
	u64 offset, len = 0;
	struct cper_record_header *rcd_tmp;

	rc = __erst_read_to_erange(record_id, &offset);
	if (rc)
		return rc;
	rcd_tmp = erst_erange.vaddr + offset;
	len = rcd_tmp->record_length;
	if (len <= buflen)
		memcpy(record, rcd_tmp, len);

	return len;
}

/*
 * If return value > buflen, the buffer size is not big enough,
 * else if return value < 0, something goes wrong,
 * else everything is OK, and return value is record length
 */
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
		  size_t buflen)
{
	ssize_t len;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	raw_spin_lock_irqsave(&erst_lock, flags);
	len = __erst_read(record_id, record, buflen);
	raw_spin_unlock_irqrestore(&erst_lock, flags);
	return len;
}
EXPORT_SYMBOL_GPL(erst_read);

/* Invalidate one record ID in the cache (e.g. after the record was
 * found to have disappeared from storage) and compact if possible. */
static void erst_clear_cache(u64 record_id)
{
	int i;
	u64 *entries;

	mutex_lock(&erst_record_id_cache.lock);

	entries = erst_record_id_cache.entries;
	for (i = 0; i < erst_record_id_cache.len; i++) {
		if (entries[i] == record_id)
			entries[i] = APEI_ERST_INVALID_RECORD_ID;
	}
	__erst_record_id_cache_compact();

	mutex_unlock(&erst_record_id_cache.lock);
}

/*
 * Read a record and validate it: length must cover at least recordlen
 * and, when creatorid is given, the record's creator_id must match —
 * otherwise the record is reported as -ENOENT so callers skip to the
 * next record ID.  Stale IDs are dropped from the cache as a side
 * effect.
 */
ssize_t erst_read_record(u64 record_id, struct cper_record_header *record,
		size_t buflen, size_t recordlen, const guid_t *creatorid)
{
	ssize_t len;

	/*
	 * if creatorid is NULL, read any record for erst-dbg module
	 */
	if (creatorid == NULL) {
		len = erst_read(record_id, record, buflen);
		if (len == -ENOENT)
			erst_clear_cache(record_id);

		return len;
	}

	len = erst_read(record_id, record, buflen);
	/*
	 * if erst_read return value is -ENOENT skip to next record_id,
	 * and clear the record_id cache.
	 */
	if (len == -ENOENT) {
		erst_clear_cache(record_id);
		goto out;
	}

	if (len < 0)
		goto out;

	/*
	 * if erst_read return value is less than record head length,
	 * consider it as -EIO, and clear the record_id cache.
	 */
	if (len < recordlen) {
		len = -EIO;
		erst_clear_cache(record_id);
		goto out;
	}

	/*
	 * if creatorid is not wanted, consider it as not found,
	 * for skipping to next record_id.
	 */
	if (!guid_equal(&record->creator_id, creatorid))
		len = -ENOENT;

out:
	return len;
}
EXPORT_SYMBOL_GPL(erst_read_record);

/*
 * Clear one record from persistent storage and invalidate its entry
 * in the record-ID cache.  Takes the cache mutex (interruptibly)
 * around the firmware call so the cache stays consistent.
 */
int erst_clear(u64 record_id)
{
	int rc, i;
	unsigned long flags;
	u64 *entries;

	if (erst_disable)
		return -ENODEV;

	rc = mutex_lock_interruptible(&erst_record_id_cache.lock);
	if (rc)
		return rc;
	raw_spin_lock_irqsave(&erst_lock, flags);
	if (erst_erange.attr & ERST_RANGE_NVRAM)
		rc = __erst_clear_from_nvram(record_id);
	else
		rc = __erst_clear_from_storage(record_id);
	raw_spin_unlock_irqrestore(&erst_lock, flags);
	if (rc)
		goto out;
	entries = erst_record_id_cache.entries;
	for (i = 0; i < erst_record_id_cache.len; i++) {
		if (entries[i] == record_id)
			entries[i] = APEI_ERST_INVALID_RECORD_ID;
	}
	__erst_record_id_cache_compact();
out:
	mutex_unlock(&erst_record_id_cache.lock);
	return rc;
}
EXPORT_SYMBOL_GPL(erst_clear);

/* "erst_disable" boot parameter: turn the whole driver off. */
static int __init setup_erst_disable(char *str)
{
	erst_disable = 1;
	return 1;
}

__setup("erst_disable", setup_erst_disable);

/* Sanity-check the ERST ACPI table's header length, total length and
 * action entry count before trusting its contents. */
static int erst_check_table(struct acpi_table_erst *erst_tab)
{
	if ((erst_tab->header_length !=
	     (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
	    && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
		return -EINVAL;
	if (erst_tab->header.length < sizeof(struct acpi_table_erst))
		return -EINVAL;
	if (erst_tab->entries !=
	    (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
	    sizeof(struct acpi_erst_entry))
		return -EINVAL;

	return 0;
}

static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(struct pstore_record *record);
static int erst_writer(struct pstore_record *record);
static int erst_clearer(struct pstore_record *record);

/* pstore backend descriptor: exposes ERST as a dmesg-capable store. */
static struct pstore_info erst_info = {
	.owner		= THIS_MODULE,
	.name		= "erst",
	.flags		= PSTORE_FLAGS_DMESG,
	.open		= erst_open_pstore,
	.close		= erst_close_pstore,
	.read		= erst_reader,
	.write		= erst_writer,
	.erase		= erst_clearer
};

#define CPER_CREATOR_PSTORE						\
	GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c,	\
		  0x64, 0x90, 0xb8, 0x9d)
#define CPER_SECTION_TYPE_DMESG						\
	GUID_INIT(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54,	\
		  0x94, 0x19, 0xeb, 0x12)
#define CPER_SECTION_TYPE_DMESG_Z					\
	GUID_INIT(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d,	\
		  0x34, 0xdd, 0xfa, 0xc6)
#define CPER_SECTION_TYPE_MCE						\
	GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96,	\
		  0x04, 0x4a, 0x38, 0xfc)

/* On-storage layout of a pstore-created record: CPER record header,
 * one section descriptor, then the raw payload. */
struct cper_pstore_record {
	struct cper_record_header hdr;
	struct cper_section_descriptor sec_hdr;
	char data[];
} __packed;

/* Iteration cursor shared by the pstore open/read callbacks. */
static int reader_pos;

static int erst_open_pstore(struct pstore_info *psi)
{
	if (erst_disable)
		return -ENODEV;

	return erst_get_record_id_begin(&reader_pos);
}

static int erst_close_pstore(struct pstore_info *psi)
{
	erst_get_record_id_end();

	return 0;
}

/*
 * pstore read callback: fetch the next pstore-created record, decode
 * its section type into a pstore record type, and hand the payload
 * (sans CPER headers) to pstore in a freshly allocated buffer.
 */
static ssize_t erst_reader(struct pstore_record *record)
{
	int rc;
	ssize_t len = 0;
	u64 record_id;
	struct cper_pstore_record *rcd;
	size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;

	if (erst_disable)
		return -ENODEV;

	rcd = kmalloc(rcd_len, GFP_KERNEL);
	if (!rcd) {
		rc = -ENOMEM;
		goto out;
	}
skip:
	rc = erst_get_record_id_next(&reader_pos, &record_id);
	if (rc)
		goto out;

	/* no more record */
	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
		rc = -EINVAL;
		goto out;
	}

	len = erst_read_record(record_id, &rcd->hdr, rcd_len, sizeof(*rcd),
			&CPER_CREATOR_PSTORE);
	/* The record may be cleared by others, try read next record */
	if (len == -ENOENT)
		goto skip;
	else if (len < 0)
		goto out;

	record->buf = kmalloc(len, GFP_KERNEL);
	if (record->buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	memcpy(record->buf, rcd->data, len - sizeof(*rcd));
	record->id = record_id;
	record->compressed = false;
	record->ecc_notice_size = 0;
	if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) {
		record->type = PSTORE_TYPE_DMESG;
		record->compressed = true;
	} else if (guid_equal(&rcd->sec_hdr.section_type,
			      &CPER_SECTION_TYPE_DMESG))
		record->type = PSTORE_TYPE_DMESG;
	else if (guid_equal(&rcd->sec_hdr.section_type,
			    &CPER_SECTION_TYPE_MCE))
		record->type = PSTORE_TYPE_MCE;
	else
		record->type = PSTORE_TYPE_MAX;

	if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
		record->time.tv_sec = rcd->hdr.timestamp;
	else
		record->time.tv_sec = 0;
	record->time.tv_nsec = 0;

out:
	kfree(rcd);
	return (rc < 0) ? rc : (len - sizeof(*rcd));
}

/*
 * pstore write callback: wrap the payload (already in erst_info.buf,
 * which points sizeof(*rcd) bytes into the backing buffer so the CPER
 * headers can be built in place just before it) and write it out.
 */
static int erst_writer(struct pstore_record *record)
{
	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
					(erst_info.buf - sizeof(*rcd));
	int ret;

	memset(rcd, 0, sizeof(*rcd));
	memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
	rcd->hdr.revision = CPER_RECORD_REV;
	rcd->hdr.signature_end = CPER_SIG_END;
	rcd->hdr.section_count = 1;
	rcd->hdr.error_severity = CPER_SEV_FATAL;
	/* timestamp valid. platform_id, partition_id are invalid */
	rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
	rcd->hdr.timestamp = ktime_get_real_seconds();
	rcd->hdr.record_length = sizeof(*rcd) + record->size;
	rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
	rcd->hdr.notification_type = CPER_NOTIFY_MCE;
	rcd->hdr.record_id = cper_next_record_id();
	rcd->hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;

	rcd->sec_hdr.section_offset = sizeof(*rcd);
	rcd->sec_hdr.section_length = record->size;
	rcd->sec_hdr.revision = CPER_SEC_REV;
	/* fru_id and fru_text is invalid */
	rcd->sec_hdr.validation_bits = 0;
	rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->compressed)
			rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG_Z;
		else
			rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
		break;
	case PSTORE_TYPE_MCE:
		rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
		break;
	default:
		return -EINVAL;
	}
	rcd->sec_hdr.section_severity = CPER_SEV_FATAL;

	ret = erst_write(&rcd->hdr);
	record->id = rcd->hdr.record_id;

	return ret;
}

static int erst_clearer(struct pstore_record *record)
{
	return erst_clear(record->id);
}

/*
 * Driver init: locate and validate the ERST table, collect and map the
 * resources its serialization instructions touch, discover the error
 * log address range, map it, and register the pstore backend.  Unwinds
 * with the goto-label chain on any failure and sets erst_disable.
 */
static int __init erst_init(void)
{
	int rc = 0;
	acpi_status status;
	struct apei_exec_context ctx;
	struct apei_resources erst_resources;
	struct resource *r;
	char *buf;

	if (acpi_disabled)
		goto err;

	if (erst_disable) {
		pr_info(
	"Error Record Serialization Table (ERST) support is disabled.\n");
		goto err;
	}

	status = acpi_get_table(ACPI_SIG_ERST, 0,
				(struct acpi_table_header **)&erst_tab);
	if (status == AE_NOT_FOUND)
		goto err;
	else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);
		pr_err("Failed to get table, %s\n", msg);
		rc = -EINVAL;
		goto err;
	}

	rc = erst_check_table(erst_tab);
	if (rc) {
		pr_err(FW_BUG "ERST table is invalid.\n");
		goto err_put_erst_tab;
	}

	apei_resources_init(&erst_resources);
	erst_exec_ctx_init(&ctx);
	rc = apei_exec_collect_resources(&ctx, &erst_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&erst_resources, "APEI ERST");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;
	rc = erst_get_erange(&erst_erange);
	if (rc) {
		if (rc == -ENODEV)
			pr_info(
	"The corresponding hardware device or firmware implementation "
	"is not available.\n");
		else
			pr_err("Failed to get Error Log Address Range.\n");
		goto err_unmap_reg;
	}

	r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
	if (!r) {
		pr_err("Can not request [mem %#010llx-%#010llx] for ERST.\n",
		       (unsigned long long)erst_erange.base,
		       (unsigned long long)erst_erange.base +
		       erst_erange.size - 1);
		rc = -EIO;
		goto err_unmap_reg;
	}
	rc = -ENOMEM;
	erst_erange.vaddr = ioremap_cache(erst_erange.base, erst_erange.size);
	if (!erst_erange.vaddr)
		goto err_release_erange;

	pr_info(
	"Error Record Serialization Table (ERST) support is initialized.\n");

	buf = kmalloc(erst_erange.size, GFP_KERNEL);
	if (buf) {
		/* reserve room before buf for the CPER headers erst_writer builds */
		erst_info.buf = buf + sizeof(struct cper_pstore_record);
		erst_info.bufsize = erst_erange.size -
				    sizeof(struct cper_pstore_record);
		rc = pstore_register(&erst_info);
		if (rc) {
			if (rc != -EPERM)
				pr_info(
				"Could not register with persistent store.\n");
			erst_info.buf = NULL;
			erst_info.bufsize = 0;
			kfree(buf);
		}
	} else
		pr_err(
		"Failed to allocate %lld bytes for persistent store error log.\n",
		erst_erange.size);

	/* Cleanup ERST Resources */
	apei_resources_fini(&erst_resources);

	return 0;

err_release_erange:
	release_mem_region(erst_erange.base, erst_erange.size);
err_unmap_reg:
	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&erst_resources);
err_fini:
	apei_resources_fini(&erst_resources);
err_put_erst_tab:
	acpi_put_table((struct acpi_table_header *)erst_tab);
err:
	erst_disable = 1;
	return rc;
}

device_initcall(erst_init);
/* ==== end of drivers/acpi/apei/erst.c (linux-master); drivers/acpi/apei/ghes.c follows ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as that from chipset). It works in so called
 * "Firmware First" mode, that is, hardware errors are reported to
 * firmware firstly, then reported to Linux by firmware. This way,
 * some non-standard hardware error registers or non-standard hardware
 * link can be checked by firmware to produce more hardware error
 * information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <[email protected]>
 */

#include <linux/arm_sdei.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/aer.h>
#include <linux/nmi.h>
#include <linux/sched/clock.h>
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>

#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <ras/ras_event.h>

#include "apei-internal.h"

#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches are allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

/* Layout helpers: an estatus block is stored immediately after its
 * cache/node/vendor-entry header in one allocation. */
#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)			\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

#define GHES_VENDOR_ENTRY_LEN(gdata_len)			\
	(sizeof(struct ghes_vendor_record_entry) + (gdata_len))
#define GHES_GDATA_FROM_VENDOR_ENTRY(vendor_entry)		\
	((struct acpi_hest_generic_data *)			\
	((struct ghes_vendor_record_entry *)(vendor_entry) + 1))

/*
 * NMI-like notifications vary by architecture, before the compiler can prune
 * unused static functions it needs a value for these enums.
 */
#ifndef CONFIG_ARM_SDE_INTERFACE
#define FIX_APEI_GHES_SDEI_NORMAL	__end_of_fixed_addresses
#define FIX_APEI_GHES_SDEI_CRITICAL	__end_of_fixed_addresses
#endif

static ATOMIC_NOTIFIER_HEAD(ghes_report_chain);

/* True when this error source uses the GHESv2 HEST entry format
 * (which adds the read-ack register handshake). */
static inline bool is_hest_type_generic_v2(struct ghes *ghes)
{
	return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2;
}

/*
 * This driver isn't really modular, however for the time being,
 * continuing to use module_param is the easiest way to remain
 * compatible with existing boot arg use cases.
 */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

/*
 * "ghes.edac_force_enable" forcibly enables ghes_edac and skips the platform
 * check.
 */
static bool ghes_edac_force_enable;
module_param_named(edac_force_enable, ghes_edac_force_enable, bool, 0);

/*
 * All error sources notified with HED (Hardware Error Device) share a
 * single notifier callback, so they need to be linked and checked one
 * by one. This holds true for NMI too.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_hed);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * A list of GHES devices which are given to the corresponding EDAC driver
 * ghes_edac for further use.
 */
static LIST_HEAD(ghes_devs);
static DEFINE_MUTEX(ghes_devs_mutex);

/*
 * Because the memory area used to transfer hardware error information
 * from BIOS to Linux can be determined only in NMI, IRQ or timer
 * handler, but general ioremap can not be used in atomic context, so
 * the fixmap is used instead.
 *
 * This spinlock is used to prevent the fixmap entry from being used
 * simultaneously.
 */
static DEFINE_SPINLOCK(ghes_notify_lock_irq);

/* One deferred vendor (non-standard) error record, processed from a
 * workqueue; the raw record follows the struct in the same pool
 * allocation. */
struct ghes_vendor_record_entry {
	struct work_struct work;
	int error_severity;
	char vendor_record[];
};

static struct gen_pool *ghes_estatus_pool;

static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_panic_timeout __read_mostly = 30;

/* Map one page of firmware error data via the per-context fixmap slot
 * (usable in atomic context, unlike ioremap). */
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
	phys_addr_t paddr;
	pgprot_t prot;

	paddr = PFN_PHYS(pfn);
	prot = arch_apei_get_mem_attribute(paddr);
	__set_fixmap(fixmap_idx, paddr, prot);

	return (void __iomem *) __fix_to_virt(fixmap_idx);
}

/* Undo ghes_map(); warns if vaddr does not belong to fixmap_idx. */
static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
{
	int _idx = virt_to_fix((unsigned long)vaddr);

	WARN_ON_ONCE(fixmap_idx != _idx);
	clear_fixmap(fixmap_idx);
}

/* Create the gen_pool backing estatus caches/nodes/vendor entries,
 * sized from the cache estimate plus a per-source preallocation. */
int ghes_estatus_pool_init(unsigned int num_ghes)
{
	unsigned long addr, len;
	int rc;

	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;

	len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX;
	len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE);

	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
	if (!addr)
		goto err_pool_alloc;

	rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
	if (rc)
		goto err_pool_add;

	return 0;

err_pool_add:
	vfree((void *)addr);

err_pool_alloc:
	gen_pool_destroy(ghes_estatus_pool);

	return -ENOMEM;
}

static int map_gen_v2(struct ghes *ghes)
{
	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
}

static void unmap_gen_v2(struct ghes *ghes)
{
	apei_unmap_generic_address(&ghes->generic_v2->read_ack_register);
}

/* GHESv2 handshake: read-modify-write the read-ack register using the
 * preserve mask and write value from the HEST entry. */
static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2)
{
	int rc;
	u64 val = 0;

	rc = apei_read(&val, &gv2->read_ack_register);
	if (rc)
		return;

	val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset;
	val |= gv2->read_ack_write    << gv2->read_ack_register.bit_offset;

	apei_write(val, &gv2->read_ack_register);
}

/*
 * Allocate and initialize a ghes instance for one HEST generic error
 * source: map its registers and allocate a bounce buffer for the
 * error status block (clamped to GHES_ESTATUS_MAX_SIZE).
 * Returns ERR_PTR() on failure.
 */
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);

	ghes->generic = generic;
	if (is_hest_type_generic_v2(ghes)) {
		rc = map_gen_v2(ghes);
		if (rc)
			goto err_free;
	}

	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_unmap_read_ack_addr;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warn(FW_WARN GHES_PFX
			"Error status block length is too long: %u for "
			"generic hardware error source: %d.\n",
			error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap_status_addr;
	}

	return ghes;

err_unmap_status_addr:
	apei_unmap_generic_address(&generic->error_status_address);
err_unmap_read_ack_addr:
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

/* Release everything ghes_new() set up (except the ghes struct itself). */
static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
	if (is_hest_type_generic_v2(ghes))
		unmap_gen_v2(ghes);
}

/* Map a CPER severity onto the internal GHES severity scale. */
static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}

/* Copy between a kernel buffer and physical memory page-by-page via
 * the fixmap, since the region may only be known in atomic context. */
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys,
				  enum fixed_addresses fixmap_idx)
{
	void __iomem *vaddr;
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx);
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		ghes_unmap(vaddr, fixmap_idx);
	}
}

/* Check the top-level record header has an appropriate size. */
static int __ghes_check_estatus(struct ghes *ghes,
				struct acpi_hest_generic_status *estatus)
{
	u32 len = cper_estatus_len(estatus);

	if (len < sizeof(*estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n");
		return -EIO;
	}

	if (len > ghes->generic->error_block_length) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n");
		return -EIO;
	}

	if (cper_estatus_check_header(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n");
		return -EIO;
	}

	return 0;
}

/* Read the CPER block, returning its address, and header in estatus.
 */
static int __ghes_peek_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
	struct acpi_hest_generic *g = ghes->generic;
	int rc;

	rc = apei_read(buf_paddr, &g->error_status_address);
	if (rc) {
		*buf_paddr = 0;
		pr_warn_ratelimited(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!*buf_paddr)
		return -ENOENT;

	/* only the header for now; full block is read after validation */
	ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1,
			      fixmap_idx);
	if (!estatus->block_status) {
		/* block_status == 0 means no error is pending */
		*buf_paddr = 0;
		return -ENOENT;
	}

	return 0;
}

/* Copy the full estatus block from physical memory and validate it. */
static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx,
			       size_t buf_len)
{
	ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx);
	if (cper_estatus_check(estatus)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Failed to read error status block!\n");
		return -EIO;
	}

	return 0;
}

/* Peek the header, sanity-check it, then read the whole block. */
static int ghes_read_estatus(struct ghes *ghes,
			     struct acpi_hest_generic_status *estatus,
			     u64 *buf_paddr, enum fixed_addresses fixmap_idx)
{
	int rc;

	rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx);
	if (rc)
		return rc;

	rc = __ghes_check_estatus(ghes, estatus);
	if (rc)
		return rc;

	return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx,
				   cper_estatus_len(estatus));
}

/* Tell firmware the block was consumed: zero block_status in the
 * firmware buffer and, on GHESv2, perform the read-ack handshake. */
static void ghes_clear_estatus(struct ghes *ghes,
			       struct acpi_hest_generic_status *estatus,
			       u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
	estatus->block_status = 0;

	if (!buf_paddr)
		return;

	ghes_copy_tofrom_phys(estatus, buf_paddr,
			      sizeof(estatus->block_status), 0,
			      fixmap_idx);

	/*
	 * GHESv2 type HEST entries introduce support for error acknowledgment,
	 * so only acknowledge the error if this support is present.
	 */
	if (is_hest_type_generic_v2(ghes))
		ghes_ack_error(ghes->generic_v2);
}

/*
 * Called as task_work before returning to user-space.
 * Ensure any queued work has been done before we return to the context that
 * triggered the notification.
 */
static void ghes_kick_task_work(struct callback_head *head)
{
	struct acpi_hest_generic_status *estatus;
	struct ghes_estatus_node *estatus_node;
	u32 node_len;

	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		memory_failure_queue_kick(estatus_node->task_work_cpu);

	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
}

/* Queue a physical address for memory_failure() handling; returns
 * whether it was actually queued. */
static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
	unsigned long pfn;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
		return false;

	pfn = PHYS_PFN(physical_addr);
	if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
		"Invalid address in generic error data: %#llx\n",
		physical_addr);
		return false;
	}

	memory_failure_queue(pfn, flags);
	return true;
}

/* Decide, from overall and section severities, whether a platform
 * memory error section warrants soft-offline or recovery handling. */
static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
				       int sev)
{
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return false;

	/* iff following two events can be handled properly by now */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1)
		return ghes_do_memory_failure(mem_err->physical_addr, flags);

	return false;
}

/* Walk an ARM processor error section: log it, and queue memory
 * failure handling for recoverable cache errors with a valid PA. */
static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
				       int sev)
{
	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
	bool queued = false;
	int sec_sev, i;
	char *p;

	log_arm_hw_error(err);

	sec_sev = ghes_severity(gdata->error_severity);
	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
		return false;

	p = (char *)(err + 1);
	for (i = 0; i < err->err_info_num; i++) {
		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
		const char *error_type = "unknown error";

		/*
		 * The field (err_info->error_info & BIT(26)) is fixed to set to
		 * 1 in some old firmware of HiSilicon Kunpeng920. We assume that
		 * firmware won't mix corrected errors in an uncorrected section,
		 * and don't filter out 'corrected' error here.
		 */
		if (is_cache && has_pa) {
			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
			p += err_info->length;
			continue;
		}

		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
			error_type = cper_proc_error_type_strs[err_info->type];

		pr_warn_ratelimited(FW_WARN GHES_PFX
				    "Unhandled processor error type: %s\n",
				    error_type);
		p += err_info->length;
	}

	return queued;
}

/*
 * PCIe AER errors need to be sent to the AER driver for reporting and
 * recovery. The GHES severities map to the following AER severities and
 * require the following handling:
 *
 * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
 *     These need to be reported by the AER driver but no recovery is
 *     necessary.
 * GHES_SEV_RECOVERABLE -> AER_NONFATAL
 * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
 *     These both need to be reported and recovered from by the AER driver.
 * GHES_SEV_PANIC does not make it to this handling since the kernel must
 * panic.
 */
static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
{
#ifdef CONFIG_ACPI_APEI_PCIEAER
	struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);

	if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
		unsigned int devfn;
		int aer_severity;

		devfn = PCI_DEVFN(pcie_err->device_id.device,
				  pcie_err->device_id.function);
		aer_severity = cper_severity_to_aer(gdata->error_severity);

		/*
		 * If firmware reset the component to contain
		 * the error, we must reinitialize it before
		 * use, so treat it as a fatal AER error.
		 */
		if (gdata->flags & CPER_SEC_RESET)
			aer_severity = AER_FATAL;

		aer_recover_queue(pcie_err->device_id.segment,
				  pcie_err->device_id.bus,
				  devfn, aer_severity,
				  (struct aer_capability_regs *)
				  pcie_err->aer_info);
	}
#endif
}

static BLOCKING_NOTIFIER_HEAD(vendor_record_notify_list);

int ghes_register_vendor_record_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_register_vendor_record_notifier);

void ghes_unregister_vendor_record_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&vendor_record_notify_list, nb);
}
EXPORT_SYMBOL_GPL(ghes_unregister_vendor_record_notifier);

/* Workqueue side of ghes_defer_non_standard_event(): notify vendor
 * listeners, then return the entry to the estatus pool. */
static void ghes_vendor_record_work_func(struct work_struct *work)
{
	struct ghes_vendor_record_entry *entry;
	struct acpi_hest_generic_data *gdata;
	u32 len;

	entry = container_of(work, struct ghes_vendor_record_entry, work);
	gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);

	blocking_notifier_call_chain(&vendor_record_notify_list,
				     entry->error_severity, gdata);

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	gen_pool_free(ghes_estatus_pool, (unsigned long)entry, len);
}

/* Copy a non-standard (vendor) section out of the firmware buffer and
 * hand it to process context via schedule_work(); drops the event
 * silently if the pool is exhausted. */
static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
					  int sev)
{
	struct acpi_hest_generic_data *copied_gdata;
	struct ghes_vendor_record_entry *entry;
	u32 len;

	len = GHES_VENDOR_ENTRY_LEN(acpi_hest_get_record_size(gdata));
	entry = (void *)gen_pool_alloc(ghes_estatus_pool, len);
	if (!entry)
		return;

	copied_gdata = GHES_GDATA_FROM_VENDOR_ENTRY(entry);
	memcpy(copied_gdata, gdata, acpi_hest_get_record_size(gdata));
	entry->error_severity = sev;

	INIT_WORK(&entry->work, ghes_vendor_record_work_func);
	schedule_work(&entry->work);
}

/*
 * Dispatch every section of an error status block to its handler
 * (memory, PCIe/AER, ARM processor, or deferred vendor record).
 * Returns whether any work was queued for later (task_work kick).
 */
static bool ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;
	guid_t *sec_type;
	const guid_t *fru_id = &guid_null;
	char *fru_text = "";
	bool queued = false;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_type = (guid_t *)gdata->section_type;
		sec_sev = ghes_severity(gdata->error_severity);
		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
			fru_id = (guid_t *)gdata->fru_id;

		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
			fru_text = gdata->fru_text;

		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);

			atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err);

			arch_apei_report_mem_error(sev, mem_err);
			queued = ghes_handle_memory_failure(gdata, sev);
		} else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
			ghes_handle_aer(gdata);
		} else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
			queued = ghes_handle_arm_hw_error(gdata, sev);
		} else {
			void *err = acpi_hest_get_payload(gdata);

			ghes_defer_non_standard_event(gdata, sev);
			log_non_standard_event(sec_type, fru_id, fru_text,
					       sec_sev, err,
					       gdata->error_data_length);
		}
	}

	return queued;
}

/* Print one estatus block with a sequence-numbered prefix; picks
 * KERN_WARNING/KERN_ERR from severity when pfx is NULL. */
static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}

/* Rate-limited wrapper around __ghes_print_estatus(); corrected and
 * uncorrected errors are throttled independently.  Returns 1 if the
 * block was printed. */
static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}

	return 0;
}

/*
 * GHES error status reporting throttle, to report more kinds of
 * errors, instead of just most frequently occurred errors.
 */
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}

static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
atomic_dec(&ghes_estatus_cache_alloced); return NULL; } len = cper_estatus_len(estatus); cache_len = GHES_ESTATUS_CACHE_LEN(len); cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len); if (!cache) { atomic_dec(&ghes_estatus_cache_alloced); return NULL; } cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); memcpy(cache_estatus, estatus, len); cache->estatus_len = len; atomic_set(&cache->count, 0); cache->generic = generic; cache->time_in = sched_clock(); return cache; } static void ghes_estatus_cache_rcu_free(struct rcu_head *head) { struct ghes_estatus_cache *cache; u32 len; cache = container_of(head, struct ghes_estatus_cache, rcu); len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache)); len = GHES_ESTATUS_CACHE_LEN(len); gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len); atomic_dec(&ghes_estatus_cache_alloced); } static void ghes_estatus_cache_add(struct acpi_hest_generic *generic, struct acpi_hest_generic_status *estatus) { unsigned long long now, duration, period, max_period = 0; struct ghes_estatus_cache *cache, *new_cache; struct ghes_estatus_cache __rcu *victim; int i, slot = -1, count; new_cache = ghes_estatus_cache_alloc(generic, estatus); if (!new_cache) return; rcu_read_lock(); now = sched_clock(); for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) { cache = rcu_dereference(ghes_estatus_caches[i]); if (cache == NULL) { slot = i; break; } duration = now - cache->time_in; if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) { slot = i; break; } count = atomic_read(&cache->count); period = duration; do_div(period, (count + 1)); if (period > max_period) { max_period = period; slot = i; } } rcu_read_unlock(); if (slot != -1) { /* * Use release semantics to ensure that ghes_estatus_cached() * running on another CPU will see the updated cache fields if * it can see the new value of the pointer. 
*/ victim = xchg_release(&ghes_estatus_caches[slot], RCU_INITIALIZER(new_cache)); /* * At this point, victim may point to a cached item different * from the one based on which we selected the slot. Instead of * going to the loop again to pick another slot, let's just * drop the other item anyway: this may cause a false cache * miss later on, but that won't cause any problems. */ if (victim) call_rcu(&unrcu_pointer(victim)->rcu, ghes_estatus_cache_rcu_free); } } static void __ghes_panic(struct ghes *ghes, struct acpi_hest_generic_status *estatus, u64 buf_paddr, enum fixed_addresses fixmap_idx) { __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus); ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); /* reboot to log the error! */ if (!panic_timeout) panic_timeout = ghes_panic_timeout; panic("Fatal hardware error!"); } static int ghes_proc(struct ghes *ghes) { struct acpi_hest_generic_status *estatus = ghes->estatus; u64 buf_paddr; int rc; rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ); if (rc) goto out; if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC) __ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ); if (!ghes_estatus_cached(estatus)) { if (ghes_print_estatus(NULL, ghes->generic, estatus)) ghes_estatus_cache_add(ghes->generic, estatus); } ghes_do_proc(ghes, estatus); out: ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ); return rc; } static void ghes_add_timer(struct ghes *ghes) { struct acpi_hest_generic *g = ghes->generic; unsigned long expire; if (!g->notify.poll_interval) { pr_warn(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n", g->header.source_id); return; } expire = jiffies + msecs_to_jiffies(g->notify.poll_interval); ghes->timer.expires = round_jiffies_relative(expire); add_timer(&ghes->timer); } static void ghes_poll_func(struct timer_list *t) { struct ghes *ghes = from_timer(ghes, t, timer); unsigned long flags; 
spin_lock_irqsave(&ghes_notify_lock_irq, flags); ghes_proc(ghes); spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); if (!(ghes->flags & GHES_EXITING)) ghes_add_timer(ghes); } static irqreturn_t ghes_irq_func(int irq, void *data) { struct ghes *ghes = data; unsigned long flags; int rc; spin_lock_irqsave(&ghes_notify_lock_irq, flags); rc = ghes_proc(ghes); spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); if (rc) return IRQ_NONE; return IRQ_HANDLED; } static int ghes_notify_hed(struct notifier_block *this, unsigned long event, void *data) { struct ghes *ghes; unsigned long flags; int ret = NOTIFY_DONE; spin_lock_irqsave(&ghes_notify_lock_irq, flags); rcu_read_lock(); list_for_each_entry_rcu(ghes, &ghes_hed, list) { if (!ghes_proc(ghes)) ret = NOTIFY_OK; } rcu_read_unlock(); spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); return ret; } static struct notifier_block ghes_notifier_hed = { .notifier_call = ghes_notify_hed, }; /* * Handlers for CPER records may not be NMI safe. For example, * memory_failure_queue() takes spinlocks and calls schedule_work_on(). * In any NMI-like handler, memory from ghes_estatus_pool is used to save * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes * ghes_proc_in_irq() to run in IRQ context where each estatus in * ghes_estatus_llist is processed. * * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache * to suppress frequent messages. */ static struct llist_head ghes_estatus_llist; static struct irq_work ghes_proc_irq_work; static void ghes_proc_in_irq(struct irq_work *irq_work) { struct llist_node *llnode, *next; struct ghes_estatus_node *estatus_node; struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; bool task_work_pending; u32 len, node_len; int ret; llnode = llist_del_all(&ghes_estatus_llist); /* * Because the time order of estatus in list is reversed, * revert it back to proper order. 
*/ llnode = llist_reverse_order(llnode); while (llnode) { next = llnode->next; estatus_node = llist_entry(llnode, struct ghes_estatus_node, llnode); estatus = GHES_ESTATUS_FROM_NODE(estatus_node); len = cper_estatus_len(estatus); node_len = GHES_ESTATUS_NODE_LEN(len); task_work_pending = ghes_do_proc(estatus_node->ghes, estatus); if (!ghes_estatus_cached(estatus)) { generic = estatus_node->generic; if (ghes_print_estatus(NULL, generic, estatus)) ghes_estatus_cache_add(generic, estatus); } if (task_work_pending && current->mm) { estatus_node->task_work.func = ghes_kick_task_work; estatus_node->task_work_cpu = smp_processor_id(); ret = task_work_add(current, &estatus_node->task_work, TWA_RESUME); if (ret) estatus_node->task_work.func = NULL; } if (!estatus_node->task_work.func) gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); llnode = next; } } static void ghes_print_queued_estatus(void) { struct llist_node *llnode; struct ghes_estatus_node *estatus_node; struct acpi_hest_generic *generic; struct acpi_hest_generic_status *estatus; llnode = llist_del_all(&ghes_estatus_llist); /* * Because the time order of estatus in list is reversed, * revert it back to proper order. 
*/ llnode = llist_reverse_order(llnode); while (llnode) { estatus_node = llist_entry(llnode, struct ghes_estatus_node, llnode); estatus = GHES_ESTATUS_FROM_NODE(estatus_node); generic = estatus_node->generic; ghes_print_estatus(NULL, generic, estatus); llnode = llnode->next; } } static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, enum fixed_addresses fixmap_idx) { struct acpi_hest_generic_status *estatus, tmp_header; struct ghes_estatus_node *estatus_node; u32 len, node_len; u64 buf_paddr; int sev, rc; if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)) return -EOPNOTSUPP; rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx); if (rc) { ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); return rc; } rc = __ghes_check_estatus(ghes, &tmp_header); if (rc) { ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); return rc; } len = cper_estatus_len(&tmp_header); node_len = GHES_ESTATUS_NODE_LEN(len); estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len); if (!estatus_node) return -ENOMEM; estatus_node->ghes = ghes; estatus_node->generic = ghes->generic; estatus_node->task_work.func = NULL; estatus = GHES_ESTATUS_FROM_NODE(estatus_node); if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) { ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); rc = -ENOENT; goto no_work; } sev = ghes_severity(estatus->error_severity); if (sev >= GHES_SEV_PANIC) { ghes_print_queued_estatus(); __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx); } ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); /* This error has been reported before, don't process it again. 
*/ if (ghes_estatus_cached(estatus)) goto no_work; llist_add(&estatus_node->llnode, &ghes_estatus_llist); return rc; no_work: gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); return rc; } static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, enum fixed_addresses fixmap_idx) { int ret = -ENOENT; struct ghes *ghes; rcu_read_lock(); list_for_each_entry_rcu(ghes, rcu_list, list) { if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) ret = 0; } rcu_read_unlock(); if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret) irq_work_queue(&ghes_proc_irq_work); return ret; } #ifdef CONFIG_ACPI_APEI_SEA static LIST_HEAD(ghes_sea); /* * Return 0 only if one of the SEA error sources successfully reported an error * record sent from the firmware. */ int ghes_notify_sea(void) { static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea); int rv; raw_spin_lock(&ghes_notify_lock_sea); rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA); raw_spin_unlock(&ghes_notify_lock_sea); return rv; } static void ghes_sea_add(struct ghes *ghes) { mutex_lock(&ghes_list_mutex); list_add_rcu(&ghes->list, &ghes_sea); mutex_unlock(&ghes_list_mutex); } static void ghes_sea_remove(struct ghes *ghes) { mutex_lock(&ghes_list_mutex); list_del_rcu(&ghes->list); mutex_unlock(&ghes_list_mutex); synchronize_rcu(); } #else /* CONFIG_ACPI_APEI_SEA */ static inline void ghes_sea_add(struct ghes *ghes) { } static inline void ghes_sea_remove(struct ghes *ghes) { } #endif /* CONFIG_ACPI_APEI_SEA */ #ifdef CONFIG_HAVE_ACPI_APEI_NMI /* * NMI may be triggered on any CPU, so ghes_in_nmi is used for * having only one concurrent reader. 
*/ static atomic_t ghes_in_nmi = ATOMIC_INIT(0); static LIST_HEAD(ghes_nmi); static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) { static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi); int ret = NMI_DONE; if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) return ret; raw_spin_lock(&ghes_notify_lock_nmi); if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI)) ret = NMI_HANDLED; raw_spin_unlock(&ghes_notify_lock_nmi); atomic_dec(&ghes_in_nmi); return ret; } static void ghes_nmi_add(struct ghes *ghes) { mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_nmi)) register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes"); list_add_rcu(&ghes->list, &ghes_nmi); mutex_unlock(&ghes_list_mutex); } static void ghes_nmi_remove(struct ghes *ghes) { mutex_lock(&ghes_list_mutex); list_del_rcu(&ghes->list); if (list_empty(&ghes_nmi)) unregister_nmi_handler(NMI_LOCAL, "ghes"); mutex_unlock(&ghes_list_mutex); /* * To synchronize with NMI handler, ghes can only be * freed after NMI handler finishes. 
*/ synchronize_rcu(); } #else /* CONFIG_HAVE_ACPI_APEI_NMI */ static inline void ghes_nmi_add(struct ghes *ghes) { } static inline void ghes_nmi_remove(struct ghes *ghes) { } #endif /* CONFIG_HAVE_ACPI_APEI_NMI */ static void ghes_nmi_init_cxt(void) { init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); } static int __ghes_sdei_callback(struct ghes *ghes, enum fixed_addresses fixmap_idx) { if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) { irq_work_queue(&ghes_proc_irq_work); return 0; } return -ENOENT; } static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs, void *arg) { static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal); struct ghes *ghes = arg; int err; raw_spin_lock(&ghes_notify_lock_sdei_normal); err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL); raw_spin_unlock(&ghes_notify_lock_sdei_normal); return err; } static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs, void *arg) { static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical); struct ghes *ghes = arg; int err; raw_spin_lock(&ghes_notify_lock_sdei_critical); err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL); raw_spin_unlock(&ghes_notify_lock_sdei_critical); return err; } static int apei_sdei_register_ghes(struct ghes *ghes) { if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) return -EOPNOTSUPP; return sdei_register_ghes(ghes, ghes_sdei_normal_callback, ghes_sdei_critical_callback); } static int apei_sdei_unregister_ghes(struct ghes *ghes) { if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) return -EOPNOTSUPP; return sdei_unregister_ghes(ghes); } static int ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; struct ghes *ghes = NULL; unsigned long flags; int rc = -EINVAL; generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; if (!generic->enabled) return -ENODEV; switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: case ACPI_HEST_NOTIFY_EXTERNAL: case ACPI_HEST_NOTIFY_SCI: case 
ACPI_HEST_NOTIFY_GSIV: case ACPI_HEST_NOTIFY_GPIO: break; case ACPI_HEST_NOTIFY_SEA: if (!IS_ENABLED(CONFIG_ACPI_APEI_SEA)) { pr_warn(GHES_PFX "Generic hardware error source: %d notified via SEA is not supported\n", generic->header.source_id); rc = -ENOTSUPP; goto err; } break; case ACPI_HEST_NOTIFY_NMI: if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) { pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n", generic->header.source_id); goto err; } break; case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) { pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not supported!\n", generic->header.source_id); goto err; } break; case ACPI_HEST_NOTIFY_LOCAL: pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", generic->header.source_id); goto err; default: pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", generic->notify.type, generic->header.source_id); goto err; } rc = -EIO; if (generic->error_block_length < sizeof(struct acpi_hest_generic_status)) { pr_warn(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", generic->error_block_length, generic->header.source_id); goto err; } ghes = ghes_new(generic); if (IS_ERR(ghes)) { rc = PTR_ERR(ghes); ghes = NULL; goto err; } switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: timer_setup(&ghes->timer, ghes_poll_func, 0); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: /* External interrupt vector is GSI */ rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq); if (rc) { pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", generic->header.source_id); goto err; } rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED, "GHES IRQ", ghes); if (rc) { pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", 
generic->header.source_id); goto err; } break; case ACPI_HEST_NOTIFY_SCI: case ACPI_HEST_NOTIFY_GSIV: case ACPI_HEST_NOTIFY_GPIO: mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_hed)) register_acpi_hed_notifier(&ghes_notifier_hed); list_add_rcu(&ghes->list, &ghes_hed); mutex_unlock(&ghes_list_mutex); break; case ACPI_HEST_NOTIFY_SEA: ghes_sea_add(ghes); break; case ACPI_HEST_NOTIFY_NMI: ghes_nmi_add(ghes); break; case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: rc = apei_sdei_register_ghes(ghes); if (rc) goto err; break; default: BUG(); } platform_set_drvdata(ghes_dev, ghes); ghes->dev = &ghes_dev->dev; mutex_lock(&ghes_devs_mutex); list_add_tail(&ghes->elist, &ghes_devs); mutex_unlock(&ghes_devs_mutex); /* Handle any pending errors right away */ spin_lock_irqsave(&ghes_notify_lock_irq, flags); ghes_proc(ghes); spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); return 0; err: if (ghes) { ghes_fini(ghes); kfree(ghes); } return rc; } static int ghes_remove(struct platform_device *ghes_dev) { int rc; struct ghes *ghes; struct acpi_hest_generic *generic; ghes = platform_get_drvdata(ghes_dev); generic = ghes->generic; ghes->flags |= GHES_EXITING; switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: timer_shutdown_sync(&ghes->timer); break; case ACPI_HEST_NOTIFY_EXTERNAL: free_irq(ghes->irq, ghes); break; case ACPI_HEST_NOTIFY_SCI: case ACPI_HEST_NOTIFY_GSIV: case ACPI_HEST_NOTIFY_GPIO: mutex_lock(&ghes_list_mutex); list_del_rcu(&ghes->list); if (list_empty(&ghes_hed)) unregister_acpi_hed_notifier(&ghes_notifier_hed); mutex_unlock(&ghes_list_mutex); synchronize_rcu(); break; case ACPI_HEST_NOTIFY_SEA: ghes_sea_remove(ghes); break; case ACPI_HEST_NOTIFY_NMI: ghes_nmi_remove(ghes); break; case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: rc = apei_sdei_unregister_ghes(ghes); if (rc) return rc; break; default: BUG(); break; } ghes_fini(ghes); mutex_lock(&ghes_devs_mutex); list_del(&ghes->elist); mutex_unlock(&ghes_devs_mutex); kfree(ghes); return 0; } static struct 
platform_driver ghes_platform_driver = { .driver = { .name = "GHES", }, .probe = ghes_probe, .remove = ghes_remove, }; void __init acpi_ghes_init(void) { int rc; sdei_init(); if (acpi_disabled) return; switch (hest_disable) { case HEST_NOT_FOUND: return; case HEST_DISABLED: pr_info(GHES_PFX "HEST is not enabled!\n"); return; default: break; } if (ghes_disable) { pr_info(GHES_PFX "GHES is not enabled!\n"); return; } ghes_nmi_init_cxt(); rc = platform_driver_register(&ghes_platform_driver); if (rc) return; rc = apei_osc_setup(); if (rc == 0 && osc_sb_apei_support_acked) pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n"); else if (rc == 0 && !osc_sb_apei_support_acked) pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n"); else if (rc && osc_sb_apei_support_acked) pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); else pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); } /* * Known x86 systems that prefer GHES error reporting: */ static struct acpi_platform_list plat_list[] = { {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions}, { } /* End */ }; struct list_head *ghes_get_devices(void) { int idx = -1; if (IS_ENABLED(CONFIG_X86)) { idx = acpi_match_platform_list(plat_list); if (idx < 0) { if (!ghes_edac_force_enable) return NULL; pr_warn_once("Force-loading ghes_edac on an unsupported platform. You're on your own!\n"); } } else if (list_empty(&ghes_devs)) { return NULL; } return &ghes_devs; } EXPORT_SYMBOL_GPL(ghes_get_devices); void ghes_register_report_chain(struct notifier_block *nb) { atomic_notifier_chain_register(&ghes_report_chain, nb); } EXPORT_SYMBOL_GPL(ghes_register_report_chain); void ghes_unregister_report_chain(struct notifier_block *nb) { atomic_notifier_chain_unregister(&ghes_report_chain, nb); } EXPORT_SYMBOL_GPL(ghes_unregister_report_chain);
linux-master
drivers/acpi/apei/ghes.c
// SPDX-License-Identifier: GPL-2.0-only /* * APEI Hardware Error Source Table support * * HEST describes error sources in detail; communicates operational * parameters (i.e. severity levels, masking bits, and threshold * values) to Linux as necessary. It also allows the BIOS to report * non-standard error sources to Linux (for example, chipset-specific * error registers). * * For more information about HEST, please refer to ACPI Specification * version 4.0, section 17.3.2. * * Copyright 2009 Intel Corp. * Author: Huang Ying <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/kdebug.h> #include <linux/highmem.h> #include <linux/io.h> #include <linux/platform_device.h> #include <acpi/apei.h> #include <acpi/ghes.h> #include "apei-internal.h" #define HEST_PFX "HEST: " int hest_disable; EXPORT_SYMBOL_GPL(hest_disable); /* HEST table parsing */ static struct acpi_table_hest *__read_mostly hest_tab; static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root), [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer), [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), [ACPI_HEST_TYPE_GENERIC_ERROR_V2] = sizeof(struct acpi_hest_generic_v2), [ACPI_HEST_TYPE_IA32_DEFERRED_CHECK] = -1, }; static inline bool is_generic_error(struct acpi_hest_header *hest_hdr) { return hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR || hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR_V2; } static int hest_esrc_len(struct acpi_hest_header *hest_hdr) { u16 hest_type = hest_hdr->type; int len; if (hest_type >= ACPI_HEST_TYPE_RESERVED) return 0; len = hest_esrc_len_tab[hest_type]; if (hest_type 
== ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) { struct acpi_hest_ia_corrected *cmc; cmc = (struct acpi_hest_ia_corrected *)hest_hdr; len = sizeof(*cmc) + cmc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) { struct acpi_hest_ia_machine_check *mc; mc = (struct acpi_hest_ia_machine_check *)hest_hdr; len = sizeof(*mc) + mc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); } else if (hest_type == ACPI_HEST_TYPE_IA32_DEFERRED_CHECK) { struct acpi_hest_ia_deferred_check *mc; mc = (struct acpi_hest_ia_deferred_check *)hest_hdr; len = sizeof(*mc) + mc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); } BUG_ON(len == -1); return len; }; typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data); static int apei_hest_parse(apei_hest_func_t func, void *data) { struct acpi_hest_header *hest_hdr; int i, rc, len; if (hest_disable || !hest_tab) return -EINVAL; hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); for (i = 0; i < hest_tab->error_source_count; i++) { len = hest_esrc_len(hest_hdr); if (!len) { pr_warn(FW_WARN HEST_PFX "Unknown or unused hardware error source " "type: %d for hardware error source: %d.\n", hest_hdr->type, hest_hdr->source_id); return -EINVAL; } if ((void *)hest_hdr + len > (void *)hest_tab + hest_tab->header.length) { pr_warn(FW_BUG HEST_PFX "Table contents overflow for hardware error source: %d.\n", hest_hdr->source_id); return -EINVAL; } rc = func(hest_hdr, data); if (rc) return rc; hest_hdr = (void *)hest_hdr + len; } return 0; } /* * Check if firmware advertises firmware first mode. We need FF bit to be set * along with a set of MC banks which work in FF mode. 
*/ static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data) { if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) return 0; if (!acpi_disable_cmcff) return !arch_apei_enable_cmcff(hest_hdr, data); return 0; } struct ghes_arr { struct platform_device **ghes_devs; unsigned int count; }; static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) { int *count = data; if (is_generic_error(hest_hdr)) (*count)++; return 0; } static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) { struct platform_device *ghes_dev; struct ghes_arr *ghes_arr = data; int rc, i; if (!is_generic_error(hest_hdr)) return 0; if (!((struct acpi_hest_generic *)hest_hdr)->enabled) return 0; for (i = 0; i < ghes_arr->count; i++) { struct acpi_hest_header *hdr; ghes_dev = ghes_arr->ghes_devs[i]; hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data; if (hdr->source_id == hest_hdr->source_id) { pr_warn(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n", hdr->source_id); return -EIO; } } ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); if (!ghes_dev) return -ENOMEM; rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *)); if (rc) goto err; rc = platform_device_add(ghes_dev); if (rc) goto err; ghes_arr->ghes_devs[ghes_arr->count++] = ghes_dev; return 0; err: platform_device_put(ghes_dev); return rc; } static int __init hest_ghes_dev_register(unsigned int ghes_count) { int rc, i; struct ghes_arr ghes_arr; ghes_arr.count = 0; ghes_arr.ghes_devs = kmalloc_array(ghes_count, sizeof(void *), GFP_KERNEL); if (!ghes_arr.ghes_devs) return -ENOMEM; rc = apei_hest_parse(hest_parse_ghes, &ghes_arr); if (rc) goto err; rc = ghes_estatus_pool_init(ghes_count); if (rc) goto err; out: kfree(ghes_arr.ghes_devs); return rc; err: for (i = 0; i < ghes_arr.count; i++) platform_device_unregister(ghes_arr.ghes_devs[i]); goto out; } static int __init setup_hest_disable(char *str) { hest_disable = 
HEST_DISABLED; return 1; } __setup("hest_disable", setup_hest_disable); void __init acpi_hest_init(void) { acpi_status status; int rc; unsigned int ghes_count = 0; if (hest_disable) { pr_info(HEST_PFX "Table parsing disabled.\n"); return; } status = acpi_get_table(ACPI_SIG_HEST, 0, (struct acpi_table_header **)&hest_tab); if (status == AE_NOT_FOUND) { hest_disable = HEST_NOT_FOUND; return; } else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(HEST_PFX "Failed to get table, %s\n", msg); hest_disable = HEST_DISABLED; return; } rc = apei_hest_parse(hest_parse_cmc, NULL); if (rc) goto err; if (!ghes_disable) { rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); if (rc) goto err; if (ghes_count) rc = hest_ghes_dev_register(ghes_count); if (rc) goto err; } pr_info(HEST_PFX "Table parsing has been initialized.\n"); return; err: hest_disable = HEST_DISABLED; acpi_put_table((struct acpi_table_header *)hest_tab); }
linux-master
drivers/acpi/apei/hest.c
// SPDX-License-Identifier: GPL-2.0-only /* * APEI Error Record Serialization Table debug support * * ERST is a way provided by APEI to save and retrieve hardware error * information to and from a persistent store. This file provide the * debugging/testing support for ERST kernel support and firmware * implementation. * * Copyright 2010 Intel Corp. * Author: Huang Ying <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> #include <acpi/apei.h> #include <linux/miscdevice.h> #include "apei-internal.h" #define ERST_DBG_PFX "ERST DBG: " #define ERST_DBG_RECORD_LEN_MAX 0x4000 static void *erst_dbg_buf; static unsigned int erst_dbg_buf_len; /* Prevent erst_dbg_read/write from being invoked concurrently */ static DEFINE_MUTEX(erst_dbg_mutex); static int erst_dbg_open(struct inode *inode, struct file *file) { int rc, *pos; if (erst_disable) return -ENODEV; pos = (int *)&file->private_data; rc = erst_get_record_id_begin(pos); if (rc) return rc; return nonseekable_open(inode, file); } static int erst_dbg_release(struct inode *inode, struct file *file) { erst_get_record_id_end(); return 0; } static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int rc; u64 record_id; u32 record_count; switch (cmd) { case APEI_ERST_CLEAR_RECORD: rc = copy_from_user(&record_id, (void __user *)arg, sizeof(record_id)); if (rc) return -EFAULT; return erst_clear(record_id); case APEI_ERST_GET_RECORD_COUNT: rc = erst_get_record_count(); if (rc < 0) return rc; record_count = rc; rc = put_user(record_count, (u32 __user *)arg); if (rc) return rc; return 0; default: return -ENOTTY; } } static ssize_t erst_dbg_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) { int rc, *pos; ssize_t len = 0; u64 id; if (*off) return -EINVAL; if (mutex_lock_interruptible(&erst_dbg_mutex) != 0) return -EINTR; pos = (int *)&filp->private_data; retry_next: rc = erst_get_record_id_next(pos, &id); if (rc) goto out; /* no more 
record */ if (id == APEI_ERST_INVALID_RECORD_ID) { /* * If the persistent store is empty initially, the function * 'erst_read' below will return "-ENOENT" value. This causes * 'retry_next' label is entered again. The returned value * should be zero indicating the read operation is EOF. */ len = 0; goto out; } retry: rc = len = erst_read_record(id, erst_dbg_buf, erst_dbg_buf_len, erst_dbg_buf_len, NULL); /* The record may be cleared by others, try read next record */ if (rc == -ENOENT) goto retry_next; if (rc < 0) goto out; if (len > ERST_DBG_RECORD_LEN_MAX) { pr_warn(ERST_DBG_PFX "Record (ID: 0x%llx) length is too long: %zd\n", id, len); rc = -EIO; goto out; } if (len > erst_dbg_buf_len) { void *p; rc = -ENOMEM; p = kmalloc(len, GFP_KERNEL); if (!p) goto out; kfree(erst_dbg_buf); erst_dbg_buf = p; erst_dbg_buf_len = len; goto retry; } rc = -EINVAL; if (len > usize) goto out; rc = -EFAULT; if (copy_to_user(ubuf, erst_dbg_buf, len)) goto out; rc = 0; out: mutex_unlock(&erst_dbg_mutex); return rc ? rc : len; } static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off) { int rc; struct cper_record_header *rcd; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (usize > ERST_DBG_RECORD_LEN_MAX) { pr_err(ERST_DBG_PFX "Too long record to be written\n"); return -EINVAL; } if (mutex_lock_interruptible(&erst_dbg_mutex)) return -EINTR; if (usize > erst_dbg_buf_len) { void *p; rc = -ENOMEM; p = kmalloc(usize, GFP_KERNEL); if (!p) goto out; kfree(erst_dbg_buf); erst_dbg_buf = p; erst_dbg_buf_len = usize; } rc = copy_from_user(erst_dbg_buf, ubuf, usize); if (rc) { rc = -EFAULT; goto out; } rcd = erst_dbg_buf; rc = -EINVAL; if (rcd->record_length != usize) goto out; rc = erst_write(erst_dbg_buf); out: mutex_unlock(&erst_dbg_mutex); return rc < 0 ? 
rc : usize; } static const struct file_operations erst_dbg_ops = { .owner = THIS_MODULE, .open = erst_dbg_open, .release = erst_dbg_release, .read = erst_dbg_read, .write = erst_dbg_write, .unlocked_ioctl = erst_dbg_ioctl, .llseek = no_llseek, }; static struct miscdevice erst_dbg_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "erst_dbg", .fops = &erst_dbg_ops, }; static __init int erst_dbg_init(void) { if (erst_disable) { pr_info(ERST_DBG_PFX "ERST support is disabled.\n"); return -ENODEV; } return misc_register(&erst_dbg_dev); } static __exit void erst_dbg_exit(void) { misc_deregister(&erst_dbg_dev); kfree(erst_dbg_buf); } module_init(erst_dbg_init); module_exit(erst_dbg_exit); MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("APEI Error Record Serialization Table debug support"); MODULE_LICENSE("GPL");
linux-master
drivers/acpi/apei/erst-dbg.c
// SPDX-License-Identifier: GPL-2.0 /* * Architecture-specific ACPI-based support for suspend-to-idle. * * Author: Rafael J. Wysocki <[email protected]> * Author: Srinivas Pandruvada <[email protected]> * Author: Shyam Sundar S K <[email protected]> * * On platforms supporting the Low Power S0 Idle interface there is an ACPI * device object with the PNP0D80 compatible device ID (System Power Management * Controller) and a specific _DSM method under it. That method, if present, * can be used to indicate to the platform that the OS is transitioning into a * low-power state in which certain types of activity are not desirable or that * it is leaving such a state, which allows the platform to adjust its operation * mode accordingly. */ #include <linux/acpi.h> #include <linux/device.h> #include <linux/dmi.h> #include <linux/suspend.h> #include "../sleep.h" #ifdef CONFIG_SUSPEND static bool sleep_no_lps0 __read_mostly; module_param(sleep_no_lps0, bool, 0644); MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface"); static const struct acpi_device_id lps0_device_ids[] = { {"PNP0D80", }, {"", }, }; /* Microsoft platform agnostic UUID */ #define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461" #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1 #define ACPI_LPS0_SCREEN_OFF 3 #define ACPI_LPS0_SCREEN_ON 4 #define ACPI_LPS0_ENTRY 5 #define ACPI_LPS0_EXIT 6 #define ACPI_LPS0_MS_ENTRY 7 #define ACPI_LPS0_MS_EXIT 8 /* AMD */ #define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721" #define ACPI_LPS0_ENTRY_AMD 2 #define ACPI_LPS0_EXIT_AMD 3 #define ACPI_LPS0_SCREEN_OFF_AMD 4 #define ACPI_LPS0_SCREEN_ON_AMD 5 static acpi_handle lps0_device_handle; static guid_t lps0_dsm_guid; static int lps0_dsm_func_mask; static guid_t lps0_dsm_guid_microsoft; static int lps0_dsm_func_mask_microsoft; static int lps0_dsm_state; /* Device constraint entry structure */ struct 
lpi_device_info { char *name; int enabled; union acpi_object *package; }; /* Constraint package structure */ struct lpi_device_constraint { int uid; int min_dstate; int function_states; }; struct lpi_constraints { acpi_handle handle; int min_dstate; }; /* AMD Constraint package structure */ struct lpi_device_constraint_amd { char *name; int enabled; int function_states; int min_dstate; }; static LIST_HEAD(lps0_s2idle_devops_head); static struct lpi_constraints *lpi_constraints_table; static int lpi_constraints_table_size; static int rev_id; #define for_each_lpi_constraint(entry) \ for (int i = 0; \ entry = &lpi_constraints_table[i], i < lpi_constraints_table_size; \ i++) static void lpi_device_get_constraints_amd(void) { union acpi_object *out_obj; int i, j, k; out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, NULL, ACPI_TYPE_PACKAGE); acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", out_obj ? "successful" : "failed"); if (!out_obj) return; for (i = 0; i < out_obj->package.count; i++) { union acpi_object *package = &out_obj->package.elements[i]; if (package->type == ACPI_TYPE_PACKAGE) { if (lpi_constraints_table) { acpi_handle_err(lps0_device_handle, "Duplicate constraints list\n"); goto free_acpi_buffer; } lpi_constraints_table = kcalloc(package->package.count, sizeof(*lpi_constraints_table), GFP_KERNEL); if (!lpi_constraints_table) goto free_acpi_buffer; acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); for (j = 0; j < package->package.count; j++) { union acpi_object *info_obj = &package->package.elements[j]; struct lpi_device_constraint_amd dev_info = {}; struct lpi_constraints *list; acpi_status status; list = &lpi_constraints_table[lpi_constraints_table_size]; for (k = 0; k < info_obj->package.count; k++) { union acpi_object *obj = &info_obj->package.elements[k]; switch (k) { case 0: dev_info.enabled = obj->integer.value; break; case 1: dev_info.name = 
obj->string.pointer; break; case 2: dev_info.function_states = obj->integer.value; break; case 3: dev_info.min_dstate = obj->integer.value; break; } } acpi_handle_debug(lps0_device_handle, "Name:%s, Enabled: %d, States: %d, MinDstate: %d\n", dev_info.name, dev_info.enabled, dev_info.function_states, dev_info.min_dstate); if (!dev_info.enabled || !dev_info.name || !dev_info.min_dstate) continue; status = acpi_get_handle(NULL, dev_info.name, &list->handle); if (ACPI_FAILURE(status)) continue; list->min_dstate = dev_info.min_dstate; lpi_constraints_table_size++; } } } acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); free_acpi_buffer: ACPI_FREE(out_obj); } static void lpi_device_get_constraints(void) { union acpi_object *out_obj; int i; out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, NULL, ACPI_TYPE_PACKAGE); acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", out_obj ? "successful" : "failed"); if (!out_obj) return; lpi_constraints_table = kcalloc(out_obj->package.count, sizeof(*lpi_constraints_table), GFP_KERNEL); if (!lpi_constraints_table) goto free_acpi_buffer; acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); for (i = 0; i < out_obj->package.count; i++) { struct lpi_constraints *constraint; acpi_status status; union acpi_object *package = &out_obj->package.elements[i]; struct lpi_device_info info = { }; int package_count = 0, j; if (!package) continue; for (j = 0; j < package->package.count; j++) { union acpi_object *element = &(package->package.elements[j]); switch (element->type) { case ACPI_TYPE_INTEGER: info.enabled = element->integer.value; break; case ACPI_TYPE_STRING: info.name = element->string.pointer; break; case ACPI_TYPE_PACKAGE: package_count = element->package.count; info.package = element->package.elements; break; } } if (!info.enabled || !info.package || !info.name) continue; constraint = 
&lpi_constraints_table[lpi_constraints_table_size]; status = acpi_get_handle(NULL, info.name, &constraint->handle); if (ACPI_FAILURE(status)) continue; acpi_handle_debug(lps0_device_handle, "index:%d Name:%s\n", i, info.name); constraint->min_dstate = -1; for (j = 0; j < package_count; j++) { union acpi_object *info_obj = &info.package[j]; union acpi_object *cnstr_pkg; union acpi_object *obj; struct lpi_device_constraint dev_info; switch (info_obj->type) { case ACPI_TYPE_INTEGER: /* version */ break; case ACPI_TYPE_PACKAGE: if (info_obj->package.count < 2) break; cnstr_pkg = info_obj->package.elements; obj = &cnstr_pkg[0]; dev_info.uid = obj->integer.value; obj = &cnstr_pkg[1]; dev_info.min_dstate = obj->integer.value; acpi_handle_debug(lps0_device_handle, "uid:%d min_dstate:%s\n", dev_info.uid, acpi_power_state_string(dev_info.min_dstate)); constraint->min_dstate = dev_info.min_dstate; break; } } if (constraint->min_dstate < 0) { acpi_handle_debug(lps0_device_handle, "Incomplete constraint defined\n"); continue; } lpi_constraints_table_size++; } acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); free_acpi_buffer: ACPI_FREE(out_obj); } /** * acpi_get_lps0_constraint - Get the LPS0 constraint for a device. * @adev: Device to get the constraint for. * * The LPS0 constraint is the shallowest (minimum) power state in which the * device can be so as to allow the platform as a whole to achieve additional * energy conservation by utilizing a system-wide low-power state. * * Returns: * - ACPI power state value of the constraint for @adev on success. * - Otherwise, ACPI_STATE_UNKNOWN. 
*/ int acpi_get_lps0_constraint(struct acpi_device *adev) { struct lpi_constraints *entry; for_each_lpi_constraint(entry) { if (adev->handle == entry->handle) return entry->min_dstate; } return ACPI_STATE_UNKNOWN; } static void lpi_check_constraints(void) { struct lpi_constraints *entry; for_each_lpi_constraint(entry) { struct acpi_device *adev = acpi_fetch_acpi_dev(entry->handle); if (!adev) continue; acpi_handle_debug(entry->handle, "LPI: required min power state:%s current power state:%s\n", acpi_power_state_string(entry->min_dstate), acpi_power_state_string(adev->power.state)); if (!adev->flags.power_manageable) { acpi_handle_info(entry->handle, "LPI: Device not power manageable\n"); entry->handle = NULL; continue; } if (adev->power.state < entry->min_dstate) acpi_handle_info(entry->handle, "LPI: Constraint not met; min power state:%s current power state:%s\n", acpi_power_state_string(entry->min_dstate), acpi_power_state_string(adev->power.state)); } } static bool acpi_s2idle_vendor_amd(void) { return boot_cpu_data.x86_vendor == X86_VENDOR_AMD; } static const char *acpi_sleep_dsm_state_to_str(unsigned int state) { if (lps0_dsm_func_mask_microsoft || !acpi_s2idle_vendor_amd()) { switch (state) { case ACPI_LPS0_SCREEN_OFF: return "screen off"; case ACPI_LPS0_SCREEN_ON: return "screen on"; case ACPI_LPS0_ENTRY: return "lps0 entry"; case ACPI_LPS0_EXIT: return "lps0 exit"; case ACPI_LPS0_MS_ENTRY: return "lps0 ms entry"; case ACPI_LPS0_MS_EXIT: return "lps0 ms exit"; } } else { switch (state) { case ACPI_LPS0_SCREEN_ON_AMD: return "screen on"; case ACPI_LPS0_SCREEN_OFF_AMD: return "screen off"; case ACPI_LPS0_ENTRY_AMD: return "lps0 entry"; case ACPI_LPS0_EXIT_AMD: return "lps0 exit"; } } return "unknown"; } static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid) { union acpi_object *out_obj; if (!(func_mask & (1 << func))) return; out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid, rev_id, func, NULL); 
ACPI_FREE(out_obj); lps0_dsm_state = func; if (pm_debug_messages_on) { acpi_handle_info(lps0_device_handle, "%s transitioned to state %s\n", out_obj ? "Successfully" : "Failed to", acpi_sleep_dsm_state_to_str(lps0_dsm_state)); } } static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid) { union acpi_object *obj; int ret = -EINVAL; guid_parse(uuid, dsm_guid); obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL); /* Check if the _DSM is present and as expected. */ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 || obj->buffer.length > sizeof(u32)) { acpi_handle_debug(handle, "_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev); goto out; } ret = *(int *)obj->buffer.pointer; acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret); out: ACPI_FREE(obj); return ret; } struct amd_lps0_hid_device_data { const bool check_off_by_one; }; static const struct amd_lps0_hid_device_data amd_picasso = { .check_off_by_one = true, }; static const struct amd_lps0_hid_device_data amd_cezanne = { .check_off_by_one = false, }; static const struct acpi_device_id amd_hid_ids[] = { {"AMD0004", (kernel_ulong_t)&amd_picasso, }, {"AMD0005", (kernel_ulong_t)&amd_picasso, }, {"AMDI0005", (kernel_ulong_t)&amd_picasso, }, {"AMDI0006", (kernel_ulong_t)&amd_cezanne, }, {} }; static int lps0_device_attach(struct acpi_device *adev, const struct acpi_device_id *not_used) { if (lps0_device_handle) return 0; lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_MICROSOFT, 0, &lps0_dsm_guid_microsoft); if (acpi_s2idle_vendor_amd()) { static const struct acpi_device_id *dev_id; const struct amd_lps0_hid_device_data *data; for (dev_id = &amd_hid_ids[0]; dev_id->id[0]; dev_id++) if (acpi_dev_hid_uid_match(adev, dev_id->id, NULL)) break; if (dev_id->id[0]) data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data; else data = &amd_cezanne; lps0_dsm_func_mask = 
validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) { lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) { lps0_dsm_func_mask_microsoft = -EINVAL; acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } } else { rev_id = 1; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid); lps0_dsm_func_mask_microsoft = -EINVAL; } if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0) return 0; //function evaluation failed lps0_device_handle = adev->handle; if (acpi_s2idle_vendor_amd()) lpi_device_get_constraints_amd(); else lpi_device_get_constraints(); /* * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in * the FADT and the default suspend mode was not set from the command * line. */ if ((acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) && mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3) { mem_sleep_current = PM_SUSPEND_TO_IDLE; pr_info("Low-power S0 idle used by default for system suspend\n"); } /* * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the * EC GPE to be enabled while suspended for certain wakeup devices to * work, so mark it as wakeup-capable. */ acpi_ec_mark_gpe_for_wake(); return 0; } static struct acpi_scan_handler lps0_handler = { .ids = lps0_device_ids, .attach = lps0_device_attach, }; int acpi_s2idle_prepare_late(void) { struct acpi_s2idle_dev_ops *handler; if (!lps0_device_handle || sleep_no_lps0) return 0; if (pm_debug_messages_on) lpi_check_constraints(); /* Screen off */ if (lps0_dsm_func_mask > 0) acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? 
ACPI_LPS0_SCREEN_OFF_AMD : ACPI_LPS0_SCREEN_OFF, lps0_dsm_func_mask, lps0_dsm_guid); if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); /* LPS0 entry */ if (lps0_dsm_func_mask > 0) acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? ACPI_LPS0_ENTRY_AMD : ACPI_LPS0_ENTRY, lps0_dsm_func_mask, lps0_dsm_guid); if (lps0_dsm_func_mask_microsoft > 0) { /* modern standby entry */ acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); } list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { if (handler->prepare) handler->prepare(); } return 0; } void acpi_s2idle_check(void) { struct acpi_s2idle_dev_ops *handler; if (!lps0_device_handle || sleep_no_lps0) return; list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) { if (handler->check) handler->check(); } } void acpi_s2idle_restore_early(void) { struct acpi_s2idle_dev_ops *handler; if (!lps0_device_handle || sleep_no_lps0) return; list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) if (handler->restore) handler->restore(); /* LPS0 exit */ if (lps0_dsm_func_mask > 0) acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? ACPI_LPS0_EXIT_AMD : ACPI_LPS0_EXIT, lps0_dsm_func_mask, lps0_dsm_guid); if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); /* Modern standby exit */ if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); /* Screen on */ if (lps0_dsm_func_mask_microsoft > 0) acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); if (lps0_dsm_func_mask > 0) acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ? 
ACPI_LPS0_SCREEN_ON_AMD : ACPI_LPS0_SCREEN_ON, lps0_dsm_func_mask, lps0_dsm_guid); } static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = { .begin = acpi_s2idle_begin, .prepare = acpi_s2idle_prepare, .prepare_late = acpi_s2idle_prepare_late, .check = acpi_s2idle_check, .wake = acpi_s2idle_wake, .restore_early = acpi_s2idle_restore_early, .restore = acpi_s2idle_restore, .end = acpi_s2idle_end, }; void __init acpi_s2idle_setup(void) { acpi_scan_add_handler(&lps0_handler); s2idle_set_ops(&acpi_s2idle_ops_lps0); } int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) { unsigned int sleep_flags; if (!lps0_device_handle || sleep_no_lps0) return -ENODEV; sleep_flags = lock_system_sleep(); list_add(&arg->list_node, &lps0_s2idle_devops_head); unlock_system_sleep(sleep_flags); return 0; } EXPORT_SYMBOL_GPL(acpi_register_lps0_dev); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) { unsigned int sleep_flags; if (!lps0_device_handle || sleep_no_lps0) return; sleep_flags = lock_system_sleep(); list_del(&arg->list_node); unlock_system_sleep(sleep_flags); } EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev); #endif /* CONFIG_SUSPEND */
linux-master
drivers/acpi/x86/s2idle.c
// SPDX-License-Identifier: GPL-2.0-only /* * apple.c - Apple ACPI quirks * Copyright (C) 2017 Lukas Wunner <[email protected]> */ #include <linux/acpi.h> #include <linux/bitmap.h> #include <linux/platform_data/x86/apple.h> #include <linux/uuid.h> #include "../internal.h" /* Apple _DSM device properties GUID */ static const guid_t apple_prp_guid = GUID_INIT(0xa0b5b7c6, 0x1318, 0x441c, 0xb0, 0xc9, 0xfe, 0x69, 0x5e, 0xaf, 0x94, 0x9b); /** * acpi_extract_apple_properties - retrieve and convert Apple _DSM properties * @adev: ACPI device for which to retrieve the properties * * Invoke Apple's custom _DSM once to check the protocol version and once more * to retrieve the properties. They are marshalled up in a single package as * alternating key/value elements, unlike _DSD which stores them as a package * of 2-element packages. Convert to _DSD format and make them available under * the primary fwnode. */ void acpi_extract_apple_properties(struct acpi_device *adev) { unsigned int i, j = 0, newsize = 0, numprops, numvalid; union acpi_object *props, *newprops; unsigned long *valid = NULL; void *free_space; if (!x86_apple_machine) return; props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 0, NULL, ACPI_TYPE_BUFFER); if (!props) return; if (!props->buffer.length) goto out_free; if (props->buffer.pointer[0] != 3) { acpi_handle_info(adev->handle, FW_INFO "unsupported properties version %*ph\n", props->buffer.length, props->buffer.pointer); goto out_free; } ACPI_FREE(props); props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 1, NULL, ACPI_TYPE_PACKAGE); if (!props) return; numprops = props->package.count / 2; if (!numprops) goto out_free; valid = bitmap_zalloc(numprops, GFP_KERNEL); if (!valid) goto out_free; /* newsize = key length + value length of each tuple */ for (i = 0; i < numprops; i++) { union acpi_object *key = &props->package.elements[i * 2]; union acpi_object *val = &props->package.elements[i * 2 + 1]; if ( key->type != ACPI_TYPE_STRING 
|| (val->type != ACPI_TYPE_INTEGER && val->type != ACPI_TYPE_BUFFER && val->type != ACPI_TYPE_STRING)) continue; /* skip invalid properties */ __set_bit(i, valid); newsize += key->string.length + 1; if ( val->type == ACPI_TYPE_BUFFER) newsize += val->buffer.length; else if (val->type == ACPI_TYPE_STRING) newsize += val->string.length + 1; } numvalid = bitmap_weight(valid, numprops); if (numprops > numvalid) acpi_handle_info(adev->handle, FW_INFO "skipped %u properties: wrong type\n", numprops - numvalid); if (numvalid == 0) goto out_free; /* newsize += top-level package + 3 objects for each key/value tuple */ newsize += (1 + 3 * numvalid) * sizeof(union acpi_object); newprops = ACPI_ALLOCATE_ZEROED(newsize); if (!newprops) goto out_free; /* layout: top-level package | packages | key/value tuples | strings */ newprops->type = ACPI_TYPE_PACKAGE; newprops->package.count = numvalid; newprops->package.elements = &newprops[1]; free_space = &newprops[1 + 3 * numvalid]; for_each_set_bit(i, valid, numprops) { union acpi_object *key = &props->package.elements[i * 2]; union acpi_object *val = &props->package.elements[i * 2 + 1]; unsigned int k = 1 + numvalid + j * 2; /* index into newprops */ unsigned int v = k + 1; newprops[1 + j].type = ACPI_TYPE_PACKAGE; newprops[1 + j].package.count = 2; newprops[1 + j].package.elements = &newprops[k]; newprops[k].type = ACPI_TYPE_STRING; newprops[k].string.length = key->string.length; newprops[k].string.pointer = free_space; memcpy(free_space, key->string.pointer, key->string.length); free_space += key->string.length + 1; newprops[v].type = val->type; if (val->type == ACPI_TYPE_INTEGER) { newprops[v].integer.value = val->integer.value; } else if (val->type == ACPI_TYPE_STRING) { newprops[v].string.length = val->string.length; newprops[v].string.pointer = free_space; memcpy(free_space, val->string.pointer, val->string.length); free_space += val->string.length + 1; } else { newprops[v].buffer.length = val->buffer.length; 
newprops[v].buffer.pointer = free_space; memcpy(free_space, val->buffer.pointer, val->buffer.length); free_space += val->buffer.length; } j++; /* count valid properties */ } WARN_ON(free_space != (void *)newprops + newsize); adev->data.pointer = newprops; acpi_data_add_props(&adev->data, &apple_prp_guid, newprops); out_free: ACPI_FREE(props); bitmap_free(valid); }
linux-master
drivers/acpi/x86/apple.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 ACPI Utility Functions
 *
 * Copyright (C) 2017 Hans de Goede <[email protected]>
 *
 * Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
 * Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../internal.h"

/*
 * Some ACPI devices are hidden (status == 0x0) in recent BIOS-es because
 * some recent Windows drivers bind to one device but poke at multiple
 * devices at the same time, so the others get hidden.
 *
 * Some BIOS-es (temporarily) hide specific APCI devices to work around Windows
 * driver bugs. We use DMI matching to match known cases of this.
 *
 * Likewise sometimes some not-actually present devices are sometimes
 * reported as present, which may cause issues.
 *
 * We work around this by using the below quirk list to override the status
 * reported by the _STA method with a fixed value (ACPI_STA_DEFAULT or 0).
 * Note this MUST only be done for devices where this is safe.
 *
 * This status overriding is limited to specific CPU (SoC) models both to
 * avoid potentially causing trouble on other models and because some HIDs
 * are re-used on different SoCs for completely different devices.
 */

/*
 * One quirk entry. A device matches either by HID + _UID (path == NULL)
 * or by full ACPI path (hid == ""); the DMI match is optional.
 */
struct override_status_id {
    struct acpi_device_id hid[2];       /* [0] = HID to match, [1] = terminator */
    struct x86_cpu_id cpu_ids[2];       /* [0] = SoC model to match, [1] = terminator */
    struct dmi_system_id dmi_ids[2];    /* Optional */
    const char *uid;                    /* _UID string, used only for HID matches */
    const char *path;                   /* Full ACPI path, used instead of HID when set */
    unsigned long long status;          /* Value to report instead of _STA's result */
};

#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \
    { { hid, }, {} }, \
    { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \
    { { .matches = dmi }, {} }, \
    uid, \
    path, \
    status, \
}

#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
    ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi)

#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \
    ENTRY(0, hid, uid, NULL, cpu_model, dmi)

#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
    ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi)

#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \
    ENTRY(0, "", NULL, path, cpu_model, dmi)

static const struct override_status_id override_status_ids[] = {
    /*
     * Bay / Cherry Trail PWM directly poked by GPU driver in win10,
     * but Linux uses a separate PWM driver, harmless if not used.
     */
    PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}),
    PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}),

    /* The Xiaomi Mi Pad 2 uses PWM2 for touchkeys backlight control */
    PRESENT_ENTRY_HID("80862289", "2", ATOM_AIRMONT, {
        DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
        DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
    }),

    /*
     * The INT0002 device is necessary to clear wakeup interrupt sources
     * on Cherry Trail devices, without it we get nobody cared IRQ msgs.
     */
    PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}),
    /*
     * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
     * the touchscreen ACPI device until a certain time
     * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed
     * *and* _STA has been called at least 3 times since.
     */
    PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
        DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
    }),
    PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, {
        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
        DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
    }),

    /*
     * The GPD win BIOS dated 20170221 has disabled the accelerometer, the
     * drivers sometimes cause crashes under Windows and this is how the
     * manufacturer has solved this :| The DMI match may not seem unique,
     * but it is. In the 67000+ DMI decode dumps from linux-hardware.org
     * only 116 have board_vendor set to "AMI Corporation" and of those 116
     * only the GPD win and pocket entries' board_name is "Default string".
     *
     * Unfortunately the GPD pocket also uses these strings and its BIOS
     * was copy-pasted from the GPD win, so it has a disabled KIOX000A
     * node which we should not enable, thus we also check the BIOS date.
     */
    PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
        DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
        DMI_MATCH(DMI_BOARD_NAME, "Default string"),
        DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
        DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
    }),
    PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
        DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
        DMI_MATCH(DMI_BOARD_NAME, "Default string"),
        DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
        DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
    }),
    PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, {
        DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
        DMI_MATCH(DMI_BOARD_NAME, "Default string"),
        DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
        DMI_MATCH(DMI_BIOS_DATE, "05/25/2017")
    }),

    /*
     * The GPD win/pocket have a PCI wifi card, but its DSDT has the SDIO
     * mmc controller enabled and that has a child-device which _PS3
     * method sets a GPIO causing the PCI wifi card to turn off.
     * See above remark about uniqueness of the DMI match.
     */
    NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, {
        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
        DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
        DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"),
        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
    }),

    /*
     * The LSM303D on the Lenovo Yoga Tablet 2 series is present
     * as both ACCL0001 and MAGN0001. As we can only ever register an
     * i2c client for one of them, ignore MAGN0001.
     */
    NOT_PRESENT_ENTRY_HID("MAGN0001", "1", ATOM_SILVERMONT, {
        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
        DMI_MATCH(DMI_PRODUCT_FAMILY, "YOGATablet2"),
    }),
};

/**
 * acpi_device_override_status - look up a forced _STA value for a device
 * @adev:   ACPI device being enumerated
 * @status: out parameter, set to the forced status on a match
 *
 * Walks override_status_ids[]; an entry applies only when the CPU model
 * matches and, when a DMI match is present, the system matches it too.
 * Entries match either by full ACPI path or by HID + _UID.
 *
 * Return: true (and *status set) when a quirk entry matched, false otherwise.
 */
bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status)
{
    bool ret = false;
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(override_status_ids); i++) {
        if (!x86_match_cpu(override_status_ids[i].cpu_ids))
            continue;

        /* The DMI match is optional: slot == 0 means "no DMI constraint" */
        if (override_status_ids[i].dmi_ids[0].matches[0].slot &&
            !dmi_check_system(override_status_ids[i].dmi_ids))
            continue;

        if (override_status_ids[i].path) {
            /* Path-based entry: compare the device's full ACPI pathname */
            struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };
            bool match;

            if (acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &path))
                continue;

            match = strcmp((char *)path.pointer, override_status_ids[i].path) == 0;
            kfree(path.pointer);
            if (!match)
                continue;
        } else {
            /* HID-based entry: HID and _UID must both match */
            if (acpi_match_device_ids(adev, override_status_ids[i].hid))
                continue;

            if (!adev->pnp.unique_id ||
                strcmp(adev->pnp.unique_id, override_status_ids[i].uid))
                continue;
        }

        *status = override_status_ids[i].status;
        ret = true;
        break;
    }

    return ret;
}

/*
 * AMD systems from Renoir and Lucienne *require* that the NVME controller
 * is put into D3 over a Modern Standby / suspend-to-idle cycle.
 *
 * This is "typically" accomplished using the `StorageD3Enable`
 * property in the _DSD that is checked via the `acpi_storage_d3` function
 * but this property was introduced after many of these systems launched
 * and most OEM systems don't have it in their BIOS.
 *
 * The Microsoft documentation for StorageD3Enable mentioned that Windows has
 * a hardcoded allowlist for D3 support, which was used for these platforms.
 *
 * This allows quirking on Linux in a similar fashion.
 *
 * Cezanne systems shouldn't *normally* need this as the BIOS includes
 * StorageD3Enable. But for two reasons we have added it.
 * 1) The BIOS on a number of Dell systems have ambiguity
 *    between the same value used for _ADR on ACPI nodes GPP1.DEV0 and GPP1.NVME.
 *    GPP1.NVME is needed to get StorageD3Enable node set properly.
 *    https://bugzilla.kernel.org/show_bug.cgi?id=216440
 *    https://bugzilla.kernel.org/show_bug.cgi?id=216773
 *    https://bugzilla.kernel.org/show_bug.cgi?id=217003
 * 2) On at least one HP system StorageD3Enable is missing on the second
 *    NVME disk in the system.
 */
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
    X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL),  /* Picasso */
    X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),  /* Renoir */
    X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
    X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL),  /* Cezanne */
    {}
};

/* True when the running CPU is on the allowlist above. */
bool force_storage_d3(void)
{
    return x86_match_cpu(storage_d3_cpu_ids);
}

/*
 * x86 ACPI boards which ship with only Android as their factory image usually
 * declare a whole bunch of bogus I2C devices in their ACPI tables and sometimes
 * there are issues with serdev devices on these boards too, e.g. the resource
 * points to the wrong serdev_controller.
 *
 * Instantiating I2C / serdev devs for these bogus devs causes various issues,
 * e.g. GPIO/IRQ resource conflicts because sometimes drivers do bind to them.
 * The Android x86 kernel fork shipped on these devices has some special code
 * to remove the bogus I2C clients (and AFAICT serdevs are ignored completely).
 *
 * The acpi_quirk_skip_*_enumeration() functions below are used by the I2C or
 * serdev code to skip instantiating any I2C or serdev devs on broken boards.
 *
 * In case of I2C an exception is made for HIDs on the i2c_acpi_known_good_ids
 * list. These are known to always be correct (and in case of the audio-codecs
 * the drivers heavily rely on the codec being enumerated through ACPI).
 *
 * Note these boards typically do actually have I2C and serdev devices,
 * just different ones then the ones described in their DSDT. The devices
 * which are actually present are manually instantiated by the
 * drivers/platform/x86/x86-android-tablets.c kernel module.
 */
#define ACPI_QUIRK_SKIP_I2C_CLIENTS				BIT(0)
#define ACPI_QUIRK_UART1_SKIP					BIT(1)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP				BIT(2)
#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY			BIT(3)
#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY			BIT(4)
#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS			BIT(5)

static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
    /*
     * 1. Devices with only the skip / don't-skip AC and battery quirks,
     *    sorted alphabetically.
     */
    {
        /* ECS EF20EA, AXP288 PMIC but uses separate fuel-gauge */
        .matches = {
            DMI_MATCH(DMI_PRODUCT_NAME, "EF20EA"),
        },
        .driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY
    },
    {
        /* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
            DMI_MATCH(DMI_PRODUCT_NAME, "80XF"),
            DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
        },
        .driver_data = (void *)ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY
    },

    /*
     * 2. Devices which also have the skip i2c/serdev quirks and which
     *    need the x86-android-tablets module to properly work.
     */
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
    {
        /* Acer Iconia One 7 B1-750 */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
            DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
                    ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
    },
    {
        .matches = {
            DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
            DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_UART1_TTY_UART2_SKIP |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
                    ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
    },
    {
        /* Lenovo Yoga Book X90F/L */
        .matches = {
            DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
            DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
            DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_UART1_SKIP |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
                    ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
    },
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
            DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
                    ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
    },
    {
        /* Lenovo Yoga Tablet 2 1050F/L */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
            DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
            DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
            /* Partial match on beginning of BIOS version */
            DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
    },
    {
        /* Lenovo Yoga Tab 3 Pro X90F */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
            DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
            DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
    },
    {
        /* Medion Lifetab S10346 */
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
            DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
            /* Way too generic, also match on BIOS data */
            DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
    },
    {
        /* Nextbook Ares 8 (BYT version)*/
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
            DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
                    ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
    },
    {
        /* Nextbook Ares 8A (CHT version)*/
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
            DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
            DMI_MATCH(DMI_BIOS_VERSION, "M882"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
    },
    {
        /* Whitelabel (sold as various brands) TM800A550L */
        .matches = {
            DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
            DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
            /* Above strings are too generic, also match on BIOS version */
            DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
        },
        .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
                    ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
    },
#endif
    {}
};

#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
/* HIDs whose ACPI I2C description is known to be correct even on quirked boards */
static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
    { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
    { "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
    { "INT33F4", 0 },  /* X-Powers AXP288 PMIC */
    { "INT33FD", 0 },  /* Intel Crystal Cove PMIC */
    { "INT34D3", 0 },  /* Intel Whiskey Cove PMIC */
    { "NPCE69A", 0 },  /* Asus Transformer keyboard dock */
    {}
};

/**
 * acpi_quirk_skip_i2c_client_enumeration - should this I2C client be skipped?
 * @adev: ACPI device describing the I2C client
 *
 * Return: true when the board carries ACPI_QUIRK_SKIP_I2C_CLIENTS and the
 * device's HID is not on the known-good list; false otherwise.
 */
bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
{
    const struct dmi_system_id *dmi_id;
    long quirks;

    dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
    if (!dmi_id)
        return false;

    quirks = (unsigned long)dmi_id->driver_data;
    if (!(quirks & ACPI_QUIRK_SKIP_I2C_CLIENTS))
        return false;

    /* Non-zero (no match against the known-good list) means "skip" */
    return acpi_match_device_ids(adev, i2c_acpi_known_good_ids);
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);

/**
 * acpi_quirk_skip_serdev_enumeration - should this serdev controller be skipped?
 * @controller_parent: parent device of the serdev controller (the UART)
 * @skip: out parameter, set true when serdev enumeration must be skipped
 *
 * Return: 0 on success (with *skip filled in), or -ENODEV when a tty cdev
 * should be created instead of a serdev controller.
 */
int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
{
    struct acpi_device *adev = ACPI_COMPANION(controller_parent);
    const struct dmi_system_id *dmi_id;
    long quirks = 0;
    u64 uid;
    int ret;

    *skip = false;

    ret = acpi_dev_uid_to_integer(adev, &uid);
    if (ret)
        return 0;

    /* to not match on PNP enumerated debug UARTs */
    if (!dev_is_platform(controller_parent))
        return 0;

    dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
    if (dmi_id)
        quirks = (unsigned long)dmi_id->driver_data;

    if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
        *skip = true;

    if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
        if (uid == 1)
            return -ENODEV; /* Create tty cdev instead of serdev */

        if (uid == 2)
            *skip = true;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);

/**
 * acpi_quirk_skip_gpio_event_handlers - should ACPI GPIO event handlers be
 * skipped on this board?
 *
 * Return: true when the matched DMI quirk carries
 * ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS.
 */
bool acpi_quirk_skip_gpio_event_handlers(void)
{
    const struct dmi_system_id *dmi_id;
    long quirks;

    dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
    if (!dmi_id)
        return false;

    quirks = (unsigned long)dmi_id->driver_data;
    return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
#endif

/* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
static const struct {
    const char *hid;
    int hrv;    /* _HRV value to match, -1 = any */
} acpi_skip_ac_and_battery_pmic_ids[] = {
    { "INT33F4", -1 }, /* X-Powers AXP288 PMIC */
    { "INT34D3",  3 }, /* Intel Cherrytrail Whiskey Cove PMIC */
};

/**
 * acpi_quirk_skip_acpi_ac_and_battery - should the ACPI AC and battery
 * drivers be skipped?
 *
 * DMI quirks take precedence (skip first, then explicit use); otherwise skip
 * when a PMIC with a better native charger driver is present.
 *
 * Return: true to skip the ACPI AC and battery devices.
 */
bool acpi_quirk_skip_acpi_ac_and_battery(void)
{
    const struct dmi_system_id *dmi_id;
    long quirks = 0;
    int i;

    dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
    if (dmi_id)
        quirks = (unsigned long)dmi_id->driver_data;

    if (quirks & ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY)
        return true;

    if (quirks & ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY)
        return false;

    for (i = 0; i < ARRAY_SIZE(acpi_skip_ac_and_battery_pmic_ids); i++) {
        if (acpi_dev_present(acpi_skip_ac_and_battery_pmic_ids[i].hid, "1",
                     acpi_skip_ac_and_battery_pmic_ids[i].hrv)) {
            pr_info_once("found native %s PMIC, skipping ACPI AC and battery devices\n",
                     acpi_skip_ac_and_battery_pmic_ids[i].hid);
            return true;
        }
    }

    return false;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_acpi_ac_and_battery);

/*
 * This section provides a workaround for a specific x86 system
 * which requires disabling of mwait to work correctly.
 */
static int __init acpi_proc_quirk_set_no_mwait(const struct dmi_system_id *id)
{
    pr_notice("%s detected - disabling mwait for CPU C-states\n",
          id->ident);
    boot_option_idle_override = IDLE_NOMWAIT;
    return 0;
}

static const struct dmi_system_id acpi_proc_quirk_mwait_dmi_table[] __initconst = {
    {
        .callback = acpi_proc_quirk_set_no_mwait,
        .ident = "Extensa 5220",
        .matches = {
            DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
            DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
            DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
            DMI_MATCH(DMI_BOARD_NAME, "Columbia"),
        },
        .driver_data = NULL,
    },
    {}
};

void __init acpi_proc_quirk_mwait_check(void)
{
    /*
     * Check whether the system matches the DMI table above. If yes, OSPM
     * should not use mwait for CPU C-states.
     */
    dmi_check_system(acpi_proc_quirk_mwait_dmi_table);
}
/* linux-master — drivers/acpi/x86/utils.c (file-boundary marker, preserved as comment) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * dptf_pch_fivr: DPTF PCH FIVR Participant driver
 * Copyright (c) 2020, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Two-integer package returned by the GFC0/GFC1/GEMI/GFCS/GFFS methods */
struct pch_fivr_resp {
    u64 status; /* non-zero = firmware reported failure */
    u64 result; /* requested value, valid only when status == 0 */
};

/*
 * Evaluate @method on @handle and extract its {status, result} package
 * into @fivr_resp.
 *
 * Returns 0 on success, -EFAULT when evaluation fails, the object is not
 * a two-integer package, or the firmware status field is non-zero.
 */
static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp)
{
    struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp};
    struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
    struct acpi_buffer format = { sizeof("NN"), "NN" }; /* two integers */
    union acpi_object *obj;
    acpi_status status;
    int ret = -EFAULT;

    status = acpi_evaluate_object(handle, method, NULL, &buffer);
    if (ACPI_FAILURE(status))
        return ret;

    obj = buffer.pointer;
    if (!obj || obj->type != ACPI_TYPE_PACKAGE)
        goto release_buffer;

    status = acpi_extract_package(obj, &format, &resp);
    if (ACPI_FAILURE(status))
        goto release_buffer;

    /* Firmware-level failure reported in the package itself */
    if (fivr_resp->status)
        goto release_buffer;

    ret = 0;

release_buffer:
    kfree(buffer.pointer);
    return ret;
}

/*
 * Presentation of attributes which are defined for INTC10xx
 * They are:
 * freq_mhz_low_clock : Set PCH FIVR switching freq for
 *			FIVR clock 19.2MHz and 24MHz
 * freq_mhz_high_clock : Set PCH FIVR switching freq for
 *			FIVR clock 38.4MHz
 */

/* Generate a sysfs show() that reads the value via the given ACPI method */
#define PCH_FIVR_SHOW(name, method) \
static ssize_t name##_show(struct device *dev,\
               struct device_attribute *attr,\
               char *buf)\
{\
    struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
    struct pch_fivr_resp fivr_resp;\
    int status;\
\
    status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\
    if (status)\
        return status;\
\
    return sprintf(buf, "%llu\n", fivr_resp.result);\
}

/* Generate a sysfs store() that writes the value via the given ACPI method */
#define PCH_FIVR_STORE(name, method) \
static ssize_t name##_store(struct device *dev,\
                struct device_attribute *attr,\
                const char *buf, size_t count)\
{\
    struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
    acpi_status status;\
    u32 val;\
\
    if (kstrtouint(buf, 0, &val) < 0)\
        return -EINVAL;\
\
    status = acpi_execute_simple_method(acpi_dev->handle, #method, val);\
    if (ACPI_SUCCESS(status))\
        return count;\
\
    return -EINVAL;\
}

PCH_FIVR_SHOW(freq_mhz_low_clock, GFC0)
PCH_FIVR_SHOW(freq_mhz_high_clock, GFC1)
PCH_FIVR_SHOW(ssc_clock_info, GEMI)
PCH_FIVR_SHOW(fivr_switching_freq_mhz, GFCS)
PCH_FIVR_SHOW(fivr_switching_fault_status, GFFS)
PCH_FIVR_STORE(freq_mhz_low_clock, RFC0)
PCH_FIVR_STORE(freq_mhz_high_clock, RFC1)

static DEVICE_ATTR_RW(freq_mhz_low_clock);
static DEVICE_ATTR_RW(freq_mhz_high_clock);
static DEVICE_ATTR_RO(ssc_clock_info);
static DEVICE_ATTR_RO(fivr_switching_freq_mhz);
static DEVICE_ATTR_RO(fivr_switching_fault_status);

static struct attribute *fivr_attrs[] = {
    &dev_attr_freq_mhz_low_clock.attr,
    &dev_attr_freq_mhz_high_clock.attr,
    &dev_attr_ssc_clock_info.attr,
    &dev_attr_fivr_switching_freq_mhz.attr,
    &dev_attr_fivr_switching_fault_status.attr,
    NULL
};

static const struct attribute_group pch_fivr_attribute_group = {
    .attrs = fivr_attrs,
    .name = "pch_fivr_switch_frequency"
};

/*
 * Probe: verify the ACPI companion is a PCH FIVR participant
 * (PTYP == 0x05) and expose the sysfs attribute group.
 */
static int pch_fivr_add(struct platform_device *pdev)
{
    struct acpi_device *acpi_dev;
    unsigned long long ptype;
    acpi_status status;
    int result;

    acpi_dev = ACPI_COMPANION(&(pdev->dev));
    if (!acpi_dev)
        return -ENODEV;

    status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
    if (ACPI_FAILURE(status) || ptype != 0x05)
        return -ENODEV;

    result = sysfs_create_group(&pdev->dev.kobj, &pch_fivr_attribute_group);
    if (result)
        return result;

    platform_set_drvdata(pdev, acpi_dev);

    return 0;
}

static int pch_fivr_remove(struct platform_device *pdev)
{
    sysfs_remove_group(&pdev->dev.kobj, &pch_fivr_attribute_group);

    return 0;
}

static const struct acpi_device_id pch_fivr_device_ids[] = {
    {"INTC1045", 0},
    {"INTC1049", 0},
    {"INTC1064", 0},
    {"INTC10A3", 0},
    {"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);

static struct platform_driver pch_fivr_driver = {
    .probe = pch_fivr_add,
    .remove = pch_fivr_remove,
    .driver = {
        .name = "dptf_pch_fivr",
        .acpi_match_table = pch_fivr_device_ids,
    },
};

module_platform_driver(pch_fivr_driver);

MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ACPI DPTF PCH FIVR driver");
/* linux-master — drivers/acpi/dptf/dptf_pch_fivr.c (file-boundary marker, preserved as comment) */
// SPDX-License-Identifier: GPL-2.0-only /* * ACPI support for int340x thermal drivers * * Copyright (C) 2014, Intel Corporation * Authors: Zhang Rui <[email protected]> */ #include <linux/acpi.h> #include <linux/module.h> #include "../internal.h" #define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { {"INT3400"}, {"INT3401", INT3401_DEVICE}, {"INT3402"}, {"INT3403"}, {"INT3404"}, {"INT3406"}, {"INT3407"}, {"INT3408"}, {"INT3409"}, {"INT340A"}, {"INT340B"}, {"INT3532"}, {"INTC1040"}, {"INTC1041"}, {"INTC1042"}, {"INTC1043"}, {"INTC1044"}, {"INTC1045"}, {"INTC1046"}, {"INTC1047"}, {"INTC1048"}, {"INTC1049"}, {"INTC1050"}, {"INTC1060"}, {"INTC1061"}, {"INTC1062"}, {"INTC1063"}, {"INTC1064"}, {"INTC1065"}, {"INTC1066"}, {"INTC10A0"}, {"INTC10A1"}, {"INTC10A2"}, {"INTC10A3"}, {"INTC10A4"}, {"INTC10A5"}, {""}, }; static int int340x_thermal_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { if (IS_ENABLED(CONFIG_INT340X_THERMAL)) acpi_create_platform_device(adev, NULL); /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) && id->driver_data == INT3401_DEVICE) acpi_create_platform_device(adev, NULL); return 1; } static struct acpi_scan_handler int340x_thermal_handler = { .ids = int340x_thermal_device_ids, .attach = int340x_thermal_handler_attach, }; void __init acpi_int340x_thermal_init(void) { acpi_scan_add_handler(&int340x_thermal_handler); }
/* linux-master — drivers/acpi/dptf/int340x_thermal.c (file-boundary marker, preserved as comment) */
// SPDX-License-Identifier: GPL-2.0-only /* * dptf_power: DPTF platform power driver * Copyright (c) 2016, Intel Corporation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/acpi.h> #include <linux/platform_device.h> /* * Presentation of attributes which are defined for INT3407 and INT3532. * They are: * PMAX : Maximum platform power * PSRC : Platform power source * ARTG : Adapter rating * CTYP : Charger type * PROP : Rest of worst case platform Power * PBSS : Power Battery Steady State * RBHF : High Frequency Impedance * VBNL : Instantaneous No-Load Voltage * CMPP : Current Discharge Capability */ #define DPTF_POWER_SHOW(name, object) \ static ssize_t name##_show(struct device *dev,\ struct device_attribute *attr,\ char *buf)\ {\ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\ unsigned long long val;\ acpi_status status;\ \ status = acpi_evaluate_integer(acpi_dev->handle, #object,\ NULL, &val);\ if (ACPI_SUCCESS(status))\ return sprintf(buf, "%d\n", (int)val);\ else \ return -EINVAL;\ } DPTF_POWER_SHOW(max_platform_power_mw, PMAX) DPTF_POWER_SHOW(platform_power_source, PSRC) DPTF_POWER_SHOW(adapter_rating_mw, ARTG) DPTF_POWER_SHOW(battery_steady_power_mw, PBSS) DPTF_POWER_SHOW(charger_type, CTYP) DPTF_POWER_SHOW(rest_of_platform_power_mw, PROP) DPTF_POWER_SHOW(max_steady_state_power_mw, PBSS) DPTF_POWER_SHOW(high_freq_impedance_mohm, RBHF) DPTF_POWER_SHOW(no_load_voltage_mv, VBNL) DPTF_POWER_SHOW(current_discharge_capbility_ma, CMPP); static DEVICE_ATTR_RO(max_platform_power_mw); static DEVICE_ATTR_RO(platform_power_source); static DEVICE_ATTR_RO(adapter_rating_mw); static DEVICE_ATTR_RO(battery_steady_power_mw); static DEVICE_ATTR_RO(charger_type); static DEVICE_ATTR_RO(rest_of_platform_power_mw); static DEVICE_ATTR_RO(max_steady_state_power_mw); static DEVICE_ATTR_RO(high_freq_impedance_mohm); static DEVICE_ATTR_RO(no_load_voltage_mv); static DEVICE_ATTR_RO(current_discharge_capbility_ma); static ssize_t prochot_confirm_store(struct 
device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi_dev = dev_get_drvdata(dev); acpi_status status; int seq_no; if (kstrtouint(buf, 0, &seq_no) < 0) return -EINVAL; status = acpi_execute_simple_method(acpi_dev->handle, "PBOK", seq_no); if (ACPI_SUCCESS(status)) return count; return -EINVAL; } static DEVICE_ATTR_WO(prochot_confirm); static struct attribute *dptf_power_attrs[] = { &dev_attr_max_platform_power_mw.attr, &dev_attr_platform_power_source.attr, &dev_attr_adapter_rating_mw.attr, &dev_attr_battery_steady_power_mw.attr, &dev_attr_charger_type.attr, &dev_attr_rest_of_platform_power_mw.attr, &dev_attr_prochot_confirm.attr, NULL }; static const struct attribute_group dptf_power_attribute_group = { .attrs = dptf_power_attrs, .name = "dptf_power" }; static struct attribute *dptf_battery_attrs[] = { &dev_attr_max_platform_power_mw.attr, &dev_attr_max_steady_state_power_mw.attr, &dev_attr_high_freq_impedance_mohm.attr, &dev_attr_no_load_voltage_mv.attr, &dev_attr_current_discharge_capbility_ma.attr, NULL }; static const struct attribute_group dptf_battery_attribute_group = { .attrs = dptf_battery_attrs, .name = "dptf_battery" }; #define MAX_POWER_CHANGED 0x80 #define POWER_STATE_CHANGED 0x81 #define STEADY_STATE_POWER_CHANGED 0x83 #define POWER_PROP_CHANGE_EVENT 0x84 #define IMPEDANCE_CHANGED 0x85 #define VOLTAGE_CURRENT_CHANGED 0x86 static long long dptf_participant_type(acpi_handle handle) { unsigned long long ptype; acpi_status status; status = acpi_evaluate_integer(handle, "PTYP", NULL, &ptype); if (ACPI_FAILURE(status)) return -ENODEV; return ptype; } static void dptf_power_notify(acpi_handle handle, u32 event, void *data) { struct platform_device *pdev = data; char *attr; switch (event) { case POWER_STATE_CHANGED: attr = "platform_power_source"; break; case POWER_PROP_CHANGE_EVENT: attr = "rest_of_platform_power_mw"; break; case MAX_POWER_CHANGED: attr = "max_platform_power_mw"; break; case 
STEADY_STATE_POWER_CHANGED: attr = "max_steady_state_power_mw"; break; case IMPEDANCE_CHANGED: attr = "high_freq_impedance_mohm"; break; case VOLTAGE_CURRENT_CHANGED: attr = "no_load_voltage_mv"; break; default: dev_err(&pdev->dev, "Unsupported event [0x%x]\n", event); return; } /* * Notify that an attribute is changed, so that user space can read * again. */ if (dptf_participant_type(handle) == 0x0CULL) sysfs_notify(&pdev->dev.kobj, "dptf_battery", attr); else sysfs_notify(&pdev->dev.kobj, "dptf_power", attr); } static int dptf_power_add(struct platform_device *pdev) { const struct attribute_group *attr_group; struct acpi_device *acpi_dev; unsigned long long ptype; int result; acpi_dev = ACPI_COMPANION(&(pdev->dev)); if (!acpi_dev) return -ENODEV; ptype = dptf_participant_type(acpi_dev->handle); if (ptype == 0x11) attr_group = &dptf_power_attribute_group; else if (ptype == 0x0C) attr_group = &dptf_battery_attribute_group; else return -ENODEV; result = acpi_install_notify_handler(acpi_dev->handle, ACPI_DEVICE_NOTIFY, dptf_power_notify, (void *)pdev); if (result) return result; result = sysfs_create_group(&pdev->dev.kobj, attr_group); if (result) { acpi_remove_notify_handler(acpi_dev->handle, ACPI_DEVICE_NOTIFY, dptf_power_notify); return result; } platform_set_drvdata(pdev, acpi_dev); return 0; } static int dptf_power_remove(struct platform_device *pdev) { struct acpi_device *acpi_dev = platform_get_drvdata(pdev); acpi_remove_notify_handler(acpi_dev->handle, ACPI_DEVICE_NOTIFY, dptf_power_notify); if (dptf_participant_type(acpi_dev->handle) == 0x0CULL) sysfs_remove_group(&pdev->dev.kobj, &dptf_battery_attribute_group); else sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group); return 0; } static const struct acpi_device_id int3407_device_ids[] = { {"INT3407", 0}, {"INT3532", 0}, {"INTC1047", 0}, {"INTC1050", 0}, {"INTC1060", 0}, {"INTC1061", 0}, {"INTC1065", 0}, {"INTC1066", 0}, {"INTC10A4", 0}, {"INTC10A5", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, 
int3407_device_ids); static struct platform_driver dptf_power_driver = { .probe = dptf_power_add, .remove = dptf_power_remove, .driver = { .name = "dptf_power", .acpi_match_table = int3407_device_ids, }, }; module_platform_driver(dptf_power_driver); MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("ACPI DPTF platform power driver");
/* linux-master — drivers/acpi/dptf/dptf_power.c (file-boundary marker, preserved as comment) */
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2022-2023, Ventana Micro Systems Inc * Author: Sunil V L <[email protected]> * */ #define pr_fmt(fmt) "ACPI: RHCT: " fmt #include <linux/acpi.h> static struct acpi_table_header *acpi_get_rhct(void) { static struct acpi_table_header *rhct; acpi_status status; /* * RHCT will be used at runtime on every CPU, so we * don't need to call acpi_put_table() to release the table mapping. */ if (!rhct) { status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); if (ACPI_FAILURE(status)) { pr_warn_once("No RHCT table found\n"); return NULL; } } return rhct; } /* * During early boot, the caller should call acpi_get_table() and pass its pointer to * these functions(and free up later). At run time, since this table can be used * multiple times, NULL may be passed in order to use the cached table. */ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const char **isa) { struct acpi_rhct_node_header *node, *ref_node, *end; u32 size_hdr = sizeof(struct acpi_rhct_node_header); u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info); struct acpi_rhct_hart_info *hart_info; struct acpi_rhct_isa_string *isa_node; struct acpi_table_rhct *rhct; u32 *hart_info_node_offset; u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); BUG_ON(acpi_disabled); if (!table) { rhct = (struct acpi_table_rhct *)acpi_get_rhct(); if (!rhct) return -ENOENT; } else { rhct = (struct acpi_table_rhct *)table; } end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length); for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset); node < end; node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) { if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) { hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr); hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo); if (acpi_cpu_id != hart_info->uid) continue; for (int i = 0; i < hart_info->num_offsets; i++) { ref_node = 
ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, hart_info_node_offset[i]); if (ref_node->type == ACPI_RHCT_NODE_TYPE_ISA_STRING) { isa_node = ACPI_ADD_PTR(struct acpi_rhct_isa_string, ref_node, size_hdr); *isa = isa_node->isa; return 0; } } } } return -1; }
/* linux-master — drivers/acpi/riscv/rhct.c (file-boundary marker, preserved as comment) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi_numa.c - ACPI NUMA support
 *
 * Copyright (C) 2002 Takayoshi Kochi <[email protected]>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

/* Bitmap of logical node IDs that have already been handed out */
static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

unsigned char acpi_srat_revision __initdata;
/* < 0 when SRAT-based NUMA has been disabled (see disable_srat()) */
static int acpi_numa __initdata;

void __init disable_srat(void)
{
	acpi_numa = -1;
}

/* Translate a proximity domain to its logical node, NUMA_NO_NODE if unmapped */
int pxm_to_node(int pxm)
{
	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
		return NUMA_NO_NODE;
	return pxm_to_node_map[pxm];
}
EXPORT_SYMBOL(pxm_to_node);

/* Reverse translation of pxm_to_node(); PXM_INVAL for a negative node */
int node_to_pxm(int node)
{
	if (node < 0)
		return PXM_INVAL;
	return node_to_pxm_map[node];
}

/*
 * Record the pxm <-> node association in both maps.  When several
 * candidates exist, the smaller node/pxm wins, keeping the mapping stable.
 */
static void __acpi_map_pxm_to_node(int pxm, int node)
{
	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
		pxm_to_node_map[pxm] = node;
	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
		node_to_pxm_map[node] = pxm;
}

/*
 * Return the logical node for @pxm, allocating the first unused node ID
 * if the domain has not been seen before.  NUMA_NO_NODE on overflow or
 * invalid input.
 */
int acpi_map_pxm_to_node(int pxm)
{
	int node;

	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
		return NUMA_NO_NODE;

	node = pxm_to_node_map[pxm];

	if (node == NUMA_NO_NODE) {
		if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		node = first_unset_node(nodes_found_map);
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);

/* Dump one SRAT subtable entry at pr_debug level, per entry type */
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	switch (header->type) {
	case ACPI_SRAT_TYPE_CPU_AFFINITY:
	{
		struct acpi_srat_cpu_affinity *p =
			(struct acpi_srat_cpu_affinity *)header;
		pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
			 p->apic_id, p->local_sapic_eid,
			 p->proximity_domain_lo,
			 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
	{
		struct acpi_srat_mem_affinity *p =
			(struct acpi_srat_mem_affinity *)header;
		pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
			 (unsigned long long)p->base_address,
			 (unsigned long long)p->length,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
			 "enabled" : "disabled",
			 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
			 " hot-pluggable" : "",
			 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
			 " non-volatile" : "");
	}
	break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
	{
		struct acpi_srat_x2apic_cpu_affinity *p =
			(struct acpi_srat_x2apic_cpu_affinity *)header;
		pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
			 p->apic_id,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	case ACPI_SRAT_TYPE_GICC_AFFINITY:
	{
		struct acpi_srat_gicc_affinity *p =
			(struct acpi_srat_gicc_affinity *)header;
		pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
			 p->acpi_processor_uid,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_GICC_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
	{
		struct acpi_srat_generic_affinity *p =
			(struct acpi_srat_generic_affinity *)header;

		if (p->device_handle_type == 0) {
			/*
			 * For pci devices this may be the only place they
			 * are assigned a proximity domain
			 */
			pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n",
				 *(u16 *)(&p->device_handle[0]),
				 *(u16 *)(&p->device_handle[2]),
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				"enabled" : "disabled");
		} else {
			/*
			 * In this case we can rely on the device having a
			 * proximity domain reference
			 */
			pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n",
				(char *)(&p->device_handle[0]),
				(char *)(&p->device_handle[8]),
				p->proximity_domain,
				(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				"enabled" : "disabled");
		}
	}
	break;

	default:
		pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
			header->type);
		break;
	}
}

/*
 * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics which wants the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static int __init slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;
	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];
			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}

void __init bad_srat(void)
{
	pr_err("SRAT: SRAT not used.\n");
	disable_srat();
}

int __init srat_disabled(void)
{
	return acpi_numa < 0;
}

#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	int i, j;

	for (i = 0; i < slit->locality_count; i++) {
		const int from_node = pxm_to_node(i);

		if (from_node == NUMA_NO_NODE)
			continue;

		for (j = 0; j < slit->locality_count; j++) {
			const int to_node = pxm_to_node(j);

			if (to_node == NUMA_NO_NODE)
				continue;

			numa_set_distance(from_node, to_node,
				slit->entry[slit->locality_count * i + j]);
		}
	}
}

/*
 * Default callback for parsing of the Proximity Domain <-> Memory
 * Area mappings
 */
int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	u64 start, end;
	u32 hotpluggable;
	int node, pxm;

	if (srat_disabled())
		goto out_err;
	if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
		pr_err("SRAT: Unexpected header length: %d\n",
		       ma->header.length);
		goto out_err_bad_srat;
	}
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		goto out_err;
	hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
		(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);

	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	/* Revision 1 SRAT only carries an 8-bit proximity domain */
	if (acpi_srat_revision <= 1)
		pxm &= 0xff;

	node = acpi_map_pxm_to_node(pxm);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		goto out_err_bad_srat;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
		       node, (unsigned long long) start,
		       (unsigned long long) end - 1);
		goto out_err_bad_srat;
	}

	node_set(node, numa_nodes_parsed);

	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
		node, pxm,
		(unsigned long long) start, (unsigned long long) end - 1,
		hotpluggable ? " hotplug" : "",
		ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");

	/* Mark hotplug range in memblock. */
	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
			(unsigned long long)start, (unsigned long long)end - 1);

	max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));

	return 0;
out_err_bad_srat:
	bad_srat();
out_err:
	return -EINVAL;
}

/*
 * Create a NUMA node for each CXL Fixed Memory Window (CFMWS) range that
 * the SRAT did not already describe, using the next free "fake" PXM value.
 */
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	struct acpi_cedt_cfmws *cfmws;
	int *fake_pxm = arg;
	u64 start, end;
	int node;

	cfmws = (struct acpi_cedt_cfmws *)header;
	start = cfmws->base_hpa;
	end = cfmws->base_hpa + cfmws->window_size;

	/* Skip if the SRAT already described the NUMA details for this HPA */
	node = phys_to_target_node(start);
	if (node != NUMA_NO_NODE)
		return 0;

	node = acpi_map_pxm_to_node(*fake_pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
		return -EINVAL;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		/* CXL driver must handle the NUMA_NO_NODE case */
		pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
			node, start, end);
	}
	node_set(node, numa_nodes_parsed);

	/* Set the next available fake_pxm value */
	(*fake_pxm)++;
	return 0;
}
#else
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) */

/* Validate and apply the SLIT; rejected tables are simply not used */
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit = (struct acpi_table_slit *)table;

	if (!slit_valid(slit)) {
		pr_info("SLIT table looks invalid. Not used.\n");
		return -EINVAL;
	}
	acpi_numa_slit_init(slit);

	return 0;
}

/* Weak default: architectures without x2APIC support just log the entry */
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}

static int __init
acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let architecture-dependent part to do it */
	acpi_numa_x2apic_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_processor_affinity(union acpi_subtable_headers *header,
			      const unsigned long end)
{
	struct acpi_srat_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_cpu_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let architecture-dependent part to do it */
	acpi_numa_processor_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_srat_gicc_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_gicc_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let architecture-dependent part to do it */
	acpi_numa_gicc_affinity_init(processor_affinity);

	return 0;
}

#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
/*
 * Map an enabled Generic Initiator affinity entry to a node and mark the
 * node as hosting a generic initiator (no CPUs or memory of its own).
 */
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_srat_generic_affinity *gi_affinity;
	int node;

	gi_affinity = (struct acpi_srat_generic_affinity *)header;
	if (!gi_affinity)
		return -EINVAL;
	acpi_table_print_srat_entry(&header->common);

	if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
		return -EINVAL;

	node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Too many proximity domains.\n");
		return -EINVAL;
	}
	node_set(node, numa_nodes_parsed);
	node_set_state(node, N_GENERIC_INITIATOR);

	return 0;
}
#else
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */

/* Count of memory affinity entries successfully turned into memblks */
static int __initdata parsed_numa_memblks;

static int __init
acpi_parse_memory_affinity(union acpi_subtable_headers * header,
			   const unsigned long end)
{
	struct acpi_srat_mem_affinity *memory_affinity;

	memory_affinity = (struct acpi_srat_mem_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let architecture-dependent part to do it */
	if (!acpi_numa_memory_affinity_init(memory_affinity))
		parsed_numa_memblks++;

	return 0;
}

/* Table-level callback: only records the SRAT revision for later use */
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}

static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					    sizeof(struct acpi_table_srat), id,
					    handler, max_entries);
}

int __init acpi_numa_init(void)
{
	int i, fake_pxm, cnt = 0;

	if (acpi_disabled)
		return -EINVAL;

	/*
	 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
	 * SRAT cpu entries could have different order with that in MADT.
	 * So go over all cpu entries in SRAT to get apicid to node mapping.
	 */

	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		struct acpi_subtable_proc srat_proc[4];

		memset(srat_proc, 0, sizeof(srat_proc));
		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
		srat_proc[0].handler = acpi_parse_processor_affinity;
		srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
		srat_proc[1].handler = acpi_parse_x2apic_affinity;
		srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
		srat_proc[2].handler = acpi_parse_gicc_affinity;
		srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
		srat_proc[3].handler = acpi_parse_gi_affinity;

		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat),
					srat_proc, ARRAY_SIZE(srat_proc), 0);

		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity, 0);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	/*
	 * CXL Fixed Memory Window Structures (CFMWS) must be parsed
	 * after the SRAT. Create NUMA Nodes for CXL memory ranges that
	 * are defined in the CFMWS and not already defined in the SRAT.
	 * Initialize a fake_pxm as the first available PXM to emulate.
	 */

	/* fake_pxm is the next unused PXM value after SRAT parsing */
	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
		if (node_to_pxm_map[i] > fake_pxm)
			fake_pxm = node_to_pxm_map[i];
	}
	fake_pxm++;
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
			      &fake_pxm);

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}

/*
 * Evaluate _PXM for @h, walking up the namespace to the nearest ancestor
 * that provides one.  Returns the PXM value, or -1 if none is found.
 */
static int acpi_get_pxm(acpi_handle h)
{
	unsigned long long pxm;
	acpi_status status;
	acpi_handle handle;
	acpi_handle phandle = h;

	do {
		handle = phandle;
		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;
		status = acpi_get_parent(handle, &phandle);
	} while (ACPI_SUCCESS(status));
	return -1;
}

/* Logical NUMA node for an ACPI device handle, NUMA_NO_NODE if unknown */
int acpi_get_node(acpi_handle handle)
{
	int pxm;

	pxm = acpi_get_pxm(handle);

	return pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);
linux-master
drivers/acpi/numa/srat.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>

static u8 hmat_revision;
static int hmat_disable __initdata;

void __init disable_hmat(void)
{
	hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct resource memregions;
	struct node_hmem_attrs hmem_attrs[2];
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
	bool has_cpu;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

/* Look up an initiator by its processor proximity domain, NULL if absent */
static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

/* Look up a target by its memory proximity domain, NULL if absent */
static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

/* Allocate and enlist an initiator for @cpu_pxm unless one already exists */
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
	list_add_tail(&initiator->node, &initiators);
}

/*
 * Allocate (or extend) the target for @mem_pxm and record the physical
 * range [start, start + len) in its per-target memregions resource tree.
 */
static __init void alloc_memory_target(unsigned int mem_pxm,
		resource_size_t start, resource_size_t len)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	/*
	 * There are potentially multiple ranges per PXM, so record each
	 * in the per-target memregions resource tree.
	 */
	if (!__request_region(&target->memregions, start, len, "memory target",
				IORESOURCE_MEM))
		pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
				start, start + len, mem_pxm);
}

/* Human-readable name for an HMAT data type value */
static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

/* Unit suffix matching hmat_data_type() for log output */
static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

/*
 * Scale a raw HMAT entry by the locality's base unit into common units
 * (nanoseconds or MB/s); returns 0 for invalid or overflowing values.
 */
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for version 1, convert latency from
	 * picoseconds to nanoseconds if revision 2.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}

/*
 * Store @value into the target's hmem_attrs for the given access class;
 * "access" attributes set both the read and write sides.
 */
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}

/*
 * Remember the locality structure and index it by the attribute types it
 * provides, for later best-initiator selection.
 */
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

/*
 * Parse one System Locality Latency/Bandwidth structure: validate its
 * length, log the matrix, and fold each initiator-target value into the
 * corresponding memory target.
 */
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("Unexpected locality header length: %u\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("Unexpected locality header length:%u, minimum required:%u\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%u-%u]:%u%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init]) {
					hmat_update_target_access(target, type, value, 0);
					/* If the node has a CPU, update access 1 */
					if (node_state(pxm_to_node(inits[init]), N_CPU))
						hmat_update_target_access(target, type, value, 1);
				}
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}

/*
 * Parse one Memory Side Cache Information structure and attach the
 * decoded cache attributes to the owning memory target.
 */
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("Unexpected cache header length: %u\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}

/*
 * Parse one Memory Proximity Domain Attributes structure, linking a
 * memory target to its local processor domain when one is declared.
 */
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("Unexpected address range header length: %u\n",
			 p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->flags, p->processor_PD, p->memory_PD);

	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
	    hmat_revision > 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}

/* Dispatch an HMAT subtable to the matching type-specific parser */
static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

/* SRAT pre-pass: create a memory target per enabled memory affinity entry */
static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
	return 0;
}

/*
 * Fetch the normalized performance value for @initiator -> @target out of
 * the locality matrix; 0 when either domain is absent from the matrix.
 */
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

/*
 * Keep *best up to date for @type: lower wins for latencies, higher wins
 * for bandwidths.  Returns true when *best was replaced.
 */
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}

/* list_sort() comparator: order initiators by processor proximity domain */
static int initiator_cmp(void *priv, const struct list_head *a,
			 const struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	return ia->processor_pxm - ib->processor_pxm;
}

/* Seed @p_nodes with the pxm of every known initiator; -ENXIO if none */
static int initiators_to_nodemask(unsigned long *p_nodes)
{
	struct memory_initiator *initiator;

	if (list_empty(&initiators))
		return -ENXIO;

	list_for_each_entry(initiator, &initiators, node)
		set_bit(initiator->processor_pxm, p_nodes);

	return 0;
}

/*
 * Link the target's memory node under its best-performing initiator
 * node(s): access 0 considers all initiators, access 1 only those with
 * CPUs.
 */
static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	bool access0done = false;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		access0done = true;
		if (node_state(cpu_nid, N_CPU)) {
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
			return;
		}
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(NULL, &initiators, initiator_cmp);
	if (initiators_to_nodemask(p_nodes) < 0)
		return;

	if (!access0done) {
		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
			loc = localities_types[i];
			if (!loc)
				continue;

			best = 0;
			list_for_each_entry(initiator, &initiators, node) {
				u32 value;

				if (!test_bit(initiator->processor_pxm, p_nodes))
					continue;

				value = hmat_initiator_perf(target, initiator,
							    loc->hmat_loc);
				if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
					bitmap_clear(p_nodes, 0, initiator->processor_pxm);
				if (value != best)
					clear_bit(initiator->processor_pxm, p_nodes);
			}
			if (best)
				hmat_update_target_access(target, loc->hmat_loc->data_type,
							  best, 0);
		}

		for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
			cpu_nid = pxm_to_node(i);
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		}
	}

	/* Access 1 ignores Generic Initiators */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	if (initiators_to_nodemask(p_nodes) < 0)
		return;

	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!initiator->has_cpu) {
				clear_bit(initiator->processor_pxm, p_nodes);
				continue;
			}
			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
	}
	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
	}
}

/* Publish the target's memory-side caches via the node cache sysfs API */
static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

/* Publish the target's performance attributes for one access class */
static void hmat_register_target_perf(struct memory_target *target, int access)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
}

/* Hand each of the target's memory ranges to the hmem DAX resource layer */
static void hmat_register_target_devices(struct memory_target *target)
{
	struct resource *res;

	/*
	 * Do not bother creating devices if no driver is available to
	 * consume them.
	 */
	if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
		return;

	for (res = target->memregions.child; res; res = res->sibling) {
		int target_nid = pxm_to_node(target->memory_pxm);

		hmem_register_resource(target_nid, res);
	}
}

/*
 * Register one target: its devices unconditionally, and (once, under
 * target_lock) its initiator links, caches and perf attributes, but only
 * if the node is online.
 */
static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Devices may belong to either an offline or online
	 * node, so unconditionally add them.
	 */
	hmat_register_target_devices(target);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target, 0);
		hmat_register_target_perf(target, 1);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

/* Memory hotplug notifier: (re)register a target when its node onlines */
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

/* Tear down every list entry and release all reserved target regions */
static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					resource_size(res));
			res = res_next;
		}
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

/*
 * Entry point: seed targets from the SRAT, parse every HMAT subtable,
 * register the results, and keep the structures only if the hotplug
 * notifier may still need them.
 */
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled() || hmat_disable)
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring: Invalid table");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
subsys_initcall(hmat_init);
linux-master
drivers/acpi/numa/hmat.c
// SPDX-License-Identifier: GPL-2.0 /* * Intel Cherry Trail Crystal Cove PMIC operation region driver * * Copyright (C) 2019 Hans de Goede <[email protected]> */ #include <linux/acpi.h> #include <linux/init.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "intel_pmic.h" /* * We have no docs for the CHT Crystal Cove PMIC. The Asus Zenfone-2 kernel * code has 2 Crystal Cove regulator drivers, one calls the PMIC a "Crystal * Cove Plus" PMIC and talks about Cherry Trail, so presumably that one * could be used to get register info for the regulators if we need to * implement regulator support in the future. * * For now the sole purpose of this driver is to make * intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a * CHT Crystal Cove PMIC. */ static const struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = { .lpat_raw_to_temp = acpi_lpat_raw_to_temp, .pmic_i2c_address = 0x6e, }; static int intel_chtcrc_pmic_opregion_probe(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); return intel_pmic_install_opregion_handler(&pdev->dev, ACPI_HANDLE(pdev->dev.parent), pmic->regmap, &intel_chtcrc_pmic_opregion_data); } static struct platform_driver intel_chtcrc_pmic_opregion_driver = { .probe = intel_chtcrc_pmic_opregion_probe, .driver = { .name = "cht_crystal_cove_pmic", }, }; builtin_platform_driver(intel_chtcrc_pmic_opregion_driver);
linux-master
drivers/acpi/pmic/intel_pmic_chtcrc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * XPower AXP288 PMIC operation region driver
 *
 * Copyright (C) 2014 Intel Corporation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/mfd/axp20x.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <asm/iosf_mbi.h>
#include "intel_pmic.h"

#define XPOWER_GPADC_LOW	0x5b
#define XPOWER_GPI1_CTRL	0x92

#define GPI1_LDO_MASK		GENMASK(2, 0)
#define GPI1_LDO_ON		(3 << 0)
#define GPI1_LDO_OFF		(4 << 0)

#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK		GENMASK(1, 0)
#define AXP288_ADC_TS_CURRENT_OFF			(0 << 0)
#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING		(1 << 0)
#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND		(2 << 0)
#define AXP288_ADC_TS_CURRENT_ON			(3 << 0)

/* ACPI opregion byte-offset -> PMIC register/bit map for the power rails. */
static struct pmic_table power_table[] = {
	{ .address = 0x00, .reg = 0x13, .bit = 0x05, }, /* ALD1 */
	{ .address = 0x04, .reg = 0x13, .bit = 0x06, }, /* ALD2 */
	{ .address = 0x08, .reg = 0x13, .bit = 0x07, }, /* ALD3 */
	{ .address = 0x0c, .reg = 0x12, .bit = 0x03, }, /* DLD1 */
	{ .address = 0x10, .reg = 0x12, .bit = 0x04, }, /* DLD2 */
	{ .address = 0x14, .reg = 0x12, .bit = 0x05, }, /* DLD3 */
	{ .address = 0x18, .reg = 0x12, .bit = 0x06, }, /* DLD4 */
	{ .address = 0x1c, .reg = 0x12, .bit = 0x00, }, /* ELD1 */
	{ .address = 0x20, .reg = 0x12, .bit = 0x01, }, /* ELD2 */
	{ .address = 0x24, .reg = 0x12, .bit = 0x02, }, /* ELD3 */
	{ .address = 0x28, .reg = 0x13, .bit = 0x02, }, /* FLD1 */
	{ .address = 0x2c, .reg = 0x13, .bit = 0x03, }, /* FLD2 */
	{ .address = 0x30, .reg = 0x13, .bit = 0x04, }, /* FLD3 */
	{ .address = 0x34, .reg = 0x10, .bit = 0x03, }, /* BUC1 */
	{ .address = 0x38, .reg = 0x10, .bit = 0x06, }, /* BUC2 */
	{ .address = 0x3c, .reg = 0x10, .bit = 0x05, }, /* BUC3 */
	{ .address = 0x40, .reg = 0x10, .bit = 0x04, }, /* BUC4 */
	{ .address = 0x44, .reg = 0x10, .bit = 0x01, }, /* BUC5 */
	{ .address = 0x48, .reg = 0x10, .bit = 0x00 }, /* BUC6 */
	{ .address = 0x4c, .reg = 0x92, }, /* GPI1 */
};

/* TMP0 - TMP5 are the same, all from GPADC */
static struct pmic_table thermal_table[] = {
	{ .address = 0x00, .reg = XPOWER_GPADC_LOW },
	{ .address = 0x0c, .reg = XPOWER_GPADC_LOW },
	{ .address = 0x18, .reg = XPOWER_GPADC_LOW },
	{ .address = 0x24, .reg = XPOWER_GPADC_LOW },
	{ .address = 0x30, .reg = XPOWER_GPADC_LOW },
	{ .address = 0x3c, .reg = XPOWER_GPADC_LOW },
};

/* Read the on/off state of one power rail into *value (0 or 1). */
static int intel_xpower_pmic_get_power(struct regmap *regmap, int reg,
				       int bit, u64 *value)
{
	int data;

	if (regmap_read(regmap, reg, &data))
		return -EIO;

	/* GPIO1 LDO regulator needs special handling */
	if (reg == XPOWER_GPI1_CTRL)
		*value = ((data & GPI1_LDO_MASK) == GPI1_LDO_ON);
	else
		*value = (data & BIT(bit)) ? 1 : 0;

	return 0;
}

/*
 * Switch one power rail on/off. I2C access to the PMIC is shared with the
 * P-Unit, so the bus is claimed via iosf_mbi_block_punit_i2c_access() for
 * the whole read-modify-write.
 */
static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
					  int bit, bool on)
{
	int data, ret;

	ret = iosf_mbi_block_punit_i2c_access();
	if (ret)
		return ret;

	/* GPIO1 LDO regulator needs special handling */
	if (reg == XPOWER_GPI1_CTRL) {
		ret = regmap_update_bits(regmap, reg, GPI1_LDO_MASK,
					 on ? GPI1_LDO_ON : GPI1_LDO_OFF);
		goto out;
	}

	if (regmap_read(regmap, reg, &data)) {
		ret = -EIO;
		goto out;
	}

	if (on)
		data |= BIT(bit);
	else
		data &= ~BIT(bit);

	if (regmap_write(regmap, reg, data))
		ret = -EIO;
out:
	iosf_mbi_unblock_punit_i2c_access();

	return ret;
}

/**
 * intel_xpower_pmic_get_raw_temp(): Get raw temperature reading from the PMIC
 *
 * @regmap: regmap of the PMIC device
 * @reg: register to get the reading
 *
 * Return a positive value on success, errno on failure.
 */
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
	int ret, adc_ts_pin_ctrl;
	u8 buf[2];

	/*
	 * The current-source used for the battery temp-sensor (TS) is shared
	 * with the GPADC. For proper fuel-gauge and charger operation the TS
	 * current-source needs to be permanently on. But to read the GPADC we
	 * need to temporary switch the TS current-source to ondemand, so that
	 * the GPADC can use it, otherwise we will always read an all 0 value.
	 *
	 * Note that the switching from on to on-ondemand is not necessary
	 * when the TS current-source is off (this happens on devices which
	 * do not use the TS-pin).
	 */
	ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
	if (ret)
		return ret;

	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
		/*
		 * AXP288_ADC_TS_PIN_CTRL reads are cached by the regmap, so
		 * this does to a single I2C-transfer, and thus there is no
		 * need to explicitly call iosf_mbi_block_punit_i2c_access().
		 */
		ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
					 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
					 AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
		if (ret)
			return ret;

		/* Wait a bit after switching the current-source */
		usleep_range(6000, 10000);
	}

	ret = iosf_mbi_block_punit_i2c_access();
	if (ret)
		return ret;

	/* 12-bit result: high byte << 4 | low-byte bits 7:4. */
	ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
	if (ret == 0)
		ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);

	/* Restore the TS current-source to permanently-on if it was on. */
	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
		regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
				   AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
				   AXP288_ADC_TS_CURRENT_ON);
	}

	iosf_mbi_unblock_punit_i2c_access();

	return ret;
}

/*
 * Execute one PMIC element of a MIPI (VBT) sequence. Only this PMIC's own
 * I2C address (0x34) is accepted; anything else is rejected with -ENXIO.
 */
static int intel_xpower_exec_mipi_pmic_seq_element(struct regmap *regmap,
						   u16 i2c_address,
						   u32 reg_address,
						   u32 value, u32 mask)
{
	int ret;

	if (i2c_address != 0x34) {
		pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
		       __func__, i2c_address, reg_address, value, mask);
		return -ENXIO;
	}

	ret = iosf_mbi_block_punit_i2c_access();
	if (ret)
		return ret;

	ret = regmap_update_bits(regmap, reg_address, mask, value);

	iosf_mbi_unblock_punit_i2c_access();

	return ret;
}

/* LPAT conversion with the raw value clamped into the table's raw range. */
static int intel_xpower_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
					 int raw)
{
	struct acpi_lpat first = lpat_table->lpat[0];
	struct acpi_lpat last = lpat_table->lpat[lpat_table->lpat_count - 1];

	/*
	 * Some LPAT tables in the ACPI Device for the AXP288 PMIC for some
	 * reason only describe a small temperature range, e.g. 27° - 37°
	 * Celsius. Resulting in errors when the tablet is idle in a cool room.
	 *
	 * To avoid these errors clamp the raw value to be inside the LPAT.
	 * The table may be ascending or descending in raw values.
	 */
	if (first.raw < last.raw)
		raw = clamp(raw, first.raw, last.raw);
	else
		raw = clamp(raw, last.raw, first.raw);

	return acpi_lpat_raw_to_temp(lpat_table, raw);
}

static const struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
	.get_power = intel_xpower_pmic_get_power,
	.update_power = intel_xpower_pmic_update_power,
	.get_raw_temp = intel_xpower_pmic_get_raw_temp,
	.exec_mipi_pmic_seq_element = intel_xpower_exec_mipi_pmic_seq_element,
	.lpat_raw_to_temp = intel_xpower_lpat_raw_to_temp,
	.power_table = power_table,
	.power_table_count = ARRAY_SIZE(power_table),
	.thermal_table = thermal_table,
	.thermal_table_count = ARRAY_SIZE(thermal_table),
	.pmic_i2c_address = 0x34,
};

/* No-op GPIO opregion handler: accept all accesses without touching hw. */
static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
						  acpi_physical_address address,
						  u32 bit_width, u64 *value,
						  void *handler_context,
						  void *region_context)
{
	return AE_OK;
}

/*
 * Install the GPIO opregion handler first, then the PMIC opregion handler;
 * roll the GPIO handler back if the latter fails.
 */
static int intel_xpower_pmic_opregion_probe(struct platform_device *pdev)
{
	struct device *parent = pdev->dev.parent;
	struct axp20x_dev *axp20x = dev_get_drvdata(parent);
	acpi_status status;
	int result;

	status = acpi_install_address_space_handler(ACPI_HANDLE(parent),
						    ACPI_ADR_SPACE_GPIO,
						    intel_xpower_pmic_gpio_handler,
						    NULL, NULL);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	result = intel_pmic_install_opregion_handler(&pdev->dev,
						     ACPI_HANDLE(parent),
						     axp20x->regmap,
						     &intel_xpower_pmic_opregion_data);
	if (result)
		acpi_remove_address_space_handler(ACPI_HANDLE(parent),
						  ACPI_ADR_SPACE_GPIO,
						  intel_xpower_pmic_gpio_handler);

	return result;
}

static struct platform_driver intel_xpower_pmic_opregion_driver = {
	.probe = intel_xpower_pmic_opregion_probe,
	.driver = {
		.name = "axp288_pmic_acpi",
	},
};
builtin_platform_driver(intel_xpower_pmic_opregion_driver);
linux-master
drivers/acpi/pmic/intel_pmic_xpower.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel BXT WhiskeyCove PMIC operation region driver
 *
 * Copyright (C) 2015 Intel Corporation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
#include "intel_pmic.h"

#define WHISKEY_COVE_ALRT_HIGH_BIT_MASK 0x0F
/* ADC result high nibble / current-source select packed in the high byte. */
#define WHISKEY_COVE_ADC_HIGH_BIT(x)	(((x & 0x0F) << 8))
#define WHISKEY_COVE_ADC_CURSRC(x)	(((x & 0xF0) >> 4))
#define VR_MODE_DISABLED	0
#define VR_MODE_AUTO		BIT(0)
#define VR_MODE_NORMAL		BIT(1)
#define VR_MODE_SWITCH		BIT(2)
#define VR_MODE_ECO		(BIT(0)|BIT(1))
#define VSWITCH2_OUTPUT		BIT(5)
#define VSWITCH1_OUTPUT		BIT(4)
#define VUSBPHY_CHARGE		BIT(1)

/* Opregion byte-offset -> voltage-regulator control register/mode-bit map. */
static struct pmic_table power_table[] = {
	{ .address = 0x0, .reg = 0x63, .bit = VR_MODE_AUTO, }, /* VDD1 -> VDD1CNT */
	{ .address = 0x04, .reg = 0x65, .bit = VR_MODE_AUTO, }, /* VDD2 -> VDD2CNT */
	{ .address = 0x08, .reg = 0x67, .bit = VR_MODE_AUTO, }, /* VDD3 -> VDD3CNT */
	{ .address = 0x0c, .reg = 0x6d, .bit = VR_MODE_AUTO, }, /* VLFX -> VFLEXCNT */
	{ .address = 0x10, .reg = 0x6f, .bit = VR_MODE_NORMAL, }, /* VP1A -> VPROG1ACNT */
	{ .address = 0x14, .reg = 0x70, .bit = VR_MODE_NORMAL, }, /* VP1B -> VPROG1BCNT */
	{ .address = 0x18, .reg = 0x71, .bit = VR_MODE_NORMAL, }, /* VP1C -> VPROG1CCNT */
	{ .address = 0x1c, .reg = 0x72, .bit = VR_MODE_NORMAL, }, /* VP1D -> VPROG1DCNT */
	{ .address = 0x20, .reg = 0x73, .bit = VR_MODE_NORMAL, }, /* VP2A -> VPROG2ACNT */
	{ .address = 0x24, .reg = 0x74, .bit = VR_MODE_NORMAL, }, /* VP2B -> VPROG2BCNT */
	{ .address = 0x28, .reg = 0x75, .bit = VR_MODE_NORMAL, }, /* VP2C -> VPROG2CCNT */
	{ .address = 0x2c, .reg = 0x76, .bit = VR_MODE_NORMAL, }, /* VP3A -> VPROG3ACNT */
	{ .address = 0x30, .reg = 0x77, .bit = VR_MODE_NORMAL, }, /* VP3B -> VPROG3BCNT */
	{ .address = 0x34, .reg = 0x78, .bit = VSWITCH2_OUTPUT, }, /* VSW2 -> VLD0CNT Bit 5*/
	{ .address = 0x38, .reg = 0x78, .bit = VSWITCH1_OUTPUT, }, /* VSW1 -> VLD0CNT Bit 4 */
	{ .address = 0x3c, .reg = 0x78, .bit = VUSBPHY_CHARGE, }, /* VUPY -> VLDOCNT Bit 1 */
	{ .address = 0x40, .reg = 0x7b, .bit = VR_MODE_NORMAL, }, /* VRSO -> VREFSOCCNT*/
	{ .address = 0x44, .reg = 0xA0, .bit = VR_MODE_NORMAL, }, /* VP1E -> VPROG1ECNT */
	{ .address = 0x48, .reg = 0xA1, .bit = VR_MODE_NORMAL, }, /* VP1F -> VPROG1FCNT */
	{ .address = 0x4c, .reg = 0xA2, .bit = VR_MODE_NORMAL, }, /* VP2D -> VPROG2DCNT */
	{ .address = 0x50, .reg = 0xA3, .bit = VR_MODE_NORMAL, }, /* VP4A -> VPROG4ACNT */
	{ .address = 0x54, .reg = 0xA4, .bit = VR_MODE_NORMAL, }, /* VP4B -> VPROG4BCNT */
	{ .address = 0x58, .reg = 0xA5, .bit = VR_MODE_NORMAL, }, /* VP4C -> VPROG4CCNT */
	{ .address = 0x5c, .reg = 0xA6, .bit = VR_MODE_NORMAL, }, /* VP4D -> VPROG4DCNT */
	{ .address = 0x60, .reg = 0xA7, .bit = VR_MODE_NORMAL, }, /* VP5A -> VPROG5ACNT */
	{ .address = 0x64, .reg = 0xA8, .bit = VR_MODE_NORMAL, }, /* VP5B -> VPROG5BCNT */
	{ .address = 0x68, .reg = 0xA9, .bit = VR_MODE_NORMAL, }, /* VP6A -> VPROG6ACNT */
	{ .address = 0x6c, .reg = 0xAA, .bit = VR_MODE_NORMAL, }, /* VP6B -> VPROG6BCNT */
	{ .address = 0x70, .reg = 0x36, .bit = BIT(2), }, /* SDWN_N -> MODEMCTRL Bit 2 */
	{ .address = 0x74, .reg = 0x36, .bit = BIT(0), } /* MOFF -> MODEMCTRL Bit 0 */
};

static struct pmic_table thermal_table[] = {
	{ .address = 0x00, .reg = 0x4F39 },
	{ .address = 0x04, .reg = 0x4F24 },
	{ .address = 0x08, .reg = 0x4F26 },
	{ .address = 0x0c, .reg = 0x4F3B },
	{ .address = 0x10, .reg = 0x4F28 },
	{ .address = 0x14, .reg = 0x4F2A },
	{ .address = 0x18, .reg = 0x4F3D },
	{ .address = 0x1c, .reg = 0x4F2C },
	{ .address = 0x20, .reg = 0x4F2E },
	{ .address = 0x24, .reg = 0x4F3F },
	{ .address = 0x28, .reg = 0x4F30 },
	{ .address = 0x30, .reg = 0x4F41 },
	{ .address = 0x34, .reg = 0x4F32 },
	{ .address = 0x3c, .reg = 0x4F43 },
	{ .address = 0x40, .reg = 0x4F34 },
	{ .address = 0x48, .reg = 0x4F6A, .bit = 0, },
	{ .address = 0x4C, .reg = 0x4F6A, .bit = 1 },
	{ .address = 0x50, .reg = 0x4F6A, .bit = 2 },
	{ .address = 0x54, .reg = 0x4F6A, .bit = 4 },
	{ .address = 0x58, .reg = 0x4F6A, .bit = 5 },
	{ .address = 0x5C, .reg = 0x4F6A, .bit = 3 },
};

/*
 * Report a rail as on when any of the mode bits in 'bit' is set.
 * Note: 'bit' holds a bit MASK (VR_MODE_*), not a bit number.
 */
static int intel_bxtwc_pmic_get_power(struct regmap *regmap, int reg,
				      int bit, u64 *value)
{
	int data;

	if (regmap_read(regmap, reg, &data))
		return -EIO;

	*value = (data & bit) ? 1 : 0;
	return 0;
}

/* Set or clear the rail's mode-bit mask; 'bit' is a mask (see above). */
static int intel_bxtwc_pmic_update_power(struct regmap *regmap, int reg,
					 int bit, bool on)
{
	u8 val, mask = bit;

	if (on)
		val = 0xFF;
	else
		val = 0x0;

	return regmap_update_bits(regmap, reg, mask, val);
}

/*
 * Read a raw 12-bit ADC value split over two registers (low byte at 'reg',
 * high nibble plus current-source select at 'reg - 1') and scale it by the
 * per-current-source resolution from rlsb_array.
 */
static int intel_bxtwc_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
	unsigned int val, adc_val, reg_val;
	u8 temp_l, temp_h, cursrc;
	unsigned long rlsb;
	/* Resolution (LSB weight * 1000) indexed by the current-source. */
	static const unsigned long rlsb_array[] = {
		0, 260420, 130210, 65100, 32550, 16280,
		8140, 4070, 2030, 0, 260420, 130210 };

	if (regmap_read(regmap, reg, &val))
		return -EIO;
	temp_l = (u8) val;

	if (regmap_read(regmap, (reg - 1), &val))
		return -EIO;
	temp_h = (u8) val;

	reg_val = temp_l | WHISKEY_COVE_ADC_HIGH_BIT(temp_h);
	cursrc = WHISKEY_COVE_ADC_CURSRC(temp_h);
	rlsb = rlsb_array[cursrc];
	adc_val = reg_val * rlsb / 1000;

	return adc_val;
}

/*
 * Program an alert (aux) threshold: derive a current-source select and a
 * threshold from the raw value, then write them split over 'reg - 1'
 * (high bits) and 'reg' (low byte).
 */
static int
intel_bxtwc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
{
	u32 bsr_num;
	u16 resi_val, count = 0, thrsh = 0;
	u8 alrt_h, alrt_l, cursel = 0;

	bsr_num = raw;
	bsr_num /= (1 << 5);

	count = fls(bsr_num) - 1;

	cursel = clamp_t(s8, (count - 7), 0, 7);
	thrsh = raw / (1 << (4 + cursel));

	resi_val = (cursel << 9) | thrsh;
	alrt_h = (resi_val >> 8) & WHISKEY_COVE_ALRT_HIGH_BIT_MASK;
	if (regmap_update_bits(regmap,
			       reg - 1,
			       WHISKEY_COVE_ALRT_HIGH_BIT_MASK,
			       alrt_h))
		return -EIO;

	alrt_l = (u8)resi_val;
	return regmap_write(regmap, reg, alrt_l);
}

/* Read one policy-enable bit; here 'bit' is a bit NUMBER. */
static int
intel_bxtwc_pmic_get_policy(struct regmap *regmap, int reg, int bit, u64 *value)
{
	u8 mask = BIT(bit);
	unsigned int val;

	if (regmap_read(regmap, reg, &val))
		return -EIO;

	*value = (val & mask) >> bit;
	return 0;
}

/* Write one policy-enable bit; 'bit' is a bit NUMBER, enable is 0/1. */
static int
intel_bxtwc_pmic_update_policy(struct regmap *regmap,
			       int reg, int bit, int enable)
{
	u8 mask = BIT(bit), val = enable << bit;

	return regmap_update_bits(regmap, reg, mask, val);
}

static const struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
	.get_power = intel_bxtwc_pmic_get_power,
	.update_power = intel_bxtwc_pmic_update_power,
	.get_raw_temp = intel_bxtwc_pmic_get_raw_temp,
	.update_aux = intel_bxtwc_pmic_update_aux,
	.get_policy = intel_bxtwc_pmic_get_policy,
	.update_policy = intel_bxtwc_pmic_update_policy,
	.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
	.power_table = power_table,
	.power_table_count = ARRAY_SIZE(power_table),
	.thermal_table = thermal_table,
	.thermal_table_count = ARRAY_SIZE(thermal_table),
};

/* Install the ACPI opregion handler on behalf of the parent PMIC device. */
static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
{
	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);

	return intel_pmic_install_opregion_handler(&pdev->dev,
			ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
			&intel_bxtwc_pmic_opregion_data);
}

static const struct platform_device_id bxt_wc_opregion_id_table[] = {
	{ .name = "bxt_wcove_region" },
	{},
};

static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
	.probe = intel_bxtwc_pmic_opregion_probe,
	.driver = {
		.name = "bxt_whiskey_cove_pmic",
	},
	.id_table = bxt_wc_opregion_id_table,
};
builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
linux-master
drivers/acpi/pmic/intel_pmic_bxtwc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Bay Trail Crystal Cove PMIC operation region driver
 *
 * Copyright (C) 2014 Intel Corporation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "intel_pmic.h"

#define PWR_SOURCE_SELECT	BIT(1)

#define PMIC_A0LOCK_REG		0xc5

/* Opregion byte-offset -> rail-control register map (unknown rails kept
 * as commented-out placeholders). */
static struct pmic_table power_table[] = {
/*	{ .address = 0x00, .reg = ??, .bit = ??, }, ** VSYS */
	{ .address = 0x04, .reg = 0x63, .bit = 0x00, }, /* SYSX -> VSYS_SX */
	{ .address = 0x08, .reg = 0x62, .bit = 0x00, }, /* SYSU -> VSYS_U */
	{ .address = 0x0c, .reg = 0x64, .bit = 0x00, }, /* SYSS -> VSYS_S */
	{ .address = 0x10, .reg = 0x6a, .bit = 0x00, }, /* V50S -> V5P0S */
	{ .address = 0x14, .reg = 0x6b, .bit = 0x00, }, /* HOST -> VHOST, USB2/3 host */
	{ .address = 0x18, .reg = 0x6c, .bit = 0x00, }, /* VBUS -> VBUS, USB2/3 OTG */
	{ .address = 0x1c, .reg = 0x6d, .bit = 0x00, }, /* HDMI -> VHDMI */
/*	{ .address = 0x20, .reg = ??, .bit = ??, }, ** S285 */
	{ .address = 0x24, .reg = 0x66, .bit = 0x00, }, /* X285 -> V2P85SX, camera */
/*	{ .address = 0x28, .reg = ??, .bit = ??, }, ** V33A */
	{ .address = 0x2c, .reg = 0x69, .bit = 0x00, }, /* V33S -> V3P3S, display/ssd/audio */
	{ .address = 0x30, .reg = 0x68, .bit = 0x00, }, /* V33U -> V3P3U, SDIO wifi&bt */
/*	{ .address = 0x34 .. 0x40, .reg = ??, .bit = ??, }, ** V33I, V18A, REFQ, V12A */
	{ .address = 0x44, .reg = 0x5c, .bit = 0x00, }, /* V18S -> V1P8S, SOC/USB PHY/SIM */
	{ .address = 0x48, .reg = 0x5d, .bit = 0x00, }, /* V18X -> V1P8SX, eMMC/camara/audio */
	{ .address = 0x4c, .reg = 0x5b, .bit = 0x00, }, /* V18U -> V1P8U, LPDDR */
	{ .address = 0x50, .reg = 0x61, .bit = 0x00, }, /* V12X -> V1P2SX, SOC SFR */
	{ .address = 0x54, .reg = 0x60, .bit = 0x00, }, /* V12S -> V1P2S, MIPI */
/*	{ .address = 0x58, .reg = ??, .bit = ??, }, ** V10A */
	{ .address = 0x5c, .reg = 0x56, .bit = 0x00, }, /* V10S -> V1P0S, SOC GFX */
	{ .address = 0x60, .reg = 0x57, .bit = 0x00, }, /* V10X -> V1P0SX, SOC display/DDR IO/PCIe */
	{ .address = 0x64, .reg = 0x59, .bit = 0x00, }, /* V105 -> V1P05S, L2 SRAM */
};

static struct pmic_table thermal_table[] = {
	{ .address = 0x00, .reg = 0x75 },
	{ .address = 0x04, .reg = 0x95 },
	{ .address = 0x08, .reg = 0x97 },
	{ .address = 0x0c, .reg = 0x77 },
	{ .address = 0x10, .reg = 0x9a },
	{ .address = 0x14, .reg = 0x9c },
	{ .address = 0x18, .reg = 0x79 },
	{ .address = 0x1c, .reg = 0x9f },
	{ .address = 0x20, .reg = 0xa1 },
	{ .address = 0x48, .reg = 0x94 },
	{ .address = 0x4c, .reg = 0x99 },
	{ .address = 0x50, .reg = 0x9e },
};

/*
 * A rail counts as on only when it is both SW-controlled (PWR_SOURCE_SELECT)
 * and its enable bit is set.
 */
static int intel_crc_pmic_get_power(struct regmap *regmap, int reg,
				    int bit, u64 *value)
{
	int data;

	if (regmap_read(regmap, reg, &data))
		return -EIO;

	*value = (data & PWR_SOURCE_SELECT) && (data & BIT(bit)) ? 1 : 0;
	return 0;
}

/*
 * Enable/disable a rail, always forcing SW control (PWR_SOURCE_SELECT stays
 * set in both directions).
 */
static int intel_crc_pmic_update_power(struct regmap *regmap, int reg,
				       int bit, bool on)
{
	int data;

	if (regmap_read(regmap, reg, &data))
		return -EIO;

	if (on) {
		data |= PWR_SOURCE_SELECT | BIT(bit);
	} else {
		data &= ~BIT(bit);
		data |= PWR_SOURCE_SELECT;
	}

	if (regmap_write(regmap, reg, data))
		return -EIO;
	return 0;
}

static int intel_crc_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
	int temp_l, temp_h;

	/*
	 * Raw temperature value is 10bits: 8bits in reg
	 * and 2bits in reg-1: bit0,1
	 */
	if (regmap_read(regmap, reg, &temp_l) ||
	    regmap_read(regmap, reg - 1, &temp_h))
		return -EIO;

	return temp_l | (temp_h & 0x3) << 8;
}

/* Write a 10-bit aux threshold split over 'reg' (low 8) and 'reg - 1'. */
static int intel_crc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
{
	return regmap_write(regmap, reg, raw) ||
		regmap_update_bits(regmap, reg - 1, 0x3, raw >> 8) ? -EIO : 0;
}

/* Policy-enable lives in bit 7 of the register; 'bit' is unused here. */
static int intel_crc_pmic_get_policy(struct regmap *regmap,
					int reg, int bit, u64 *value)
{
	int pen;

	if (regmap_read(regmap, reg, &pen))
		return -EIO;
	*value = pen >> 7;
	return 0;
}

static int intel_crc_pmic_update_policy(struct regmap *regmap,
					int reg, int bit, int enable)
{
	int alert0;

	/* Update to policy enable bit requires unlocking a0lock */
	if (regmap_read(regmap, PMIC_A0LOCK_REG, &alert0))
		return -EIO;

	if (regmap_update_bits(regmap, PMIC_A0LOCK_REG, 0x01, 0))
		return -EIO;

	if (regmap_update_bits(regmap, reg, 0x80, enable << 7))
		return -EIO;

	/* restore alert0 */
	if (regmap_write(regmap, PMIC_A0LOCK_REG, alert0))
		return -EIO;

	return 0;
}

static const struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
	.get_power	= intel_crc_pmic_get_power,
	.update_power	= intel_crc_pmic_update_power,
	.get_raw_temp	= intel_crc_pmic_get_raw_temp,
	.update_aux	= intel_crc_pmic_update_aux,
	.get_policy	= intel_crc_pmic_get_policy,
	.update_policy	= intel_crc_pmic_update_policy,
	.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
	.power_table	= power_table,
	.power_table_count = ARRAY_SIZE(power_table),
	.thermal_table	= thermal_table,
	.thermal_table_count = ARRAY_SIZE(thermal_table),
	.pmic_i2c_address = 0x6e,
};

/* Install the ACPI opregion handler on behalf of the parent PMIC device. */
static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
{
	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);

	return intel_pmic_install_opregion_handler(&pdev->dev,
			ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
			&intel_crc_pmic_opregion_data);
}

static struct platform_driver intel_crc_pmic_opregion_driver = {
	.probe = intel_crc_pmic_opregion_probe,
	.driver = {
		.name = "byt_crystal_cove_pmic",
	},
};
builtin_platform_driver(intel_crc_pmic_opregion_driver);
linux-master
drivers/acpi/pmic/intel_pmic_bytcrc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dollar Cove TI PMIC operation region driver
 * Copyright (C) 2014 Intel Corporation. All rights reserved.
 *
 * Rewritten and cleaned up
 * Copyright (C) 2017 Takashi Iwai <[email protected]>
 */

#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/platform_device.h>
#include "intel_pmic.h"

/* registers stored in 16bit BE (high:low, total 10bit) */
#define CHTDC_TI_VBAT		0x54
#define CHTDC_TI_DIETEMP	0x56
#define CHTDC_TI_BPTHERM	0x58
#define CHTDC_TI_GPADC		0x5a

/* Opregion byte-offset -> LDO control register map. */
static struct pmic_table chtdc_ti_power_table[] = {
	{ .address = 0x00, .reg = 0x41 }, /* LDO1 */
	{ .address = 0x04, .reg = 0x42 }, /* LDO2 */
	{ .address = 0x08, .reg = 0x43 }, /* LDO3 */
	{ .address = 0x0c, .reg = 0x45 }, /* LDO5 */
	{ .address = 0x10, .reg = 0x46 }, /* LDO6 */
	{ .address = 0x14, .reg = 0x47 }, /* LDO7 */
	{ .address = 0x18, .reg = 0x48 }, /* LDO8 */
	{ .address = 0x1c, .reg = 0x49 }, /* LDO9 */
	{ .address = 0x20, .reg = 0x4a }, /* LD10 */
	{ .address = 0x24, .reg = 0x4b }, /* LD11 */
	{ .address = 0x28, .reg = 0x4c }, /* LD12 */
	{ .address = 0x2c, .reg = 0x4d }, /* LD13 */
	{ .address = 0x30, .reg = 0x4e }, /* LD14 */
};

static struct pmic_table chtdc_ti_thermal_table[] = {
	{ .address = 0x00, .reg = CHTDC_TI_GPADC },
	{ .address = 0x0c, .reg = CHTDC_TI_GPADC },
	/* TMP2 -> SYSTEMP */
	{ .address = 0x18, .reg = CHTDC_TI_GPADC },
	/* TMP3 -> BPTHERM */
	{ .address = 0x24, .reg = CHTDC_TI_BPTHERM },
	{ .address = 0x30, .reg = CHTDC_TI_GPADC },
	/* TMP5 -> DIETEMP */
	{ .address = 0x3c, .reg = CHTDC_TI_DIETEMP },
};

/* LDO on/off state is bit 0 of the control register. */
static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg,
				   int bit, u64 *value)
{
	int data;

	if (regmap_read(regmap, reg, &data))
		return -EIO;

	*value = data & 1;
	return 0;
}

static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg,
				      int bit, bool on)
{
	return regmap_update_bits(regmap, reg, 1, on);
}

/* Read a 10-bit big-endian ADC result (high:low register pair). */
static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
	u8 buf[2];

	if (regmap_bulk_read(regmap, reg, buf, 2))
		return -EIO;

	/* stored in big-endian */
	return ((buf[0] & 0x03) << 8) | buf[1];
}

static const struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
	.get_power = chtdc_ti_pmic_get_power,
	.update_power = chtdc_ti_pmic_update_power,
	.get_raw_temp = chtdc_ti_pmic_get_raw_temp,
	.lpat_raw_to_temp = acpi_lpat_raw_to_temp,
	.power_table = chtdc_ti_power_table,
	.power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
	.thermal_table = chtdc_ti_thermal_table,
	.thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table),
	.pmic_i2c_address = 0x5e,
};

/*
 * Install the opregion handler, then clear the ACPI _DEP on the parent so
 * devices that depend on this PMIC get (re-)enumerated.
 */
static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
{
	struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
	int err;

	err = intel_pmic_install_opregion_handler(&pdev->dev,
			ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
			&chtdc_ti_pmic_opregion_data);
	if (err < 0)
		return err;

	/* Re-enumerate devices depending on PMIC */
	acpi_dev_clear_dependencies(ACPI_COMPANION(pdev->dev.parent));
	return 0;
}

static const struct platform_device_id chtdc_ti_pmic_opregion_id_table[] = {
	{ .name = "chtdc_ti_region" },
	{},
};

static struct platform_driver chtdc_ti_pmic_opregion_driver = {
	.probe = chtdc_ti_pmic_opregion_probe,
	.driver = {
		.name = "cht_dollar_cove_ti_pmic",
	},
	.id_table = chtdc_ti_pmic_opregion_id_table,
};
builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
linux-master
drivers/acpi/pmic/intel_pmic_chtdc_ti.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pmic.c - Intel PMIC operation region driver
 *
 * Copyright (C) 2014 Intel Corporation. All rights reserved.
 */

#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regmap.h>
#include <acpi/acpi_lpat.h>
#include "intel_pmic.h"

#define PMIC_POWER_OPREGION_ID		0x8d
#define PMIC_THERMAL_OPREGION_ID	0x8c
#define PMIC_REGS_OPREGION_ID		0x8f

/* Accumulated state of the "regs" opregion write protocol (see handler). */
struct intel_pmic_regs_handler_ctx {
	unsigned int val;
	u16 addr;
};

struct intel_pmic_opregion {
	struct mutex lock;
	struct acpi_lpat_conversion_table *lpat_table;
	struct regmap *regmap;
	const struct intel_pmic_opregion_data *data;
	struct intel_pmic_regs_handler_ctx ctx;
};

/* Single registered PMIC; used by the MIPI-sequence entry point below. */
static struct intel_pmic_opregion *intel_pmic_opregion;

/*
 * Translate an opregion byte address into its PMIC register (and optionally
 * bit) via the driver-supplied table. Returns -ENOENT when unmapped.
 */
static int pmic_get_reg_bit(int address, struct pmic_table *table,
			    int count, int *reg, int *bit)
{
	int i;

	for (i = 0; i < count; i++) {
		if (table[i].address == address) {
			*reg = table[i].reg;
			if (bit)
				*bit = table[i].bit;
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Power opregion handler: 32-bit accesses only; writes are restricted to
 * the values 0 and 1. Dispatches to the PMIC driver's get/update callbacks
 * under the opregion lock.
 */
static acpi_status intel_pmic_power_handler(u32 function,
		acpi_physical_address address, u32 bits, u64 *value64,
		void *handler_context, void *region_context)
{
	struct intel_pmic_opregion *opregion = region_context;
	struct regmap *regmap = opregion->regmap;
	const struct intel_pmic_opregion_data *d = opregion->data;
	int reg, bit, result;

	if (bits != 32 || !value64)
		return AE_BAD_PARAMETER;

	if (function == ACPI_WRITE && !(*value64 == 0 || *value64 == 1))
		return AE_BAD_PARAMETER;

	result = pmic_get_reg_bit(address, d->power_table,
				  d->power_table_count, &reg, &bit);
	if (result == -ENOENT)
		return AE_BAD_PARAMETER;

	mutex_lock(&opregion->lock);

	result = function == ACPI_READ ?
		d->get_power(regmap, reg, bit, value64) :
		d->update_power(regmap, reg, bit, *value64 == 1);

	mutex_unlock(&opregion->lock);

	return result ? AE_ERROR : AE_OK;
}

/*
 * Read a temperature: raw ADC value from the PMIC driver, converted through
 * the ACPI LPAT table when one was found; raw value is passed through
 * unconverted otherwise.
 */
static int pmic_read_temp(struct intel_pmic_opregion *opregion,
			  int reg, u64 *value)
{
	int raw_temp, temp;

	if (!opregion->data->get_raw_temp)
		return -ENXIO;

	raw_temp = opregion->data->get_raw_temp(opregion->regmap, reg);
	if (raw_temp < 0)
		return raw_temp;

	if (!opregion->lpat_table) {
		*value = raw_temp;
		return 0;
	}

	temp = opregion->data->lpat_raw_to_temp(opregion->lpat_table,
						raw_temp);
	if (temp < 0)
		return temp;

	*value = temp;
	return 0;
}

/* Temperature fields are read-only. */
static int pmic_thermal_temp(struct intel_pmic_opregion *opregion, int reg,
			     u32 function, u64 *value)
{
	return function == ACPI_READ ?
		pmic_read_temp(opregion, reg, value) : -EINVAL;
}

/*
 * Aux (alert threshold) fields: read like a temperature; on write convert
 * the requested temperature back to a raw value via LPAT (when present)
 * before handing it to the PMIC driver.
 */
static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
			    u32 function, u64 *value)
{
	int raw_temp;

	if (function == ACPI_READ)
		return pmic_read_temp(opregion, reg, value);

	if (!opregion->data->update_aux)
		return -ENXIO;

	if (opregion->lpat_table) {
		raw_temp = acpi_lpat_temp_to_raw(opregion->lpat_table,
						 *value);
		if (raw_temp < 0)
			return raw_temp;
	} else {
		raw_temp = *value;
	}

	return opregion->data->update_aux(opregion->regmap, reg, raw_temp);
}

/* Policy-enable (pen) fields: 0/1 flag routed to the PMIC driver. */
static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
			    int bit, u32 function, u64 *value)
{
	const struct intel_pmic_opregion_data *d = opregion->data;
	struct regmap *regmap = opregion->regmap;

	if (!d->get_policy || !d->update_policy)
		return -ENXIO;

	if (function == ACPI_READ)
		return d->get_policy(regmap, reg, bit, value);

	if (*value != 0 && *value != 1)
		return -EINVAL;

	return d->update_policy(regmap, reg, bit, *value);
}

/* Thermal opregion layout: each zone is 12 bytes (temp, aux0, aux1, ...). */
static bool pmic_thermal_is_temp(int address)
{
	return (address <= 0x3c) && !(address % 12);
}

static bool pmic_thermal_is_aux(int address)
{
	return (address >= 4 && address <= 0x40 && !((address - 4) % 12)) ||
	       (address >= 8 && address <= 0x44 && !((address - 8) % 12));
}

static bool pmic_thermal_is_pen(int address)
{
	return address >= 0x48 && address <= 0x5c;
}

/*
 * Thermal opregion handler: classify the address as temp/aux/pen and
 * dispatch accordingly, all under the opregion lock.
 */
static acpi_status intel_pmic_thermal_handler(u32 function,
		acpi_physical_address address, u32 bits, u64 *value64,
		void *handler_context, void *region_context)
{
	struct intel_pmic_opregion *opregion = region_context;
	const struct intel_pmic_opregion_data *d = opregion->data;
	int reg, bit, result;

	if (bits != 32 || !value64)
		return AE_BAD_PARAMETER;

	result = pmic_get_reg_bit(address, d->thermal_table,
				  d->thermal_table_count, &reg, &bit);
	if (result == -ENOENT)
		return AE_BAD_PARAMETER;

	mutex_lock(&opregion->lock);

	if (pmic_thermal_is_temp(address))
		result = pmic_thermal_temp(opregion, reg, function, value64);
	else if (pmic_thermal_is_aux(address))
		result = pmic_thermal_aux(opregion, reg, function, value64);
	else if (pmic_thermal_is_pen(address))
		result = pmic_thermal_pen(opregion, reg, bit,
						function, value64);
	else
		result = -EINVAL;

	mutex_unlock(&opregion->lock);

	if (result < 0) {
		if (result == -EINVAL)
			return AE_BAD_PARAMETER;
		else
			return AE_ERROR;
	}

	return AE_OK;
}

/*
 * "Regs" opregion handler implementing a small byte-wise protocol:
 * write addr-high at offset 1, addr-low at offset 2, the data byte at
 * offset 3, then write non-zero (commit write) or zero (trigger read) to
 * offset 4; the result of a read is fetched by reading offset 3.
 * Offset 0 writes are accepted and ignored.
 */
static acpi_status intel_pmic_regs_handler(u32 function,
		acpi_physical_address address, u32 bits, u64 *value64,
		void *handler_context, void *region_context)
{
	struct intel_pmic_opregion *opregion = region_context;
	int result = -EINVAL;

	if (function == ACPI_WRITE) {
		switch (address) {
		case 0:
			return AE_OK;
		case 1:
			opregion->ctx.addr |= (*value64 & 0xff) << 8;
			return AE_OK;
		case 2:
			opregion->ctx.addr |= *value64 & 0xff;
			return AE_OK;
		case 3:
			opregion->ctx.val = *value64 & 0xff;
			return AE_OK;
		case 4:
			if (*value64) {
				result = regmap_write(opregion->regmap,
						      opregion->ctx.addr,
						      opregion->ctx.val);
			} else {
				result = regmap_read(opregion->regmap,
						     opregion->ctx.addr,
						     &opregion->ctx.val);
			}
			/* Reset the accumulated address for the next access. */
			opregion->ctx.addr = 0;
		}
	}

	if (function == ACPI_READ && address == 3) {
		*value64 = opregion->ctx.val;
		return AE_OK;
	}

	/* Any other access pattern falls through here with result == -EINVAL. */
	if (result < 0) {
		if (result == -EINVAL)
			return AE_BAD_PARAMETER;
		else
			return AE_ERROR;
	}

	return AE_OK;
}

/*
 * Install the power, thermal and regs opregion handlers for a PMIC.
 * Power/thermal handlers are only installed when the driver provides the
 * corresponding table; on any failure, previously installed handlers are
 * removed in reverse order and the LPAT table is freed.
 */
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
					struct regmap *regmap,
					const struct intel_pmic_opregion_data *d)
{
	acpi_status status = AE_OK;
	struct intel_pmic_opregion *opregion;
	int ret;

	if (!dev || !regmap || !d)
		return -EINVAL;

	if (!handle)
		return -ENODEV;

	opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL);
	if (!opregion)
		return -ENOMEM;

	mutex_init(&opregion->lock);
	opregion->regmap = regmap;
	opregion->lpat_table = acpi_lpat_get_conversion_table(handle);

	if (d->power_table_count)
		status = acpi_install_address_space_handler(handle,
						    PMIC_POWER_OPREGION_ID,
						    intel_pmic_power_handler,
						    NULL, opregion);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_error;
	}

	if (d->thermal_table_count)
		status = acpi_install_address_space_handler(handle,
						    PMIC_THERMAL_OPREGION_ID,
						    intel_pmic_thermal_handler,
						    NULL, opregion);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_remove_power_handler;
	}

	status = acpi_install_address_space_handler(handle,
			PMIC_REGS_OPREGION_ID, intel_pmic_regs_handler, NULL,
			opregion);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_remove_thermal_handler;
	}

	opregion->data = d;
	intel_pmic_opregion = opregion;
	return 0;

out_remove_thermal_handler:
	if (d->thermal_table_count)
		acpi_remove_address_space_handler(handle,
						  PMIC_THERMAL_OPREGION_ID,
						  intel_pmic_thermal_handler);

out_remove_power_handler:
	if (d->power_table_count)
		acpi_remove_address_space_handler(handle,
						  PMIC_POWER_OPREGION_ID,
						  intel_pmic_power_handler);

out_error:
	acpi_lpat_free_conversion_table(opregion->lpat_table);
	return ret;
}
EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);

/**
 * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
 * @i2c_address:  I2C client address for the PMIC
 * @reg_address:  PMIC register address
 * @value:        New value for the register bits to change
 * @mask:         Mask indicating which register bits to change
 *
 * DSI LCD panels describe an initialization sequence in the i915 VBT (Video
 * BIOS Tables) using so called MIPI sequences. One possible element in these
 * sequences is a PMIC specific element of 15 bytes.
 *
 * This function executes these PMIC specific elements sending the embedded
 * commands to the PMIC.
 *
 * Return 0 on success, < 0 on failure.
 */
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
					      u32 value, u32 mask)
{
	const struct intel_pmic_opregion_data *d;
	int ret;

	if (!intel_pmic_opregion) {
		pr_warn("%s: No PMIC registered\n", __func__);
		return -ENXIO;
	}

	d = intel_pmic_opregion->data;

	mutex_lock(&intel_pmic_opregion->lock);

	if (d->exec_mipi_pmic_seq_element) {
		ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
						    i2c_address, reg_address,
						    value, mask);
	} else if (d->pmic_i2c_address) {
		if (i2c_address == d->pmic_i2c_address) {
			ret = regmap_update_bits(intel_pmic_opregion->regmap,
						 reg_address, mask, value);
		} else {
			pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
			       __func__, i2c_address, reg_address, value, mask);
			ret = -ENXIO;
		}
	} else {
		pr_warn("%s: Not implemented\n", __func__);
		pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
			__func__, i2c_address, reg_address, value, mask);
		ret = -EOPNOTSUPP;
	}

	mutex_unlock(&intel_pmic_opregion->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
linux-master
drivers/acpi/pmic/intel_pmic.c
// SPDX-License-Identifier: GPL-2.0 /* * Intel CHT Whiskey Cove PMIC operation region driver * Copyright (C) 2017 Hans de Goede <[email protected]> * * Based on various non upstream patches to support the CHT Whiskey Cove PMIC: * Copyright (C) 2013-2015 Intel Corporation. All rights reserved. */ #include <linux/acpi.h> #include <linux/init.h> #include <linux/mfd/intel_soc_pmic.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "intel_pmic.h" #define CHT_WC_V1P05A_CTRL 0x6e3b #define CHT_WC_V1P15_CTRL 0x6e3c #define CHT_WC_V1P05A_VSEL 0x6e3d #define CHT_WC_V1P15_VSEL 0x6e3e #define CHT_WC_V1P8A_CTRL 0x6e56 #define CHT_WC_V1P8SX_CTRL 0x6e57 #define CHT_WC_VDDQ_CTRL 0x6e58 #define CHT_WC_V1P2A_CTRL 0x6e59 #define CHT_WC_V1P2SX_CTRL 0x6e5a #define CHT_WC_V1P8A_VSEL 0x6e5b #define CHT_WC_VDDQ_VSEL 0x6e5c #define CHT_WC_V2P8SX_CTRL 0x6e5d #define CHT_WC_V3P3A_CTRL 0x6e5e #define CHT_WC_V3P3SD_CTRL 0x6e5f #define CHT_WC_VSDIO_CTRL 0x6e67 #define CHT_WC_V3P3A_VSEL 0x6e68 #define CHT_WC_VPROG1A_CTRL 0x6e90 #define CHT_WC_VPROG1B_CTRL 0x6e91 #define CHT_WC_VPROG1F_CTRL 0x6e95 #define CHT_WC_VPROG2D_CTRL 0x6e99 #define CHT_WC_VPROG3A_CTRL 0x6e9a #define CHT_WC_VPROG3B_CTRL 0x6e9b #define CHT_WC_VPROG4A_CTRL 0x6e9c #define CHT_WC_VPROG4B_CTRL 0x6e9d #define CHT_WC_VPROG4C_CTRL 0x6e9e #define CHT_WC_VPROG4D_CTRL 0x6e9f #define CHT_WC_VPROG5A_CTRL 0x6ea0 #define CHT_WC_VPROG5B_CTRL 0x6ea1 #define CHT_WC_VPROG6A_CTRL 0x6ea2 #define CHT_WC_VPROG6B_CTRL 0x6ea3 #define CHT_WC_VPROG1A_VSEL 0x6ec0 #define CHT_WC_VPROG1B_VSEL 0x6ec1 #define CHT_WC_V1P8SX_VSEL 0x6ec2 #define CHT_WC_V1P2SX_VSEL 0x6ec3 #define CHT_WC_V1P2A_VSEL 0x6ec4 #define CHT_WC_VPROG1F_VSEL 0x6ec5 #define CHT_WC_VSDIO_VSEL 0x6ec6 #define CHT_WC_V2P8SX_VSEL 0x6ec7 #define CHT_WC_V3P3SD_VSEL 0x6ec8 #define CHT_WC_VPROG2D_VSEL 0x6ec9 #define CHT_WC_VPROG3A_VSEL 0x6eca #define CHT_WC_VPROG3B_VSEL 0x6ecb #define CHT_WC_VPROG4A_VSEL 0x6ecc #define CHT_WC_VPROG4B_VSEL 0x6ecd #define CHT_WC_VPROG4C_VSEL 
0x6ece #define CHT_WC_VPROG4D_VSEL 0x6ecf #define CHT_WC_VPROG5A_VSEL 0x6ed0 #define CHT_WC_VPROG5B_VSEL 0x6ed1 #define CHT_WC_VPROG6A_VSEL 0x6ed2 #define CHT_WC_VPROG6B_VSEL 0x6ed3 /* * Regulator support is based on the non upstream patch: * "regulator: whiskey_cove: implements Whiskey Cove pmic VRF support" * https://github.com/intel-aero/meta-intel-aero/blob/master/recipes-kernel/linux/linux-yocto/0019-regulator-whiskey_cove-implements-WhiskeyCove-pmic-V.patch */ static struct pmic_table power_table[] = { { .address = 0x0, .reg = CHT_WC_V1P8A_CTRL, .bit = 0x01, }, /* V18A */ { .address = 0x04, .reg = CHT_WC_V1P8SX_CTRL, .bit = 0x07, }, /* V18X */ { .address = 0x08, .reg = CHT_WC_VDDQ_CTRL, .bit = 0x01, }, /* VDDQ */ { .address = 0x0c, .reg = CHT_WC_V1P2A_CTRL, .bit = 0x07, }, /* V12A */ { .address = 0x10, .reg = CHT_WC_V1P2SX_CTRL, .bit = 0x07, }, /* V12X */ { .address = 0x14, .reg = CHT_WC_V2P8SX_CTRL, .bit = 0x07, }, /* V28X */ { .address = 0x18, .reg = CHT_WC_V3P3A_CTRL, .bit = 0x01, }, /* V33A */ { .address = 0x1c, .reg = CHT_WC_V3P3SD_CTRL, .bit = 0x07, }, /* V3SD */ { .address = 0x20, .reg = CHT_WC_VSDIO_CTRL, .bit = 0x07, }, /* VSD */ /* { .address = 0x24, .reg = ??, .bit = ??, }, ** VSW2 */ /* { .address = 0x28, .reg = ??, .bit = ??, }, ** VSW1 */ /* { .address = 0x2c, .reg = ??, .bit = ??, }, ** VUPY */ /* { .address = 0x30, .reg = ??, .bit = ??, }, ** VRSO */ { .address = 0x34, .reg = CHT_WC_VPROG1A_CTRL, .bit = 0x07, }, /* VP1A */ { .address = 0x38, .reg = CHT_WC_VPROG1B_CTRL, .bit = 0x07, }, /* VP1B */ { .address = 0x3c, .reg = CHT_WC_VPROG1F_CTRL, .bit = 0x07, }, /* VP1F */ { .address = 0x40, .reg = CHT_WC_VPROG2D_CTRL, .bit = 0x07, }, /* VP2D */ { .address = 0x44, .reg = CHT_WC_VPROG3A_CTRL, .bit = 0x07, }, /* VP3A */ { .address = 0x48, .reg = CHT_WC_VPROG3B_CTRL, .bit = 0x07, }, /* VP3B */ { .address = 0x4c, .reg = CHT_WC_VPROG4A_CTRL, .bit = 0x07, }, /* VP4A */ { .address = 0x50, .reg = CHT_WC_VPROG4B_CTRL, .bit = 0x07, }, /* VP4B */ { .address = 
0x54, .reg = CHT_WC_VPROG4C_CTRL, .bit = 0x07, }, /* VP4C */ { .address = 0x58, .reg = CHT_WC_VPROG4D_CTRL, .bit = 0x07, }, /* VP4D */ { .address = 0x5c, .reg = CHT_WC_VPROG5A_CTRL, .bit = 0x07, }, /* VP5A */ { .address = 0x60, .reg = CHT_WC_VPROG5B_CTRL, .bit = 0x07, }, /* VP5B */ { .address = 0x64, .reg = CHT_WC_VPROG6A_CTRL, .bit = 0x07, }, /* VP6A */ { .address = 0x68, .reg = CHT_WC_VPROG6B_CTRL, .bit = 0x07, }, /* VP6B */ /* { .address = 0x6c, .reg = ??, .bit = ??, } ** VP7A */ }; static int intel_cht_wc_pmic_get_power(struct regmap *regmap, int reg, int bit, u64 *value) { int data; if (regmap_read(regmap, reg, &data)) return -EIO; *value = (data & bit) ? 1 : 0; return 0; } static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg, int bitmask, bool on) { return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0); } static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap, u16 i2c_client_address, u32 reg_address, u32 value, u32 mask) { u32 address; if (i2c_client_address > 0xff || reg_address > 0xff) { pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n", __func__, i2c_client_address, reg_address); return -ERANGE; } address = (i2c_client_address << 8) | reg_address; return regmap_update_bits(regmap, address, mask, value); } /* * The thermal table and ops are empty, we do not support the Thermal opregion * (DPTF) due to lacking documentation. 
*/ static const struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = { .get_power = intel_cht_wc_pmic_get_power, .update_power = intel_cht_wc_pmic_update_power, .exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element, .lpat_raw_to_temp = acpi_lpat_raw_to_temp, .power_table = power_table, .power_table_count = ARRAY_SIZE(power_table), }; static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); return intel_pmic_install_opregion_handler(&pdev->dev, ACPI_HANDLE(pdev->dev.parent), pmic->regmap, &intel_cht_wc_pmic_opregion_data); } static const struct platform_device_id cht_wc_opregion_id_table[] = { { .name = "cht_wcove_region" }, {}, }; static struct platform_driver intel_cht_wc_pmic_opregion_driver = { .probe = intel_cht_wc_pmic_opregion_probe, .driver = { .name = "cht_whiskey_cove_pmic", }, .id_table = cht_wc_opregion_id_table, }; builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
linux-master
drivers/acpi/pmic/intel_pmic_chtwc.c