python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
 *
 * Copyright (C) 2000 - 2023, Intel Corp.
 *
 *****************************************************************************/

#include "acpidump.h"

#define _COMPONENT          ACPI_OS_SERVICES
ACPI_MODULE_NAME("oslinuxtbl")

#ifndef PATH_MAX
#define PATH_MAX 256
#endif

/* List of information about obtained ACPI tables */

typedef struct osl_table_info {
    struct osl_table_info *next;
    u32 instance;
    char signature[ACPI_NAMESEG_SIZE];

} osl_table_info;

/* Local prototypes */

static acpi_status osl_table_initialize(void);

static acpi_status
osl_table_name_from_file(char *filename, char *signature, u32 *instance);

static acpi_status osl_add_table_to_list(char *signature, u32 instance);

static acpi_status
osl_read_table_from_file(char *filename,
             acpi_size file_offset, struct acpi_table_header **table);

static acpi_status
osl_map_table(acpi_size address,
          char *signature, struct acpi_table_header **table);

static void osl_unmap_table(struct acpi_table_header *table);

static acpi_physical_address
osl_find_rsdp_via_efi_by_keyword(FILE * file, const char *keyword);

static acpi_physical_address osl_find_rsdp_via_efi(void);

static acpi_status osl_load_rsdp(void);

static acpi_status osl_list_customized_tables(char *directory);

static acpi_status
osl_get_customized_table(char *pathname,
             char *signature,
             u32 instance,
             struct acpi_table_header **table,
             acpi_physical_address *address);

static acpi_status osl_list_bios_tables(void);

static acpi_status
osl_get_bios_table(char *signature,
           u32 instance,
           struct acpi_table_header **table,
           acpi_physical_address *address);

static acpi_status osl_get_last_status(acpi_status default_status);

/* File locations */

#define DYNAMIC_TABLE_DIR   "/sys/firmware/acpi/tables/dynamic"
#define STATIC_TABLE_DIR    "/sys/firmware/acpi/tables"
#define EFI_SYSTAB          "/sys/firmware/efi/systab"

/* Should we get dynamically loaded SSDTs from DYNAMIC_TABLE_DIR? */

u8 gbl_dump_dynamic_tables = TRUE;

/* Initialization flags */

u8 gbl_table_list_initialized = FALSE;

/* Local copies of main ACPI tables */

struct acpi_table_rsdp gbl_rsdp;
struct acpi_table_fadt *gbl_fadt = NULL;
struct acpi_table_rsdt *gbl_rsdt = NULL;
struct acpi_table_xsdt *gbl_xsdt = NULL;

/* Table addresses */

acpi_physical_address gbl_fadt_address = 0;
acpi_physical_address gbl_rsdp_address = 0;

/* Revision of RSD PTR */

u8 gbl_revision = 0;

/* Global table list (built once on first table access) */

struct osl_table_info *gbl_table_list_head = NULL;
u32 gbl_table_count = 0;

/******************************************************************************
 *
 * FUNCTION:    osl_get_last_status
 *
 * PARAMETERS:  default_status      - Default error status to return
 *
 * RETURN:      Status; Converted from errno.
 *
 * DESCRIPTION: Get last errno and convert it to acpi_status.
 *
 *****************************************************************************/

static acpi_status osl_get_last_status(acpi_status default_status)
{

    switch (errno) {
    case EACCES:
    case EPERM:

        return (AE_ACCESS);

    case ENOENT:

        return (AE_NOT_FOUND);

    case ENOMEM:

        return (AE_NO_MEMORY);

    default:

        return (default_status);
    }
}

/******************************************************************************
 *
 * FUNCTION:    acpi_os_get_table_by_address
 *
 * PARAMETERS:  address         - Physical address of the ACPI table
 *              table           - Where a pointer to the table is returned
 *
 * RETURN:      Status; Table buffer is returned if AE_OK.
 *              AE_NOT_FOUND: A valid table was not found at the address
 *
 * DESCRIPTION: Get an ACPI table via a physical memory address.
 *
 *****************************************************************************/

acpi_status
acpi_os_get_table_by_address(acpi_physical_address address,
                 struct acpi_table_header **table)
{
    u32 table_length;
    struct acpi_table_header *mapped_table;
    struct acpi_table_header *local_table = NULL;
    acpi_status status = AE_OK;

    /* Get main ACPI tables from memory on first invocation of this function */

    status = osl_table_initialize();
    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* Map the table and validate it */

    status = osl_map_table(address, NULL, &mapped_table);
    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* Copy table to local buffer and return it */

    table_length = ap_get_table_length(mapped_table);
    if (table_length == 0) {
        status = AE_BAD_HEADER;
        goto exit;
    }

    local_table = calloc(1, table_length);
    if (!local_table) {
        status = AE_NO_MEMORY;
        goto exit;
    }

    memcpy(local_table, mapped_table, table_length);

exit:
    /* NOTE(review): on the AE_BAD_HEADER/AE_NO_MEMORY paths, *table is
     * deliberately set to NULL (local_table was never allocated). */
    osl_unmap_table(mapped_table);
    *table = local_table;
    return (status);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_os_get_table_by_name
 *
 * PARAMETERS:  signature       - ACPI Signature for desired table. Must be
 *                                a null terminated 4-character string.
 *              instance        - Multiple table support for SSDT/UEFI (0...n)
 *                                Must be 0 for other tables.
 *              table           - Where a pointer to the table is returned
 *              address         - Where the table physical address is returned
 *
 * RETURN:      Status; Table buffer and physical address returned if AE_OK.
 *              AE_LIMIT: Instance is beyond valid limit
 *              AE_NOT_FOUND: A table with the signature was not found
 *
 * NOTE:        Assumes the input signature is uppercase.
 *
 *****************************************************************************/

acpi_status
acpi_os_get_table_by_name(char *signature,
              u32 instance,
              struct acpi_table_header **table,
              acpi_physical_address *address)
{
    acpi_status status;

    /* Get main ACPI tables from memory on first invocation of this function */

    status = osl_table_initialize();
    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* Not a main ACPI table, attempt to extract it from the RSDT/XSDT */

    if (!gbl_dump_customized_tables) {

        /* Attempt to get the table from the memory */

        status =
            osl_get_bios_table(signature, instance, table, address);
    } else {
        /* Attempt to get the table from the static directory */

        status = osl_get_customized_table(STATIC_TABLE_DIR, signature,
                          instance, table, address);
    }

    /* AE_LIMIT means the instance was not found in the primary source;
     * fall through to the dynamically-loaded tables directory. */

    if (ACPI_FAILURE(status) && status == AE_LIMIT) {
        if (gbl_dump_dynamic_tables) {

            /* Attempt to get a dynamic table */

            status =
                osl_get_customized_table(DYNAMIC_TABLE_DIR,
                             signature, instance,
                             table, address);
        }
    }

    return (status);
}

/******************************************************************************
 *
 * FUNCTION:    osl_add_table_to_list
 *
 * PARAMETERS:  signature       - Table signature
 *              instance        - Table instance
 *
 * RETURN:      Status; Successfully added if AE_OK.
 *              AE_NO_MEMORY: Memory allocation error
 *
 * DESCRIPTION: Insert a table structure into OSL table list.
 *
 *****************************************************************************/

static acpi_status osl_add_table_to_list(char *signature, u32 instance)
{
    struct osl_table_info *new_info;
    struct osl_table_info *next;
    u32 next_instance = 0;
    u8 found = FALSE;

    new_info = calloc(1, sizeof(struct osl_table_info));
    if (!new_info) {
        return (AE_NO_MEMORY);
    }

    ACPI_COPY_NAMESEG(new_info->signature, signature);

    if (!gbl_table_list_head) {
        gbl_table_list_head = new_info;
    } else {
        /* Walk the list; track whether this (signature, instance) pair
         * already exists and compute the next free instance number. */
        next = gbl_table_list_head;
        while (1) {
            if (ACPI_COMPARE_NAMESEG(next->signature, signature)) {
                if (next->instance == instance) {
                    found = TRUE;
                }
                if (next->instance >= next_instance) {
                    next_instance = next->instance + 1;
                }
            }

            if (!next->next) {
                break;
            }
            next = next->next;
        }
        next->next = new_info;
    }

    if (found) {
        if (instance) {
            fprintf(stderr,
                "%4.4s: Warning unmatched table instance %d, expected %d\n",
                signature, instance, next_instance);
        }
        instance = next_instance;
    }

    new_info->instance = instance;
    gbl_table_count++;

    return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    acpi_os_get_table_by_index
 *
 * PARAMETERS:  index           - Which table to get
 *              table           - Where a pointer to the table is returned
 *              instance        - Where a pointer to the table instance no. is
 *                                returned
 *              address         - Where the table physical address is returned
 *
 * RETURN:      Status; Table buffer and physical address returned if AE_OK.
 *              AE_LIMIT: Index is beyond valid limit
 *
 * DESCRIPTION: Get an ACPI table via an index value (0 through n). Returns
 *              AE_LIMIT when an invalid index is reached. Index is not
 *              necessarily an index into the RSDT/XSDT.
 *
 *****************************************************************************/

acpi_status
acpi_os_get_table_by_index(u32 index,
               struct acpi_table_header **table,
               u32 *instance, acpi_physical_address *address)
{
    struct osl_table_info *info;
    acpi_status status;
    u32 i;

    /* Get main ACPI tables from memory on first invocation of this function */

    status = osl_table_initialize();
    if (ACPI_FAILURE(status)) {
        return (status);
    }

    /* Validate Index */

    if (index >= gbl_table_count) {
        return (AE_LIMIT);
    }

    /* Point to the table list entry specified by the Index argument */

    info = gbl_table_list_head;
    for (i = 0; i < index; i++) {
        info = info->next;
    }

    /* Now we can just get the table via the signature */

    status = acpi_os_get_table_by_name(info->signature, info->instance,
                       table, address);

    if (ACPI_SUCCESS(status)) {
        *instance = info->instance;
    }
    return (status);
}

/******************************************************************************
 *
 * FUNCTION:    osl_find_rsdp_via_efi_by_keyword
 *
 * PARAMETERS:  keyword         - Character string indicating ACPI GUID version
 *                                in the EFI table
 *
 * RETURN:      RSDP address if found
 *
 * DESCRIPTION: Find RSDP address via EFI using keyword indicating the ACPI
 *              GUID version.
 *
 *****************************************************************************/

static acpi_physical_address
osl_find_rsdp_via_efi_by_keyword(FILE * file, const char *keyword)
{
    char buffer[80];
    unsigned long long address = 0;
    char format[32];

    /* Build a scanf format such as "ACPI20=%llx" at runtime, then scan
     * each line of the systab file for a matching "keyword=address" entry. */

    snprintf(format, 32, "%s=%s", keyword, "%llx");
    fseek(file, 0, SEEK_SET);
    while (fgets(buffer, 80, file)) {
        if (sscanf(buffer, format, &address) == 1) {
            break;
        }
    }

    return ((acpi_physical_address)(address));
}

/******************************************************************************
 *
 * FUNCTION:    osl_find_rsdp_via_efi
 *
 * PARAMETERS:  None
 *
 * RETURN:      RSDP address if found
 *
 * DESCRIPTION: Find RSDP address via EFI.
 *
 *****************************************************************************/

static acpi_physical_address osl_find_rsdp_via_efi(void)
{
    FILE *file;
    acpi_physical_address address = 0;

    /* Prefer the ACPI 2.0+ entry; fall back to the ACPI 1.0 entry */

    file = fopen(EFI_SYSTAB, "r");
    if (file) {
        address = osl_find_rsdp_via_efi_by_keyword(file, "ACPI20");
        if (!address) {
            address =
                osl_find_rsdp_via_efi_by_keyword(file, "ACPI");
        }
        fclose(file);
    }

    return (address);
}

/******************************************************************************
 *
 * FUNCTION:    osl_load_rsdp
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Scan and load RSDP.
 *
 *****************************************************************************/

static acpi_status osl_load_rsdp(void)
{
    struct acpi_table_header *mapped_table;
    u8 *rsdp_address;
    acpi_physical_address rsdp_base;
    acpi_size rsdp_size;

    /* Get RSDP from memory */

    rsdp_size = sizeof(struct acpi_table_rsdp);
    if (gbl_rsdp_base) {
        rsdp_base = gbl_rsdp_base;
    } else {
        rsdp_base = osl_find_rsdp_via_efi();
    }

    if (!rsdp_base) {
        /* No EFI hint available: scan the legacy high-memory window */
        rsdp_base = ACPI_HI_RSDP_WINDOW_BASE;
        rsdp_size = ACPI_HI_RSDP_WINDOW_SIZE;
    }

    rsdp_address = acpi_os_map_memory(rsdp_base, rsdp_size);
    if (!rsdp_address) {
        return (osl_get_last_status(AE_BAD_ADDRESS));
    }

    /* Search low memory for the RSDP */

    mapped_table = ACPI_CAST_PTR(struct acpi_table_header,
                     acpi_tb_scan_memory_for_rsdp(rsdp_address,
                                  rsdp_size));
    if (!mapped_table) {
        acpi_os_unmap_memory(rsdp_address, rsdp_size);
        return (AE_NOT_FOUND);
    }

    gbl_rsdp_address =
        rsdp_base + (ACPI_CAST8(mapped_table) - rsdp_address);

    memcpy(&gbl_rsdp, mapped_table, sizeof(struct acpi_table_rsdp));
    acpi_os_unmap_memory(rsdp_address, rsdp_size);

    return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    osl_can_use_xsdt
 *
 * PARAMETERS:  None
 *
 * RETURN:      TRUE if XSDT is allowed to be used.
 *
 * DESCRIPTION: This function collects logic that can be used to determine if
 *              XSDT should be used instead of RSDT.
 *
 *****************************************************************************/

static u8 osl_can_use_xsdt(void)
{
    if (gbl_revision && !acpi_gbl_do_not_use_xsdt) {
        return (TRUE);
    } else {
        return (FALSE);
    }
}

/******************************************************************************
 *
 * FUNCTION:    osl_table_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize ACPI table data. Get and store main ACPI tables to
 *              local variables. Main ACPI tables include RSDT, FADT, RSDT,
 *              and/or XSDT.
 *
 *****************************************************************************/

static acpi_status osl_table_initialize(void)
{
    acpi_status status;
    acpi_physical_address address;

    if (gbl_table_list_initialized) {
        return (AE_OK);
    }

    if (!gbl_dump_customized_tables) {

        /* Get RSDP from memory */

        status = osl_load_rsdp();
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        /* Get XSDT from memory */

        if (gbl_rsdp.revision && !gbl_do_not_dump_xsdt) {
            if (gbl_xsdt) {
                free(gbl_xsdt);
                gbl_xsdt = NULL;
            }

            /* gbl_revision must be set before osl_get_bios_table so
             * that the XSDT address in the RSDP becomes visible. */

            gbl_revision = 2;
            status = osl_get_bios_table(ACPI_SIG_XSDT, 0,
                            ACPI_CAST_PTR(struct
                                      acpi_table_header
                                      *,
                                      &gbl_xsdt),
                            &address);
            if (ACPI_FAILURE(status)) {
                return (status);
            }
        }

        /* Get RSDT from memory */

        if (gbl_rsdp.rsdt_physical_address) {
            if (gbl_rsdt) {
                free(gbl_rsdt);
                gbl_rsdt = NULL;
            }

            status = osl_get_bios_table(ACPI_SIG_RSDT, 0,
                            ACPI_CAST_PTR(struct
                                      acpi_table_header
                                      *,
                                      &gbl_rsdt),
                            &address);
            if (ACPI_FAILURE(status)) {
                return (status);
            }
        }

        /* Get FADT from memory */

        if (gbl_fadt) {
            free(gbl_fadt);
            gbl_fadt = NULL;
        }

        status = osl_get_bios_table(ACPI_SIG_FADT, 0,
                        ACPI_CAST_PTR(struct
                                  acpi_table_header
                                  *, &gbl_fadt),
                        &gbl_fadt_address);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        /* Add mandatory tables to global table list first */

        status = osl_add_table_to_list(ACPI_RSDP_NAME, 0);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        status = osl_add_table_to_list(ACPI_SIG_RSDT, 0);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        if (gbl_revision == 2) {
            status = osl_add_table_to_list(ACPI_SIG_XSDT, 0);
            if (ACPI_FAILURE(status)) {
                return (status);
            }
        }

        status = osl_add_table_to_list(ACPI_SIG_DSDT, 0);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        status = osl_add_table_to_list(ACPI_SIG_FACS, 0);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        /* Add all tables found in the memory */

        status = osl_list_bios_tables();
        if (ACPI_FAILURE(status)) {
            return (status);
        }
    } else {

        /* Add all tables found in the static directory */

        status = osl_list_customized_tables(STATIC_TABLE_DIR);
        if (ACPI_FAILURE(status)) {
            return (status);
        }
    }

    if (gbl_dump_dynamic_tables) {

        /* Add all dynamically loaded tables in the dynamic directory */

        status = osl_list_customized_tables(DYNAMIC_TABLE_DIR);
        if (ACPI_FAILURE(status)) {
            return (status);
        }
    }

    gbl_table_list_initialized = TRUE;
    return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    osl_list_bios_tables
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status; Table list is initialized if AE_OK.
 *
 * DESCRIPTION: Add ACPI tables to the table list from memory.
 *
 * NOTE:        This works on Linux as table customization does not modify the
 *              addresses stored in RSDP/RSDT/XSDT/FADT.
 *
 *****************************************************************************/

static acpi_status osl_list_bios_tables(void)
{
    struct acpi_table_header *mapped_table = NULL;
    u8 *table_data;
    u8 number_of_tables;
    u8 item_size;
    acpi_physical_address table_address = 0;
    acpi_status status = AE_OK;
    u32 i;

    /* NOTE(review): number_of_tables is a u8, so a root table with more
     * than 255 entries would be truncated here — confirm acceptable. */

    if (osl_can_use_xsdt()) {
        item_size = sizeof(u64);
        table_data =
            ACPI_CAST8(gbl_xsdt) + sizeof(struct acpi_table_header);
        number_of_tables =
            (u8)((gbl_xsdt->header.length -
              sizeof(struct acpi_table_header))
             / item_size);
    } else {        /* Use RSDT if XSDT is not available */

        item_size = sizeof(u32);
        table_data =
            ACPI_CAST8(gbl_rsdt) + sizeof(struct acpi_table_header);
        number_of_tables =
            (u8)((gbl_rsdt->header.length -
              sizeof(struct acpi_table_header))
             / item_size);
    }

    /* Search RSDT/XSDT for the requested table */

    for (i = 0; i < number_of_tables; ++i, table_data += item_size) {
        if (osl_can_use_xsdt()) {
            table_address =
                (acpi_physical_address)(*ACPI_CAST64(table_data));
        } else {
            table_address =
                (acpi_physical_address)(*ACPI_CAST32(table_data));
        }

        /* Skip NULL entries in RSDT/XSDT */

        if (table_address == 0) {
            continue;
        }

        status = osl_map_table(table_address, NULL, &mapped_table);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        osl_add_table_to_list(mapped_table->signature, 0);
        osl_unmap_table(mapped_table);
    }

    return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    osl_get_bios_table
 *
 * PARAMETERS:  signature       - ACPI Signature for common table. Must be
 *                                a null terminated 4-character string.
 *              instance        - Multiple table support for SSDT/UEFI (0...n)
 *                                Must be 0 for other tables.
 *              table           - Where a pointer to the table is returned
 *              address         - Where the table physical address is returned
 *
 * RETURN:      Status; Table buffer and physical address returned if AE_OK.
 *              AE_LIMIT: Instance is beyond valid limit
 *              AE_NOT_FOUND: A table with the signature was not found
 *
 * DESCRIPTION: Get a BIOS provided ACPI table
 *
 * NOTE:        Assumes the input signature is uppercase.
 *
 *****************************************************************************/

static acpi_status
osl_get_bios_table(char *signature,
           u32 instance,
           struct acpi_table_header **table,
           acpi_physical_address *address)
{
    struct acpi_table_header *local_table = NULL;
    struct acpi_table_header *mapped_table = NULL;
    u8 *table_data;
    u8 number_of_tables;
    u8 item_size;
    u32 current_instance = 0;
    acpi_physical_address table_address;
    acpi_physical_address first_table_address = 0;
    u32 table_length = 0;
    acpi_status status = AE_OK;
    u32 i;

    /* Handle special tables whose addresses are not in RSDT/XSDT */

    if (ACPI_COMPARE_NAMESEG(signature, ACPI_RSDP_NAME) ||
        ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_RSDT) ||
        ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_XSDT) ||
        ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_DSDT) ||
        ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_FACS)) {

find_next_instance:

        table_address = 0;

        /*
         * Get the appropriate address, either 32-bit or 64-bit. Be very
         * careful about the FADT length and validate table addresses.
         * Note: The 64-bit addresses have priority.
         */
        if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_DSDT)) {
            if (current_instance < 2) {
                if ((gbl_fadt->header.length >=
                     MIN_FADT_FOR_XDSDT) && gbl_fadt->Xdsdt
                    && current_instance == 0) {
                    table_address =
                        (acpi_physical_address)gbl_fadt->
                        Xdsdt;
                } else
                    if ((gbl_fadt->header.length >=
                     MIN_FADT_FOR_DSDT)
                    && gbl_fadt->dsdt !=
                    first_table_address) {
                    table_address =
                        (acpi_physical_address)gbl_fadt->
                        dsdt;
                }
            }
        } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_FACS)) {
            if (current_instance < 2) {
                if ((gbl_fadt->header.length >=
                     MIN_FADT_FOR_XFACS) && gbl_fadt->Xfacs
                    && current_instance == 0) {
                    table_address =
                        (acpi_physical_address)gbl_fadt->
                        Xfacs;
                } else
                    if ((gbl_fadt->header.length >=
                     MIN_FADT_FOR_FACS)
                    && gbl_fadt->facs !=
                    first_table_address) {
                    table_address =
                        (acpi_physical_address)gbl_fadt->
                        facs;
                }
            }
        } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_XSDT)) {
            if (!gbl_revision) {
                return (AE_BAD_SIGNATURE);
            }
            if (current_instance == 0) {
                table_address =
                    (acpi_physical_address)gbl_rsdp.
                    xsdt_physical_address;
            }
        } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_RSDT)) {
            if (current_instance == 0) {
                table_address =
                    (acpi_physical_address)gbl_rsdp.
                    rsdt_physical_address;
            }
        } else {
            if (current_instance == 0) {
                table_address =
                    (acpi_physical_address)gbl_rsdp_address;
                signature = ACPI_SIG_RSDP;
            }
        }

        if (table_address == 0) {
            goto exit_find_table;
        }

        /* Now we can get the requested special table */

        status = osl_map_table(table_address, signature, &mapped_table);
        if (ACPI_FAILURE(status)) {
            return (status);
        }

        table_length = ap_get_table_length(mapped_table);
        if (first_table_address == 0) {
            first_table_address = table_address;
        }

        /* Match table instance */

        if (current_instance != instance) {
            osl_unmap_table(mapped_table);
            mapped_table = NULL;
            current_instance++;
            goto find_next_instance;
        }
    } else {        /* Case for a normal ACPI table */

        if (osl_can_use_xsdt()) {
            item_size = sizeof(u64);
            table_data =
                ACPI_CAST8(gbl_xsdt) +
                sizeof(struct acpi_table_header);
            number_of_tables =
                (u8)((gbl_xsdt->header.length -
                  sizeof(struct acpi_table_header))
                 / item_size);
        } else {    /* Use RSDT if XSDT is not available */

            item_size = sizeof(u32);
            table_data =
                ACPI_CAST8(gbl_rsdt) +
                sizeof(struct acpi_table_header);
            number_of_tables =
                (u8)((gbl_rsdt->header.length -
                  sizeof(struct acpi_table_header))
                 / item_size);
        }

        /* Search RSDT/XSDT for the requested table */

        for (i = 0; i < number_of_tables; ++i, table_data += item_size) {
            if (osl_can_use_xsdt()) {
                table_address =
                    (acpi_physical_address)(*ACPI_CAST64
                                (table_data));
            } else {
                table_address =
                    (acpi_physical_address)(*ACPI_CAST32
                                (table_data));
            }

            /* Skip NULL entries in RSDT/XSDT */

            if (table_address == 0) {
                continue;
            }

            status =
                osl_map_table(table_address, NULL, &mapped_table);
            if (ACPI_FAILURE(status)) {
                return (status);
            }
            table_length = mapped_table->length;

            /* Does this table match the requested signature? */

            if (!ACPI_COMPARE_NAMESEG
                (mapped_table->signature, signature)) {
                osl_unmap_table(mapped_table);
                mapped_table = NULL;
                continue;
            }

            /* Match table instance (for SSDT/UEFI tables) */

            if (current_instance != instance) {
                osl_unmap_table(mapped_table);
                mapped_table = NULL;
                current_instance++;
                continue;
            }

            break;
        }
    }

exit_find_table:

    if (!mapped_table) {
        return (AE_LIMIT);
    }

    if (table_length == 0) {
        status = AE_BAD_HEADER;
        goto exit;
    }

    /* Copy table to local buffer and return it */

    local_table = calloc(1, table_length);
    if (!local_table) {
        status = AE_NO_MEMORY;
        goto exit;
    }

    memcpy(local_table, mapped_table, table_length);
    *address = table_address;
    *table = local_table;

exit:
    osl_unmap_table(mapped_table);
    return (status);
}

/******************************************************************************
 *
 * FUNCTION:    osl_list_customized_tables
 *
 * PARAMETERS:  directory       - Directory that contains the tables
 *
 * RETURN:      Status; Table list is initialized if AE_OK.
 *
 * DESCRIPTION: Add ACPI tables to the table list from a directory.
 *
 *****************************************************************************/

static acpi_status osl_list_customized_tables(char *directory)
{
    void *table_dir;
    u32 instance;
    char temp_name[ACPI_NAMESEG_SIZE];
    char *filename;
    acpi_status status = AE_OK;

    /* Open the requested directory */

    table_dir = acpi_os_open_directory(directory, "*", REQUEST_FILE_ONLY);
    if (!table_dir) {
        return (osl_get_last_status(AE_NOT_FOUND));
    }

    /* Examine all entries in this directory */

    while ((filename = acpi_os_get_next_filename(table_dir))) {

        /* Extract table name and instance number */

        status =
            osl_table_name_from_file(filename, temp_name, &instance);

        /* Ignore meaningless files */

        if (ACPI_FAILURE(status)) {
            continue;
        }

        /* Add new info node to global table list */

        status = osl_add_table_to_list(temp_name, instance);
        if (ACPI_FAILURE(status)) {
            break;
        }
    }

    acpi_os_close_directory(table_dir);
    return (status);
}

/******************************************************************************
 *
 * FUNCTION:    osl_map_table
 *
 * PARAMETERS:  address         - Address of the table in memory
 *              signature       - Optional ACPI Signature for desired table.
 *                                Null terminated 4-character string.
 *              table           - Where a pointer to the mapped table is
 *                                returned
 *
 * RETURN:      Status; Mapped table is returned if AE_OK.
 *              AE_NOT_FOUND: A valid table was not found at the address
 *
 * DESCRIPTION: Map entire ACPI table into caller's address space.
 *
 *****************************************************************************/

static acpi_status
osl_map_table(acpi_size address,
          char *signature, struct acpi_table_header **table)
{
    struct acpi_table_header *mapped_table;
    u32 length;

    if (!address) {
        return (AE_BAD_ADDRESS);
    }

    /*
     * Map the header so we can get the table length.
     * Use sizeof (struct acpi_table_header) as:
     * 1. it is bigger than 24 to include RSDP->Length
     * 2. it is smaller than sizeof (struct acpi_table_rsdp)
     */
    mapped_table =
        acpi_os_map_memory(address, sizeof(struct acpi_table_header));
    if (!mapped_table) {
        fprintf(stderr, "Could not map table header at 0x%8.8X%8.8X\n",
            ACPI_FORMAT_UINT64(address));
        return (osl_get_last_status(AE_BAD_ADDRESS));
    }

    /* If specified, signature must match */

    if (signature) {
        if (ACPI_VALIDATE_RSDP_SIG(signature)) {
            if (!ACPI_VALIDATE_RSDP_SIG(mapped_table->signature)) {
                acpi_os_unmap_memory(mapped_table,
                             sizeof(struct
                                acpi_table_header));
                return (AE_BAD_SIGNATURE);
            }
        } else
            if (!ACPI_COMPARE_NAMESEG
            (signature, mapped_table->signature)) {
            acpi_os_unmap_memory(mapped_table,
                         sizeof(struct
                            acpi_table_header));
            return (AE_BAD_SIGNATURE);
        }
    }

    /* Map the entire table */

    length = ap_get_table_length(mapped_table);
    acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header));
    if (length == 0) {
        return (AE_BAD_HEADER);
    }

    mapped_table = acpi_os_map_memory(address, length);
    if (!mapped_table) {
        fprintf(stderr,
            "Could not map table at 0x%8.8X%8.8X length %8.8X\n",
            ACPI_FORMAT_UINT64(address), length);
        return (osl_get_last_status(AE_INVALID_TABLE_LENGTH));
    }

    /* Checksum result is reported elsewhere; failure is non-fatal here */

    (void)ap_is_valid_checksum(mapped_table);

    *table = mapped_table;
    return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    osl_unmap_table
 *
 * PARAMETERS:  table           - A pointer to the mapped table
 *
 * RETURN:      None
 *
 * DESCRIPTION: Unmap entire ACPI table.
 *
 *****************************************************************************/

static void osl_unmap_table(struct acpi_table_header *table)
{
    if (table) {
        acpi_os_unmap_memory(table, ap_get_table_length(table));
    }
}

/******************************************************************************
 *
 * FUNCTION:    osl_table_name_from_file
 *
 * PARAMETERS:  filename        - File that contains the desired table
 *              signature       - Pointer to 4-character buffer to store
 *                                extracted table signature.
 *              instance        - Pointer to integer to store extracted
 *                                table instance number.
 *
 * RETURN:      Status; Table name is extracted if AE_OK.
 *
 * DESCRIPTION: Extract table signature and instance number from a table file
 *              name.
 *
 *****************************************************************************/

static acpi_status
osl_table_name_from_file(char *filename, char *signature, u32 *instance)
{

    /* Ignore meaningless files */

    if (strlen(filename) < ACPI_NAMESEG_SIZE) {
        return (AE_BAD_SIGNATURE);
    }

    /* Extract instance number, e.g. "SSDT2" -> instance 2 */

    if (isdigit((int)filename[ACPI_NAMESEG_SIZE])) {
        sscanf(&filename[ACPI_NAMESEG_SIZE], "%u", instance);
    } else if (strlen(filename) != ACPI_NAMESEG_SIZE) {
        return (AE_BAD_SIGNATURE);
    } else {
        *instance = 0;
    }

    /* Extract signature */

    ACPI_COPY_NAMESEG(signature, filename);
    return (AE_OK);
}

/******************************************************************************
 *
 * FUNCTION:    osl_read_table_from_file
 *
 * PARAMETERS:  filename        - File that contains the desired table
 *              file_offset     - Offset of the table in file
 *              table           - Where a pointer to the table is returned
 *
 * RETURN:      Status; Table buffer is returned if AE_OK.
 *
 * DESCRIPTION: Read an ACPI table from a file.
* *****************************************************************************/ static acpi_status osl_read_table_from_file(char *filename, acpi_size file_offset, struct acpi_table_header **table) { FILE *table_file; struct acpi_table_header header; struct acpi_table_header *local_table = NULL; u32 table_length; s32 count; acpi_status status = AE_OK; /* Open the file */ table_file = fopen(filename, "rb"); if (table_file == NULL) { fprintf(stderr, "Could not open table file: %s\n", filename); return (osl_get_last_status(AE_NOT_FOUND)); } fseek(table_file, file_offset, SEEK_SET); /* Read the Table header to get the table length */ count = fread(&header, 1, sizeof(struct acpi_table_header), table_file); if (count != sizeof(struct acpi_table_header)) { fprintf(stderr, "Could not read table header: %s\n", filename); status = AE_BAD_HEADER; goto exit; } #ifdef ACPI_OBSOLETE_FUNCTIONS /* If signature is specified, it must match the table */ if (signature) { if (ACPI_VALIDATE_RSDP_SIG(signature)) { if (!ACPI_VALIDATE_RSDP_SIG(header.signature)) { fprintf(stderr, "Incorrect RSDP signature: found %8.8s\n", header.signature); status = AE_BAD_SIGNATURE; goto exit; } } else if (!ACPI_COMPARE_NAMESEG(signature, header.signature)) { fprintf(stderr, "Incorrect signature: Expecting %4.4s, found %4.4s\n", signature, header.signature); status = AE_BAD_SIGNATURE; goto exit; } } #endif table_length = ap_get_table_length(&header); if (table_length == 0) { status = AE_BAD_HEADER; goto exit; } /* Read the entire table into a local buffer */ local_table = calloc(1, table_length); if (!local_table) { fprintf(stderr, "%4.4s: Could not allocate buffer for table of length %X\n", header.signature, table_length); status = AE_NO_MEMORY; goto exit; } fseek(table_file, file_offset, SEEK_SET); count = fread(local_table, 1, table_length, table_file); if (count != table_length) { fprintf(stderr, "%4.4s: Could not read table content\n", header.signature); status = AE_INVALID_TABLE_LENGTH; goto exit; } 
/* Validate checksum */ (void)ap_is_valid_checksum(local_table); exit: fclose(table_file); *table = local_table; return (status); } /****************************************************************************** * * FUNCTION: osl_get_customized_table * * PARAMETERS: pathname - Directory to find Linux customized table * signature - ACPI Signature for desired table. Must be * a null terminated 4-character string. * instance - Multiple table support for SSDT/UEFI (0...n) * Must be 0 for other tables. * table - Where a pointer to the table is returned * address - Where the table physical address is returned * * RETURN: Status; Table buffer is returned if AE_OK. * AE_LIMIT: Instance is beyond valid limit * AE_NOT_FOUND: A table with the signature was not found * * DESCRIPTION: Get an OS customized table. * *****************************************************************************/ static acpi_status osl_get_customized_table(char *pathname, char *signature, u32 instance, struct acpi_table_header **table, acpi_physical_address *address) { void *table_dir; u32 current_instance = 0; char temp_name[ACPI_NAMESEG_SIZE]; char table_filename[PATH_MAX]; char *filename; acpi_status status; /* Open the directory for customized tables */ table_dir = acpi_os_open_directory(pathname, "*", REQUEST_FILE_ONLY); if (!table_dir) { return (osl_get_last_status(AE_NOT_FOUND)); } /* Attempt to find the table in the directory */ while ((filename = acpi_os_get_next_filename(table_dir))) { /* Ignore meaningless files */ if (!ACPI_COMPARE_NAMESEG(filename, signature)) { continue; } /* Extract table name and instance number */ status = osl_table_name_from_file(filename, temp_name, &current_instance); /* Ignore meaningless files */ if (ACPI_FAILURE(status) || current_instance != instance) { continue; } /* Create the table pathname */ if (instance != 0) { sprintf(table_filename, "%s/%4.4s%d", pathname, temp_name, instance); } else { sprintf(table_filename, "%s/%4.4s", pathname, temp_name); } 
break; } acpi_os_close_directory(table_dir); if (!filename) { return (AE_LIMIT); } /* There is no physical address saved for customized tables, use zero */ *address = 0; status = osl_read_table_from_file(table_filename, 0, table); return (status); }
linux-master
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: osunixxf - UNIX OSL interfaces * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ /* * These interfaces are required in order to compile the ASL compiler and the * various ACPICA tools under Linux or other Unix-like system. */ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acparser.h" #include "acdebug.h" #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <unistd.h> #include <sys/time.h> #include <semaphore.h> #include <pthread.h> #include <errno.h> #define _COMPONENT ACPI_OS_SERVICES ACPI_MODULE_NAME("osunixxf") /* Upcalls to acpi_exec */ void ae_table_override(struct acpi_table_header *existing_table, struct acpi_table_header **new_table); typedef void *(*PTHREAD_CALLBACK) (void *); /* Buffer used by acpi_os_vprintf */ #define ACPI_VPRINTF_BUFFER_SIZE 512 #define _ASCII_NEWLINE '\n' /* Terminal support for acpi_exec only */ #ifdef ACPI_EXEC_APP #include <termios.h> struct termios original_term_attributes; int term_attributes_were_set = 0; acpi_status acpi_ut_read_line(char *buffer, u32 buffer_length, u32 *bytes_read); static void os_enter_line_edit_mode(void); static void os_exit_line_edit_mode(void); /****************************************************************************** * * FUNCTION: os_enter_line_edit_mode, os_exit_line_edit_mode * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Enter/Exit the raw character input mode for the terminal. * * Interactive line-editing support for the AML debugger. Used with the * common/acgetline module. * * readline() is not used because of non-portability. It is not available * on all systems, and if it is, often the package must be manually installed. 
* * Therefore, we use the POSIX tcgetattr/tcsetattr and do the minimal line * editing that we need in acpi_os_get_line. * * If the POSIX tcgetattr/tcsetattr interfaces are unavailable, these * calls will also work: * For os_enter_line_edit_mode: system ("stty cbreak -echo") * For os_exit_line_edit_mode: system ("stty cooked echo") * *****************************************************************************/ static void os_enter_line_edit_mode(void) { struct termios local_term_attributes; term_attributes_were_set = 0; /* STDIN must be a terminal */ if (!isatty(STDIN_FILENO)) { return; } /* Get and keep the original attributes */ if (tcgetattr(STDIN_FILENO, &original_term_attributes)) { fprintf(stderr, "Could not get terminal attributes!\n"); return; } /* Set the new attributes to enable raw character input */ memcpy(&local_term_attributes, &original_term_attributes, sizeof(struct termios)); local_term_attributes.c_lflag &= ~(ICANON | ECHO); local_term_attributes.c_cc[VMIN] = 1; local_term_attributes.c_cc[VTIME] = 0; if (tcsetattr(STDIN_FILENO, TCSANOW, &local_term_attributes)) { fprintf(stderr, "Could not set terminal attributes!\n"); return; } term_attributes_were_set = 1; } static void os_exit_line_edit_mode(void) { if (!term_attributes_were_set) { return; } /* Set terminal attributes back to the original values */ if (tcsetattr(STDIN_FILENO, TCSANOW, &original_term_attributes)) { fprintf(stderr, "Could not restore terminal attributes!\n"); } } #else /* These functions are not needed for other ACPICA utilities */ #define os_enter_line_edit_mode() #define os_exit_line_edit_mode() #endif /****************************************************************************** * * FUNCTION: acpi_os_initialize, acpi_os_terminate * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Initialize and terminate this module. 
* *****************************************************************************/ acpi_status acpi_os_initialize(void) { acpi_status status; acpi_gbl_output_file = stdout; os_enter_line_edit_mode(); status = acpi_os_create_lock(&acpi_gbl_print_lock); if (ACPI_FAILURE(status)) { return (status); } return (AE_OK); } acpi_status acpi_os_terminate(void) { os_exit_line_edit_mode(); return (AE_OK); } #ifndef ACPI_USE_NATIVE_RSDP_POINTER /****************************************************************************** * * FUNCTION: acpi_os_get_root_pointer * * PARAMETERS: None * * RETURN: RSDP physical address * * DESCRIPTION: Gets the ACPI root pointer (RSDP) * *****************************************************************************/ acpi_physical_address acpi_os_get_root_pointer(void) { return (0); } #endif /****************************************************************************** * * FUNCTION: acpi_os_predefined_override * * PARAMETERS: init_val - Initial value of the predefined object * new_val - The new value for the object * * RETURN: Status, pointer to value. Null pointer returned if not * overriding. * * DESCRIPTION: Allow the OS to override predefined names * *****************************************************************************/ acpi_status acpi_os_predefined_override(const struct acpi_predefined_names *init_val, acpi_string *new_val) { if (!init_val || !new_val) { return (AE_BAD_PARAMETER); } *new_val = NULL; return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_table_override * * PARAMETERS: existing_table - Header of current table (probably * firmware) * new_table - Where an entire new table is returned. * * RETURN: Status, pointer to new table. 
Null pointer returned if no * table is available to override * * DESCRIPTION: Return a different version of a table if one is available * *****************************************************************************/ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table, struct acpi_table_header **new_table) { if (!existing_table || !new_table) { return (AE_BAD_PARAMETER); } *new_table = NULL; #ifdef ACPI_EXEC_APP ae_table_override(existing_table, new_table); return (AE_OK); #else return (AE_NO_ACPI_TABLES); #endif } /****************************************************************************** * * FUNCTION: acpi_os_physical_table_override * * PARAMETERS: existing_table - Header of current table (probably firmware) * new_address - Where new table address is returned * (Physical address) * new_table_length - Where new table length is returned * * RETURN: Status, address/length of new table. Null pointer returned * if no table is available to override. * * DESCRIPTION: Returns AE_SUPPORT, function not used in user space. * *****************************************************************************/ acpi_status acpi_os_physical_table_override(struct acpi_table_header *existing_table, acpi_physical_address *new_address, u32 *new_table_length) { return (AE_SUPPORT); } /****************************************************************************** * * FUNCTION: acpi_os_enter_sleep * * PARAMETERS: sleep_state - Which sleep state to enter * rega_value - Register A value * regb_value - Register B value * * RETURN: Status * * DESCRIPTION: A hook before writing sleep registers to enter the sleep * state. Return AE_CTRL_TERMINATE to skip further sleep register * writes. 
* *****************************************************************************/ acpi_status acpi_os_enter_sleep(u8 sleep_state, u32 rega_value, u32 regb_value) { return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_redirect_output * * PARAMETERS: destination - An open file handle/pointer * * RETURN: None * * DESCRIPTION: Causes redirect of acpi_os_printf and acpi_os_vprintf * *****************************************************************************/ void acpi_os_redirect_output(void *destination) { acpi_gbl_output_file = destination; } /****************************************************************************** * * FUNCTION: acpi_os_printf * * PARAMETERS: fmt, ... - Standard printf format * * RETURN: None * * DESCRIPTION: Formatted output. Note: very similar to acpi_os_vprintf * (performance), changes should be tracked in both functions. * *****************************************************************************/ void ACPI_INTERNAL_VAR_XFACE acpi_os_printf(const char *fmt, ...) { va_list args; u8 flags; flags = acpi_gbl_db_output_flags; if (flags & ACPI_DB_REDIRECTABLE_OUTPUT) { /* Output is directable to either a file (if open) or the console */ if (acpi_gbl_debug_file) { /* Output file is open, send the output there */ va_start(args, fmt); vfprintf(acpi_gbl_debug_file, fmt, args); va_end(args); } else { /* No redirection, send output to console (once only!) */ flags |= ACPI_DB_CONSOLE_OUTPUT; } } if (flags & ACPI_DB_CONSOLE_OUTPUT) { va_start(args, fmt); vfprintf(acpi_gbl_output_file, fmt, args); va_end(args); } } /****************************************************************************** * * FUNCTION: acpi_os_vprintf * * PARAMETERS: fmt - Standard printf format * args - Argument list * * RETURN: None * * DESCRIPTION: Formatted output with argument list pointer. Note: very * similar to acpi_os_printf, changes should be tracked in both * functions. 
* *****************************************************************************/ void acpi_os_vprintf(const char *fmt, va_list args) { u8 flags; char buffer[ACPI_VPRINTF_BUFFER_SIZE]; /* * We build the output string in a local buffer because we may be * outputting the buffer twice. Using vfprintf is problematic because * some implementations modify the args pointer/structure during * execution. Thus, we use the local buffer for portability. * * Note: Since this module is intended for use by the various ACPICA * utilities/applications, we can safely declare the buffer on the stack. * Also, This function is used for relatively small error messages only. */ vsnprintf(buffer, ACPI_VPRINTF_BUFFER_SIZE, fmt, args); flags = acpi_gbl_db_output_flags; if (flags & ACPI_DB_REDIRECTABLE_OUTPUT) { /* Output is directable to either a file (if open) or the console */ if (acpi_gbl_debug_file) { /* Output file is open, send the output there */ fputs(buffer, acpi_gbl_debug_file); } else { /* No redirection, send output to console (once only!) */ flags |= ACPI_DB_CONSOLE_OUTPUT; } } if (flags & ACPI_DB_CONSOLE_OUTPUT) { fputs(buffer, acpi_gbl_output_file); } } #ifndef ACPI_EXEC_APP /****************************************************************************** * * FUNCTION: acpi_os_get_line * * PARAMETERS: buffer - Where to return the command line * buffer_length - Maximum length of Buffer * bytes_read - Where the actual byte count is returned * * RETURN: Status and actual bytes read * * DESCRIPTION: Get the next input line from the terminal. NOTE: For the * acpi_exec utility, we use the acgetline module instead to * provide line-editing and history support. 
* *****************************************************************************/ acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read) { int input_char; u32 end_of_line; /* Standard acpi_os_get_line for all utilities except acpi_exec */ for (end_of_line = 0;; end_of_line++) { if (end_of_line >= buffer_length) { return (AE_BUFFER_OVERFLOW); } if ((input_char = getchar()) == EOF) { return (AE_ERROR); } if (!input_char || input_char == _ASCII_NEWLINE) { break; } buffer[end_of_line] = (char)input_char; } /* Null terminate the buffer */ buffer[end_of_line] = 0; /* Return the number of bytes in the string */ if (bytes_read) { *bytes_read = end_of_line; } return (AE_OK); } #endif #ifndef ACPI_USE_NATIVE_MEMORY_MAPPING /****************************************************************************** * * FUNCTION: acpi_os_map_memory * * PARAMETERS: where - Physical address of memory to be mapped * length - How much memory to map * * RETURN: Pointer to mapped memory. Null on error. * * DESCRIPTION: Map physical memory into caller's address space * *****************************************************************************/ void *acpi_os_map_memory(acpi_physical_address where, acpi_size length) { return (ACPI_TO_POINTER((acpi_size)where)); } /****************************************************************************** * * FUNCTION: acpi_os_unmap_memory * * PARAMETERS: where - Logical address of memory to be unmapped * length - How much memory to unmap * * RETURN: None. * * DESCRIPTION: Delete a previously created mapping. Where and Length must * correspond to a previous mapping exactly. * *****************************************************************************/ void acpi_os_unmap_memory(void *where, acpi_size length) { return; } #endif /****************************************************************************** * * FUNCTION: acpi_os_allocate * * PARAMETERS: size - Amount to allocate, in bytes * * RETURN: Pointer to the new allocation. 
Null on error. * * DESCRIPTION: Allocate memory. Algorithm is dependent on the OS. * *****************************************************************************/ void *acpi_os_allocate(acpi_size size) { void *mem; mem = (void *)malloc((size_t) size); return (mem); } #ifdef USE_NATIVE_ALLOCATE_ZEROED /****************************************************************************** * * FUNCTION: acpi_os_allocate_zeroed * * PARAMETERS: size - Amount to allocate, in bytes * * RETURN: Pointer to the new allocation. Null on error. * * DESCRIPTION: Allocate and zero memory. Algorithm is dependent on the OS. * *****************************************************************************/ void *acpi_os_allocate_zeroed(acpi_size size) { void *mem; mem = (void *)calloc(1, (size_t) size); return (mem); } #endif /****************************************************************************** * * FUNCTION: acpi_os_free * * PARAMETERS: mem - Pointer to previously allocated memory * * RETURN: None. * * DESCRIPTION: Free memory allocated via acpi_os_allocate * *****************************************************************************/ void acpi_os_free(void *mem) { free(mem); } #ifdef ACPI_SINGLE_THREADED /****************************************************************************** * * FUNCTION: Semaphore stub functions * * DESCRIPTION: Stub functions used for single-thread applications that do * not require semaphore synchronization. Full implementations * of these functions appear after the stubs. 
* *****************************************************************************/ acpi_status acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *out_handle) { *out_handle = (acpi_handle)1; return (AE_OK); } acpi_status acpi_os_delete_semaphore(acpi_handle handle) { return (AE_OK); } acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) { return (AE_OK); } acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) { return (AE_OK); } #else /****************************************************************************** * * FUNCTION: acpi_os_create_semaphore * * PARAMETERS: initial_units - Units to be assigned to the new semaphore * out_handle - Where a handle will be returned * * RETURN: Status * * DESCRIPTION: Create an OS semaphore * *****************************************************************************/ acpi_status acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *out_handle) { sem_t *sem; if (!out_handle) { return (AE_BAD_PARAMETER); } #ifdef __APPLE__ { static int semaphore_count = 0; char semaphore_name[32]; snprintf(semaphore_name, sizeof(semaphore_name), "acpi_sem_%d", semaphore_count++); printf("%s\n", semaphore_name); sem = sem_open(semaphore_name, O_EXCL | O_CREAT, 0755, initial_units); if (!sem) { return (AE_NO_MEMORY); } sem_unlink(semaphore_name); /* This just deletes the name */ } #else sem = acpi_os_allocate(sizeof(sem_t)); if (!sem) { return (AE_NO_MEMORY); } if (sem_init(sem, 0, initial_units) == -1) { acpi_os_free(sem); return (AE_BAD_PARAMETER); } #endif *out_handle = (acpi_handle)sem; return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_delete_semaphore * * PARAMETERS: handle - Handle returned by acpi_os_create_semaphore * * RETURN: Status * * DESCRIPTION: Delete an OS semaphore * *****************************************************************************/ acpi_status 
acpi_os_delete_semaphore(acpi_handle handle) { sem_t *sem = (sem_t *) handle; if (!sem) { return (AE_BAD_PARAMETER); } #ifdef __APPLE__ if (sem_close(sem) == -1) { return (AE_BAD_PARAMETER); } #else if (sem_destroy(sem) == -1) { return (AE_BAD_PARAMETER); } #endif return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_wait_semaphore * * PARAMETERS: handle - Handle returned by acpi_os_create_semaphore * units - How many units to wait for * msec_timeout - How long to wait (milliseconds) * * RETURN: Status * * DESCRIPTION: Wait for units * *****************************************************************************/ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 msec_timeout) { acpi_status status = AE_OK; sem_t *sem = (sem_t *) handle; int ret_val; #ifndef ACPI_USE_ALTERNATE_TIMEOUT struct timespec time; #endif if (!sem) { return (AE_BAD_PARAMETER); } switch (msec_timeout) { /* * No Wait: * -------- * A zero timeout value indicates that we shouldn't wait - just * acquire the semaphore if available otherwise return AE_TIME * (a.k.a. 'would block'). */ case 0: if (sem_trywait(sem) == -1) { status = (AE_TIME); } break; /* Wait Indefinitely */ case ACPI_WAIT_FOREVER: while (((ret_val = sem_wait(sem)) == -1) && (errno == EINTR)) { continue; /* Restart if interrupted */ } if (ret_val != 0) { status = (AE_TIME); } break; /* Wait with msec_timeout */ default: #ifdef ACPI_USE_ALTERNATE_TIMEOUT /* * Alternate timeout mechanism for environments where * sem_timedwait is not available or does not work properly. 
*/ while (msec_timeout) { if (sem_trywait(sem) == 0) { /* Got the semaphore */ return (AE_OK); } if (msec_timeout >= 10) { msec_timeout -= 10; usleep(10 * ACPI_USEC_PER_MSEC); /* ten milliseconds */ } else { msec_timeout--; usleep(ACPI_USEC_PER_MSEC); /* one millisecond */ } } status = (AE_TIME); #else /* * The interface to sem_timedwait is an absolute time, so we need to * get the current time, then add in the millisecond Timeout value. */ if (clock_gettime(CLOCK_REALTIME, &time) == -1) { perror("clock_gettime"); return (AE_TIME); } time.tv_sec += (msec_timeout / ACPI_MSEC_PER_SEC); time.tv_nsec += ((msec_timeout % ACPI_MSEC_PER_SEC) * ACPI_NSEC_PER_MSEC); /* Handle nanosecond overflow (field must be less than one second) */ if (time.tv_nsec >= ACPI_NSEC_PER_SEC) { time.tv_sec += (time.tv_nsec / ACPI_NSEC_PER_SEC); time.tv_nsec = (time.tv_nsec % ACPI_NSEC_PER_SEC); } while (((ret_val = sem_timedwait(sem, &time)) == -1) && (errno == EINTR)) { continue; /* Restart if interrupted */ } if (ret_val != 0) { if (errno != ETIMEDOUT) { perror("sem_timedwait"); } status = (AE_TIME); } #endif break; } return (status); } /****************************************************************************** * * FUNCTION: acpi_os_signal_semaphore * * PARAMETERS: handle - Handle returned by acpi_os_create_semaphore * units - Number of units to send * * RETURN: Status * * DESCRIPTION: Send units * *****************************************************************************/ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) { sem_t *sem = (sem_t *) handle; if (!sem) { return (AE_BAD_PARAMETER); } if (sem_post(sem) == -1) { return (AE_LIMIT); } return (AE_OK); } #endif /* ACPI_SINGLE_THREADED */ /****************************************************************************** * * FUNCTION: Spinlock interfaces * * DESCRIPTION: Map these interfaces to semaphore interfaces * *****************************************************************************/ acpi_status 
acpi_os_create_lock(acpi_spinlock * out_handle) { return (acpi_os_create_semaphore(1, 1, out_handle)); } void acpi_os_delete_lock(acpi_spinlock handle) { acpi_os_delete_semaphore(handle); } acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle) { acpi_os_wait_semaphore(handle, 1, 0xFFFF); return (0); } void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags) { acpi_os_signal_semaphore(handle, 1); } /****************************************************************************** * * FUNCTION: acpi_os_install_interrupt_handler * * PARAMETERS: interrupt_number - Level handler should respond to. * isr - Address of the ACPI interrupt handler * except_ptr - Where status is returned * * RETURN: Handle to the newly installed handler. * * DESCRIPTION: Install an interrupt handler. Used to install the ACPI * OS-independent handler. * *****************************************************************************/ u32 acpi_os_install_interrupt_handler(u32 interrupt_number, acpi_osd_handler service_routine, void *context) { return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_remove_interrupt_handler * * PARAMETERS: handle - Returned when handler was installed * * RETURN: Status * * DESCRIPTION: Uninstalls an interrupt handler. * *****************************************************************************/ acpi_status acpi_os_remove_interrupt_handler(u32 interrupt_number, acpi_osd_handler service_routine) { return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_stall * * PARAMETERS: microseconds - Time to sleep * * RETURN: Blocks until sleep is completed. 
* * DESCRIPTION: Sleep at microsecond granularity * *****************************************************************************/ void acpi_os_stall(u32 microseconds) { if (microseconds) { usleep(microseconds); } } /****************************************************************************** * * FUNCTION: acpi_os_sleep * * PARAMETERS: milliseconds - Time to sleep * * RETURN: Blocks until sleep is completed. * * DESCRIPTION: Sleep at millisecond granularity * *****************************************************************************/ void acpi_os_sleep(u64 milliseconds) { /* Sleep for whole seconds */ sleep(milliseconds / ACPI_MSEC_PER_SEC); /* * Sleep for remaining microseconds. * Arg to usleep() is in usecs and must be less than 1,000,000 (1 second). */ usleep((milliseconds % ACPI_MSEC_PER_SEC) * ACPI_USEC_PER_MSEC); } /****************************************************************************** * * FUNCTION: acpi_os_get_timer * * PARAMETERS: None * * RETURN: Current time in 100 nanosecond units * * DESCRIPTION: Get the current system time * *****************************************************************************/ u64 acpi_os_get_timer(void) { struct timeval time; /* This timer has sufficient resolution for user-space application code */ gettimeofday(&time, NULL); /* (Seconds * 10^7 = 100ns(10^-7)) + (Microseconds(10^-6) * 10^1 = 100ns) */ return (((u64)time.tv_sec * ACPI_100NSEC_PER_SEC) + ((u64)time.tv_usec * ACPI_100NSEC_PER_USEC)); } /****************************************************************************** * * FUNCTION: acpi_os_read_pci_configuration * * PARAMETERS: pci_id - Seg/Bus/Dev * pci_register - Device Register * value - Buffer where value is placed * width - Number of bits * * RETURN: Status * * DESCRIPTION: Read data from PCI configuration space * *****************************************************************************/ acpi_status acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 pci_register, u64 *value, u32 
width) { *value = 0; return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_write_pci_configuration * * PARAMETERS: pci_id - Seg/Bus/Dev * pci_register - Device Register * value - Value to be written * width - Number of bits * * RETURN: Status. * * DESCRIPTION: Write data to PCI configuration space * *****************************************************************************/ acpi_status acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 pci_register, u64 value, u32 width) { return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_read_port * * PARAMETERS: address - Address of I/O port/register to read * value - Where value is placed * width - Number of bits * * RETURN: Value read from port * * DESCRIPTION: Read data from an I/O port or register * *****************************************************************************/ acpi_status acpi_os_read_port(acpi_io_address address, u32 *value, u32 width) { switch (width) { case 8: *value = 0xFF; break; case 16: *value = 0xFFFF; break; case 32: *value = 0xFFFFFFFF; break; default: return (AE_BAD_PARAMETER); } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_write_port * * PARAMETERS: address - Address of I/O port/register to write * value - Value to write * width - Number of bits * * RETURN: None * * DESCRIPTION: Write data to an I/O port or register * *****************************************************************************/ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width) { return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_read_memory * * PARAMETERS: address - Physical Memory Address to read * value - Where value is placed * width - Number of bits (8,16,32, or 64) * * RETURN: Value read from physical 
memory address. Always returned * as a 64-bit integer, regardless of the read width. * * DESCRIPTION: Read data from a physical memory address * *****************************************************************************/ acpi_status acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width) { switch (width) { case 8: case 16: case 32: case 64: *value = 0; break; default: return (AE_BAD_PARAMETER); } return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_write_memory * * PARAMETERS: address - Physical Memory Address to write * value - Value to write * width - Number of bits (8,16,32, or 64) * * RETURN: None * * DESCRIPTION: Write data to a physical memory address * *****************************************************************************/ acpi_status acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width) { return (AE_OK); } /****************************************************************************** * * FUNCTION: acpi_os_readable * * PARAMETERS: pointer - Area to be verified * length - Size of area * * RETURN: TRUE if readable for entire length * * DESCRIPTION: Verify that a pointer is valid for reading * *****************************************************************************/ u8 acpi_os_readable(void *pointer, acpi_size length) { return (TRUE); } /****************************************************************************** * * FUNCTION: acpi_os_writable * * PARAMETERS: pointer - Area to be verified * length - Size of area * * RETURN: TRUE if writable for entire length * * DESCRIPTION: Verify that a pointer is valid for writing * *****************************************************************************/ u8 acpi_os_writable(void *pointer, acpi_size length) { return (TRUE); } /****************************************************************************** * * FUNCTION: acpi_os_signal * * PARAMETERS: function - ACPI A signal function code * info - 
Pointer to function-dependent structure * * RETURN: Status * * DESCRIPTION: Miscellaneous functions. Example implementation only. * *****************************************************************************/ acpi_status acpi_os_signal(u32 function, void *info) { switch (function) { case ACPI_SIGNAL_FATAL: break; case ACPI_SIGNAL_BREAKPOINT: break; default: break; } return (AE_OK); } /* Optional multi-thread support */ #ifndef ACPI_SINGLE_THREADED /****************************************************************************** * * FUNCTION: acpi_os_get_thread_id * * PARAMETERS: None * * RETURN: Id of the running thread * * DESCRIPTION: Get the ID of the current (running) thread * *****************************************************************************/ acpi_thread_id acpi_os_get_thread_id(void) { pthread_t thread; thread = pthread_self(); return (ACPI_CAST_PTHREAD_T(thread)); } /****************************************************************************** * * FUNCTION: acpi_os_execute * * PARAMETERS: type - Type of execution * function - Address of the function to execute * context - Passed as a parameter to the function * * RETURN: Status. 
* * DESCRIPTION: Execute a new thread * *****************************************************************************/ acpi_status acpi_os_execute(acpi_execute_type type, acpi_osd_exec_callback function, void *context) { pthread_t thread; int ret; ret = pthread_create(&thread, NULL, (PTHREAD_CALLBACK) function, context); if (ret) { acpi_os_printf("Create thread failed"); } return (0); } #else /* ACPI_SINGLE_THREADED */ acpi_thread_id acpi_os_get_thread_id(void) { return (1); } acpi_status acpi_os_execute(acpi_execute_type type, acpi_osd_exec_callback function, void *context) { function(context); return (AE_OK); } #endif /* ACPI_SINGLE_THREADED */ /****************************************************************************** * * FUNCTION: acpi_os_wait_events_complete * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Wait for all asynchronous events to complete. This * implementation does nothing. * *****************************************************************************/ void acpi_os_wait_events_complete(void) { return; }
linux-master
tools/power/acpi/os_specific/service_layers/osunixxf.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: osunixmap - Unix OSL for file mappings * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include "acpidump.h" #include <unistd.h> #include <sys/mman.h> #ifdef _free_BSD #include <sys/param.h> #endif #define _COMPONENT ACPI_OS_SERVICES ACPI_MODULE_NAME("osunixmap") #ifndef O_BINARY #define O_BINARY 0 #endif #if defined(_dragon_fly) || defined(_free_BSD) || defined(_QNX) #define MMAP_FLAGS MAP_SHARED #else #define MMAP_FLAGS MAP_PRIVATE #endif #define SYSTEM_MEMORY "/dev/mem" /******************************************************************************* * * FUNCTION: acpi_os_get_page_size * * PARAMETERS: None * * RETURN: Page size of the platform. * * DESCRIPTION: Obtain page size of the platform. * ******************************************************************************/ static acpi_size acpi_os_get_page_size(void) { #ifdef PAGE_SIZE return PAGE_SIZE; #else return sysconf(_SC_PAGESIZE); #endif } /****************************************************************************** * * FUNCTION: acpi_os_map_memory * * PARAMETERS: where - Physical address of memory to be mapped * length - How much memory to map * * RETURN: Pointer to mapped memory. Null on error. * * DESCRIPTION: Map physical memory into local address space. 
* *****************************************************************************/ void *acpi_os_map_memory(acpi_physical_address where, acpi_size length) { u8 *mapped_memory; acpi_physical_address offset; acpi_size page_size; int fd; fd = open(SYSTEM_MEMORY, O_RDONLY | O_BINARY); if (fd < 0) { fprintf(stderr, "Cannot open %s\n", SYSTEM_MEMORY); return (NULL); } /* Align the offset to use mmap */ page_size = acpi_os_get_page_size(); offset = where % page_size; /* Map the table header to get the length of the full table */ mapped_memory = mmap(NULL, (length + offset), PROT_READ, MMAP_FLAGS, fd, (where - offset)); if (mapped_memory == MAP_FAILED) { fprintf(stderr, "Cannot map %s\n", SYSTEM_MEMORY); close(fd); return (NULL); } close(fd); return (ACPI_CAST8(mapped_memory + offset)); } /****************************************************************************** * * FUNCTION: acpi_os_unmap_memory * * PARAMETERS: where - Logical address of memory to be unmapped * length - How much memory to unmap * * RETURN: None. * * DESCRIPTION: Delete a previously created mapping. Where and Length must * correspond to a previous mapping exactly. * *****************************************************************************/ void acpi_os_unmap_memory(void *where, acpi_size length) { acpi_physical_address offset; acpi_size page_size; page_size = acpi_os_get_page_size(); offset = ACPI_TO_INTEGER(where) % page_size; munmap((u8 *)where - offset, (length + offset)); }
linux-master
tools/power/acpi/os_specific/service_layers/osunixmap.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: osunixdir - Unix directory access interfaces * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <dirent.h> #include <fnmatch.h> #include <ctype.h> #include <sys/stat.h> /* * Allocated structure returned from os_open_directory */ typedef struct external_find_info { char *dir_pathname; DIR *dir_ptr; char temp_buffer[256]; char *wildcard_spec; char requested_file_type; } external_find_info; /******************************************************************************* * * FUNCTION: acpi_os_open_directory * * PARAMETERS: dir_pathname - Full pathname to the directory * wildcard_spec - string of the form "*.c", etc. * * RETURN: A directory "handle" to be used in subsequent search operations. * NULL returned on failure. 
* * DESCRIPTION: Open a directory in preparation for a wildcard search * ******************************************************************************/ void *acpi_os_open_directory(char *dir_pathname, char *wildcard_spec, char requested_file_type) { struct external_find_info *external_info; DIR *dir; /* Allocate the info struct that will be returned to the caller */ external_info = calloc(1, sizeof(struct external_find_info)); if (!external_info) { return (NULL); } /* Get the directory stream */ dir = opendir(dir_pathname); if (!dir) { fprintf(stderr, "Cannot open directory - %s\n", dir_pathname); free(external_info); return (NULL); } /* Save the info in the return structure */ external_info->wildcard_spec = wildcard_spec; external_info->requested_file_type = requested_file_type; external_info->dir_pathname = dir_pathname; external_info->dir_ptr = dir; return (external_info); } /******************************************************************************* * * FUNCTION: acpi_os_get_next_filename * * PARAMETERS: dir_handle - Created via acpi_os_open_directory * * RETURN: Next filename matched. NULL if no more matches. * * DESCRIPTION: Get the next file in the directory that matches the wildcard * specification. 
* ******************************************************************************/ char *acpi_os_get_next_filename(void *dir_handle) { struct external_find_info *external_info = dir_handle; struct dirent *dir_entry; char *temp_str; int str_len; struct stat temp_stat; int err; while ((dir_entry = readdir(external_info->dir_ptr))) { if (!fnmatch (external_info->wildcard_spec, dir_entry->d_name, 0)) { if (dir_entry->d_name[0] == '.') { continue; } str_len = strlen(dir_entry->d_name) + strlen(external_info->dir_pathname) + 2; temp_str = calloc(str_len, 1); if (!temp_str) { fprintf(stderr, "Could not allocate buffer for temporary string\n"); return (NULL); } strcpy(temp_str, external_info->dir_pathname); strcat(temp_str, "/"); strcat(temp_str, dir_entry->d_name); err = stat(temp_str, &temp_stat); if (err == -1) { fprintf(stderr, "Cannot stat file (should not happen) - %s\n", temp_str); free(temp_str); return (NULL); } free(temp_str); if ((S_ISDIR(temp_stat.st_mode) && (external_info->requested_file_type == REQUEST_DIR_ONLY)) || ((!S_ISDIR(temp_stat.st_mode) && external_info->requested_file_type == REQUEST_FILE_ONLY))) { /* copy to a temp buffer because dir_entry struct is on the stack */ strcpy(external_info->temp_buffer, dir_entry->d_name); return (external_info->temp_buffer); } } } return (NULL); } /******************************************************************************* * * FUNCTION: acpi_os_close_directory * * PARAMETERS: dir_handle - Created via acpi_os_open_directory * * RETURN: None. * * DESCRIPTION: Close the open directory and cleanup. * ******************************************************************************/ void acpi_os_close_directory(void *dir_handle) { struct external_find_info *external_info = dir_handle; /* Close the directory and free allocations */ closedir(external_info->dir_ptr); free(dir_handle); }
linux-master
tools/power/acpi/os_specific/service_layers/osunixdir.c
// SPDX-License-Identifier: GPL-2.0-or-later /* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <[email protected]> */ #include <stdio.h> #include <unistd.h> #include <math.h> #include "config.h" #include "system.h" #include "benchmark.h" /* Print out progress if we log into a file */ #define show_progress(total_time, progress_time) \ if (config->output != stdout) { \ fprintf(stdout, "Progress: %02lu %%\r", \ (progress_time * 100) / total_time); \ fflush(stdout); \ } /** * compute how many rounds of calculation we should do * to get the given load time * * @param load aimed load time in µs * * @retval rounds of calculation **/ unsigned int calculate_timespace(long load, struct config *config) { int i; long long now, then; unsigned int estimated = GAUGECOUNT; unsigned int rounds = 0; unsigned int timed = 0; if (config->verbose) printf("calibrating load of %lius, please wait...\n", load); /* get the initial calculation time for a specific number of rounds */ now = get_time(); ROUNDS(estimated); then = get_time(); timed = (unsigned int)(then - now); /* approximation of the wanted load time by comparing with the * initial calculation time */ for (i = 0; i < 4; i++) { rounds = (unsigned int)(load * estimated / timed); dprintf("calibrating with %u rounds\n", rounds); now = get_time(); ROUNDS(rounds); then = get_time(); timed = (unsigned int)(then - now); estimated = rounds; } if (config->verbose) printf("calibration done\n"); return estimated; } /** * benchmark * generates a specific sleep an load time with the performance * governor and compares the used time for same calculations done * with the configured powersave governor * * @param config config values for the benchmark * **/ void start_benchmark(struct config *config) { unsigned int _round, cycle; long long now, then; long sleep_time = 0, load_time = 0; long performance_time = 0, powersave_time = 0; unsigned int calculations; unsigned long total_time = 0, progress_time = 0; 
sleep_time = config->sleep; load_time = config->load; /* For the progress bar */ for (_round = 1; _round <= config->rounds; _round++) total_time += _round * (config->sleep + config->load); total_time *= 2; /* powersave and performance cycles */ for (_round = 0; _round < config->rounds; _round++) { performance_time = 0LL; powersave_time = 0LL; show_progress(total_time, progress_time); /* set the cpufreq governor to "performance" which disables * P-State switching. */ if (set_cpufreq_governor("performance", config->cpu) != 0) return; /* calibrate the calculation time. the resulting calculation * _rounds should produce a load which matches the configured * load time */ calculations = calculate_timespace(load_time, config); if (config->verbose) printf("_round %i: doing %u cycles with %u calculations" " for %lius\n", _round + 1, config->cycles, calculations, load_time); fprintf(config->output, "%u %li %li ", _round, load_time, sleep_time); if (config->verbose) printf("average: %lius, rps:%li\n", load_time / calculations, 1000000 * calculations / load_time); /* do some sleep/load cycles with the performance governor */ for (cycle = 0; cycle < config->cycles; cycle++) { now = get_time(); usleep(sleep_time); ROUNDS(calculations); then = get_time(); performance_time += then - now - sleep_time; if (config->verbose) printf("performance cycle took %lius, " "sleep: %lius, " "load: %lius, rounds: %u\n", (long)(then - now), sleep_time, load_time, calculations); } fprintf(config->output, "%li ", performance_time / config->cycles); progress_time += sleep_time + load_time; show_progress(total_time, progress_time); /* set the powersave governor which activates P-State switching * again */ if (set_cpufreq_governor(config->governor, config->cpu) != 0) return; /* again, do some sleep/load cycles with the * powersave governor */ for (cycle = 0; cycle < config->cycles; cycle++) { now = get_time(); usleep(sleep_time); ROUNDS(calculations); then = get_time(); powersave_time += then - now - 
sleep_time; if (config->verbose) printf("powersave cycle took %lius, " "sleep: %lius, " "load: %lius, rounds: %u\n", (long)(then - now), sleep_time, load_time, calculations); } progress_time += sleep_time + load_time; /* compare the average sleep/load cycles */ fprintf(config->output, "%li ", powersave_time / config->cycles); fprintf(config->output, "%.3f\n", performance_time * 100.0 / powersave_time); fflush(config->output); if (config->verbose) printf("performance is at %.2f%%\n", performance_time * 100.0 / powersave_time); sleep_time += config->sleep_step; load_time += config->load_step; } }
linux-master
tools/power/cpupower/bench/benchmark.c
// SPDX-License-Identifier: GPL-2.0-or-later /* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <[email protected]> */ #include <stdio.h> #include <time.h> #include <sys/time.h> #include <sys/types.h> #include <unistd.h> #include <sched.h> #include <cpufreq.h> #include <cpupower.h> #include "config.h" #include "system.h" /** * returns time since epoch in µs * * @retval time **/ long long int get_time() { struct timeval now; gettimeofday(&now, NULL); return (long long int)(now.tv_sec * 1000000LL + now.tv_usec); } /** * sets the cpufreq governor * * @param governor cpufreq governor name * @param cpu cpu for which the governor should be set * * @retval 0 on success * @retval -1 when failed **/ int set_cpufreq_governor(char *governor, unsigned int cpu) { dprintf("set %s as cpufreq governor\n", governor); if (cpupower_is_cpu_online(cpu) != 1) { perror("cpufreq_cpu_exists"); fprintf(stderr, "error: cpu %u does not exist\n", cpu); return -1; } if (cpufreq_modify_policy_governor(cpu, governor) != 0) { perror("cpufreq_modify_policy_governor"); fprintf(stderr, "error: unable to set %s governor\n", governor); return -1; } return 0; } /** * sets cpu affinity for the process * * @param cpu cpu# to which the affinity should be set * * @retval 0 on success * @retval -1 when setting the affinity failed **/ int set_cpu_affinity(unsigned int cpu) { cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); dprintf("set affinity to cpu #%u\n", cpu); if (sched_setaffinity(getpid(), sizeof(cpu_set_t), &cpuset) < 0) { perror("sched_setaffinity"); fprintf(stderr, "warning: unable to set cpu affinity\n"); return -1; } return 0; } /** * sets the process priority parameter * * @param priority priority value * * @retval 0 on success * @retval -1 when setting the priority failed **/ int set_process_priority(int priority) { struct sched_param param; dprintf("set scheduler priority to %i\n", priority); param.sched_priority = priority; if (sched_setscheduler(0, 
SCHEDULER, &param) < 0) { perror("sched_setscheduler"); fprintf(stderr, "warning: unable to set scheduler priority\n"); return -1; } return 0; } /** * notifies the user that the benchmark may run some time * * @param config benchmark config values * **/ void prepare_user(const struct config *config) { unsigned long sleep_time = 0; unsigned long load_time = 0; unsigned int round; for (round = 0; round < config->rounds; round++) { sleep_time += 2 * config->cycles * (config->sleep + config->sleep_step * round); load_time += 2 * config->cycles * (config->load + config->load_step * round) + (config->load + config->load_step * round * 4); } if (config->verbose || config->output != stdout) printf("approx. test duration: %im\n", (int)((sleep_time + load_time) / 60000000)); } /** * sets up the cpu affinity and scheduler priority * * @param config benchmark config values * **/ void prepare_system(const struct config *config) { if (config->verbose) printf("set cpu affinity to cpu #%u\n", config->cpu); set_cpu_affinity(config->cpu); switch (config->prio) { case SCHED_HIGH: if (config->verbose) printf("high priority condition requested\n"); set_process_priority(PRIORITY_HIGH); break; case SCHED_LOW: if (config->verbose) printf("low priority condition requested\n"); set_process_priority(PRIORITY_LOW); break; default: if (config->verbose) printf("default priority condition requested\n"); set_process_priority(PRIORITY_DEFAULT); } }
linux-master
tools/power/cpupower/bench/system.c
// SPDX-License-Identifier: GPL-2.0-or-later /* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <[email protected]> */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <getopt.h> #include <errno.h> #include "config.h" #include "system.h" #include "benchmark.h" static struct option long_options[] = { {"output", 1, 0, 'o'}, {"sleep", 1, 0, 's'}, {"load", 1, 0, 'l'}, {"verbose", 0, 0, 'v'}, {"cpu", 1, 0, 'c'}, {"governor", 1, 0, 'g'}, {"prio", 1, 0, 'p'}, {"file", 1, 0, 'f'}, {"cycles", 1, 0, 'n'}, {"rounds", 1, 0, 'r'}, {"load-step", 1, 0, 'x'}, {"sleep-step", 1, 0, 'y'}, {"help", 0, 0, 'h'}, {0, 0, 0, 0} }; /******************************************************************* usage *******************************************************************/ void usage() { printf("usage: ./bench\n"); printf("Options:\n"); printf(" -l, --load=<long int>\t\tinitial load time in us\n"); printf(" -s, --sleep=<long int>\t\tinitial sleep time in us\n"); printf(" -x, --load-step=<long int>\ttime to be added to load time, in us\n"); printf(" -y, --sleep-step=<long int>\ttime to be added to sleep time, in us\n"); printf(" -c, --cpu=<cpu #>\t\t\tCPU Nr. to use, starting at 0\n"); printf(" -p, --prio=<priority>\t\t\tscheduler priority, HIGH, LOW or DEFAULT\n"); printf(" -g, --governor=<governor>\t\tcpufreq governor to test\n"); printf(" -n, --cycles=<int>\t\t\tload/sleep cycles\n"); printf(" -r, --rounds<int>\t\t\tload/sleep rounds\n"); printf(" -f, --file=<configfile>\t\tconfig file to use\n"); printf(" -o, --output=<dir>\t\t\toutput path. 
Filename will be OUTPUTPATH/benchmark_TIMESTAMP.log\n"); printf(" -v, --verbose\t\t\t\tverbose output on/off\n"); printf(" -h, --help\t\t\t\tPrint this help screen\n"); exit(1); } /******************************************************************* main *******************************************************************/ int main(int argc, char **argv) { int c; int option_index = 0; struct config *config = NULL; config = prepare_default_config(); if (config == NULL) return EXIT_FAILURE; while (1) { c = getopt_long (argc, argv, "hg:o:s:l:vc:p:f:n:r:x:y:", long_options, &option_index); if (c == -1) break; switch (c) { case 'o': if (config->output != NULL) fclose(config->output); config->output = prepare_output(optarg); if (config->output == NULL) return EXIT_FAILURE; dprintf("user output path -> %s\n", optarg); break; case 's': sscanf(optarg, "%li", &config->sleep); dprintf("user sleep time -> %s\n", optarg); break; case 'l': sscanf(optarg, "%li", &config->load); dprintf("user load time -> %s\n", optarg); break; case 'c': sscanf(optarg, "%u", &config->cpu); dprintf("user cpu -> %s\n", optarg); break; case 'g': strncpy(config->governor, optarg, 14); dprintf("user governor -> %s\n", optarg); break; case 'p': if (string_to_prio(optarg) != SCHED_ERR) { config->prio = string_to_prio(optarg); dprintf("user prio -> %s\n", optarg); } else { if (config != NULL) { if (config->output != NULL) fclose(config->output); free(config); } usage(); } break; case 'n': sscanf(optarg, "%u", &config->cycles); dprintf("user cycles -> %s\n", optarg); break; case 'r': sscanf(optarg, "%u", &config->rounds); dprintf("user rounds -> %s\n", optarg); break; case 'x': sscanf(optarg, "%li", &config->load_step); dprintf("user load_step -> %s\n", optarg); break; case 'y': sscanf(optarg, "%li", &config->sleep_step); dprintf("user sleep_step -> %s\n", optarg); break; case 'f': if (prepare_config(optarg, config)) return EXIT_FAILURE; break; case 'v': config->verbose = 1; dprintf("verbose output 
enabled\n"); break; case 'h': case '?': default: if (config != NULL) { if (config->output != NULL) fclose(config->output); free(config); } usage(); } } if (config->verbose) { printf("starting benchmark with parameters:\n"); printf("config:\n\t" "sleep=%li\n\t" "load=%li\n\t" "sleep_step=%li\n\t" "load_step=%li\n\t" "cpu=%u\n\t" "cycles=%u\n\t" "rounds=%u\n\t" "governor=%s\n\n", config->sleep, config->load, config->sleep_step, config->load_step, config->cpu, config->cycles, config->rounds, config->governor); } prepare_user(config); prepare_system(config); start_benchmark(config); if (config->output != stdout) fclose(config->output); free(config); return EXIT_SUCCESS; }
linux-master
tools/power/cpupower/bench/main.c
// SPDX-License-Identifier: GPL-2.0-or-later /* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <[email protected]> */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <time.h> #include <dirent.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include "parse.h" #include "config.h" /** * converts priority string to priority * * @param str string that represents a scheduler priority * * @retval priority * @retval SCHED_ERR when the priority doesn't exit **/ enum sched_prio string_to_prio(const char *str) { if (strncasecmp("high", str, strlen(str)) == 0) return SCHED_HIGH; else if (strncasecmp("default", str, strlen(str)) == 0) return SCHED_DEFAULT; else if (strncasecmp("low", str, strlen(str)) == 0) return SCHED_LOW; else return SCHED_ERR; } /** * create and open logfile * * @param dir directory in which the logfile should be created * * @retval logfile on success * @retval NULL when the file can't be created **/ FILE *prepare_output(const char *dirname) { FILE *output = NULL; int len; char *filename, *filename_tmp; struct utsname sysdata; DIR *dir; dir = opendir(dirname); if (dir == NULL) { if (mkdir(dirname, 0755)) { perror("mkdir"); fprintf(stderr, "error: Cannot create dir %s\n", dirname); return NULL; } } len = strlen(dirname) + 30; filename = malloc(sizeof(char) * len); if (!filename) { perror("malloc"); goto out_dir; } if (uname(&sysdata) == 0) { len += strlen(sysdata.nodename) + strlen(sysdata.release); filename_tmp = realloc(filename, sizeof(*filename) * len); if (filename_tmp == NULL) { free(filename); perror("realloc"); goto out_dir; } filename = filename_tmp; snprintf(filename, len - 1, "%s/benchmark_%s_%s_%li.log", dirname, sysdata.nodename, sysdata.release, time(NULL)); } else { snprintf(filename, len - 1, "%s/benchmark_%li.log", dirname, time(NULL)); } dprintf("logfilename: %s\n", filename); output = fopen(filename, "w+"); if (output == NULL) { 
perror("fopen"); fprintf(stderr, "error: unable to open logfile\n"); goto out; } fprintf(stdout, "Logfile: %s\n", filename); fprintf(output, "#round load sleep performance powersave percentage\n"); out: free(filename); out_dir: closedir(dir); return output; } /** * returns the default config * * @retval default config on success * @retval NULL when the output file can't be created **/ struct config *prepare_default_config() { struct config *config = malloc(sizeof(struct config)); dprintf("loading defaults\n"); config->sleep = 500000; config->load = 500000; config->sleep_step = 500000; config->load_step = 500000; config->cycles = 5; config->rounds = 50; config->cpu = 0; config->prio = SCHED_HIGH; config->verbose = 0; strncpy(config->governor, "ondemand", sizeof(config->governor)); config->output = stdout; #ifdef DEFAULT_CONFIG_FILE if (prepare_config(DEFAULT_CONFIG_FILE, config)) return NULL; #endif return config; } /** * parses config file and returns the config to the caller * * @param path config file name * * @retval 1 on error * @retval 0 on success **/ int prepare_config(const char *path, struct config *config) { size_t len = 0; char opt[16], val[32], *line = NULL; FILE *configfile; if (config == NULL) { fprintf(stderr, "error: config is NULL\n"); return 1; } configfile = fopen(path, "r"); if (configfile == NULL) { perror("fopen"); fprintf(stderr, "error: unable to read configfile\n"); free(config); return 1; } while (getline(&line, &len, configfile) != -1) { if (line[0] == '#' || line[0] == ' ' || line[0] == '\n') continue; if (sscanf(line, "%14s = %30s", opt, val) < 2) continue; dprintf("parsing: %s -> %s\n", opt, val); if (strcmp("sleep", opt) == 0) sscanf(val, "%li", &config->sleep); else if (strcmp("load", opt) == 0) sscanf(val, "%li", &config->load); else if (strcmp("load_step", opt) == 0) sscanf(val, "%li", &config->load_step); else if (strcmp("sleep_step", opt) == 0) sscanf(val, "%li", &config->sleep_step); else if (strcmp("cycles", opt) == 0) 
sscanf(val, "%u", &config->cycles); else if (strcmp("rounds", opt) == 0) sscanf(val, "%u", &config->rounds); else if (strcmp("verbose", opt) == 0) sscanf(val, "%u", &config->verbose); else if (strcmp("output", opt) == 0) config->output = prepare_output(val); else if (strcmp("cpu", opt) == 0) sscanf(val, "%u", &config->cpu); else if (strcmp("governor", opt) == 0) { strncpy(config->governor, val, sizeof(config->governor)); config->governor[sizeof(config->governor) - 1] = '\0'; } else if (strcmp("priority", opt) == 0) { if (string_to_prio(val) != SCHED_ERR) config->prio = string_to_prio(val); } } free(line); return 0; }
linux-master
tools/power/cpupower/bench/parse.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> * (C) 2010 Thomas Renninger <[email protected]> */ #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <cpuidle.h> #include "helpers/sysfs.h" #include "helpers/helpers.h" #include "helpers/bitmask.h" #define LINE_LEN 10 static void cpuidle_cpu_output(unsigned int cpu, int verbose) { unsigned int idlestates, idlestate; char *tmp; idlestates = cpuidle_state_count(cpu); if (idlestates == 0) { printf(_("CPU %u: No idle states\n"), cpu); return; } printf(_("Number of idle states: %d\n"), idlestates); printf(_("Available idle states:")); for (idlestate = 0; idlestate < idlestates; idlestate++) { tmp = cpuidle_state_name(cpu, idlestate); if (!tmp) continue; printf(" %s", tmp); free(tmp); } printf("\n"); if (!verbose) return; for (idlestate = 0; idlestate < idlestates; idlestate++) { int disabled = cpuidle_is_state_disabled(cpu, idlestate); /* Disabled interface not supported on older kernels */ if (disabled < 0) disabled = 0; tmp = cpuidle_state_name(cpu, idlestate); if (!tmp) continue; printf("%s%s:\n", tmp, (disabled) ? 
" (DISABLED) " : ""); free(tmp); tmp = cpuidle_state_desc(cpu, idlestate); if (!tmp) continue; printf(_("Flags/Description: %s\n"), tmp); free(tmp); printf(_("Latency: %lu\n"), cpuidle_state_latency(cpu, idlestate)); printf(_("Usage: %lu\n"), cpuidle_state_usage(cpu, idlestate)); printf(_("Duration: %llu\n"), cpuidle_state_time(cpu, idlestate)); } } static void cpuidle_general_output(void) { char *tmp; tmp = cpuidle_get_driver(); if (!tmp) { printf(_("Could not determine cpuidle driver\n")); return; } printf(_("CPUidle driver: %s\n"), tmp); free(tmp); tmp = cpuidle_get_governor(); if (!tmp) { printf(_("Could not determine cpuidle governor\n")); return; } printf(_("CPUidle governor: %s\n"), tmp); free(tmp); } static void proc_cpuidle_cpu_output(unsigned int cpu) { long max_allowed_cstate = 2000000000; unsigned int cstate, cstates; cstates = cpuidle_state_count(cpu); if (cstates == 0) { printf(_("CPU %u: No C-states info\n"), cpu); return; } printf(_("active state: C0\n")); printf(_("max_cstate: C%u\n"), cstates-1); printf(_("maximum allowed latency: %lu usec\n"), max_allowed_cstate); printf(_("states:\t\n")); for (cstate = 1; cstate < cstates; cstate++) { printf(_(" C%d: " "type[C%d] "), cstate, cstate); printf(_("promotion[--] demotion[--] ")); printf(_("latency[%03lu] "), cpuidle_state_latency(cpu, cstate)); printf(_("usage[%08lu] "), cpuidle_state_usage(cpu, cstate)); printf(_("duration[%020Lu] \n"), cpuidle_state_time(cpu, cstate)); } } static struct option info_opts[] = { {"silent", no_argument, NULL, 's'}, {"proc", no_argument, NULL, 'o'}, { }, }; static inline void cpuidle_exit(int fail) { exit(EXIT_FAILURE); } int cmd_idle_info(int argc, char **argv) { extern char *optarg; extern int optind, opterr, optopt; int ret = 0, cont = 1, output_param = 0, verbose = 1; unsigned int cpu = 0; do { ret = getopt_long(argc, argv, "os", info_opts, NULL); if (ret == -1) break; switch (ret) { case '?': output_param = '?'; cont = 0; break; case 's': verbose = 0; break; case 
-1: cont = 0; break; case 'o': if (output_param) { output_param = -1; cont = 0; break; } output_param = ret; break; } } while (cont); switch (output_param) { case -1: printf(_("You can't specify more than one " "output-specific argument\n")); cpuidle_exit(EXIT_FAILURE); case '?': printf(_("invalid or unknown argument\n")); cpuidle_exit(EXIT_FAILURE); } /* Default is: show output of base_cpu only */ if (bitmask_isallclear(cpus_chosen)) bitmask_setbit(cpus_chosen, base_cpu); if (output_param == 0) cpuidle_general_output(); for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu)) continue; printf(_("analyzing CPU %d:\n"), cpu); if (sysfs_is_cpu_online(cpu) != 1) { printf(_(" *is offline\n")); printf("\n"); continue; } switch (output_param) { case 'o': proc_cpuidle_cpu_output(cpu); break; case 0: printf("\n"); cpuidle_cpu_output(cpu, verbose); break; } printf("\n"); } return EXIT_SUCCESS; }
linux-master
tools/power/cpupower/utils/cpuidle-info.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> */ #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <getopt.h> #include "cpufreq.h" #include "helpers/sysfs.h" #include "helpers/helpers.h" #include "helpers/bitmask.h" #define LINE_LEN 10 static unsigned int count_cpus(void) { FILE *fp; char value[LINE_LEN]; unsigned int ret = 0; unsigned int cpunr = 0; fp = fopen("/proc/stat", "r"); if (!fp) { printf(_("Couldn't count the number of CPUs (%s: %s), assuming 1\n"), "/proc/stat", strerror(errno)); return 1; } while (!feof(fp)) { if (!fgets(value, LINE_LEN, fp)) continue; value[LINE_LEN - 1] = '\0'; if (strlen(value) < (LINE_LEN - 2)) continue; if (strstr(value, "cpu ")) continue; if (sscanf(value, "cpu%d ", &cpunr) != 1) continue; if (cpunr > ret) ret = cpunr; } fclose(fp); /* cpu count starts from 0, on error return 1 (UP) */ return ret + 1; } static void proc_cpufreq_output(void) { unsigned int cpu, nr_cpus; struct cpufreq_policy *policy; unsigned int min_pctg = 0; unsigned int max_pctg = 0; unsigned long min, max; printf(_(" minimum CPU frequency - maximum CPU frequency - governor\n")); nr_cpus = count_cpus(); for (cpu = 0; cpu < nr_cpus; cpu++) { policy = cpufreq_get_policy(cpu); if (!policy) continue; if (cpufreq_get_hardware_limits(cpu, &min, &max)) { max = 0; } else { min_pctg = (policy->min * 100) / max; max_pctg = (policy->max * 100) / max; } printf("CPU%3d %9lu kHz (%3d %%) - %9lu kHz (%3d %%) - %s\n", cpu , policy->min, max ? min_pctg : 0, policy->max, max ? 
max_pctg : 0, policy->governor); cpufreq_put_policy(policy); } } static int no_rounding; static void print_duration(unsigned long duration) { unsigned long tmp; if (no_rounding) { if (duration > 1000000) printf("%u.%06u ms", ((unsigned int) duration/1000000), ((unsigned int) duration%1000000)); else if (duration > 100000) printf("%u us", ((unsigned int) duration/1000)); else if (duration > 1000) printf("%u.%03u us", ((unsigned int) duration/1000), ((unsigned int) duration%1000)); else printf("%lu ns", duration); } else { if (duration > 1000000) { tmp = duration%10000; if (tmp >= 5000) duration += 10000; printf("%u.%02u ms", ((unsigned int) duration/1000000), ((unsigned int) (duration%1000000)/10000)); } else if (duration > 100000) { tmp = duration%1000; if (tmp >= 500) duration += 1000; printf("%u us", ((unsigned int) duration / 1000)); } else if (duration > 1000) { tmp = duration%100; if (tmp >= 50) duration += 100; printf("%u.%01u us", ((unsigned int) duration/1000), ((unsigned int) (duration%1000)/100)); } else printf("%lu ns", duration); } return; } static int get_boost_mode_x86(unsigned int cpu) { int support, active, b_states = 0, ret, pstate_no, i; /* ToDo: Make this more global */ unsigned long pstates[MAX_HW_PSTATES] = {0,}; ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states); if (ret) { printf(_("Error while evaluating Boost Capabilities" " on CPU %d -- are you root?\n"), cpu); return ret; } /* P state changes via MSR are identified via cpuid 80000007 on Intel and AMD, but we assume boost capable machines can do that if (cpuid_eax(0x80000000) >= 0x80000007 && (cpuid_edx(0x80000007) & (1 << 7))) */ printf(_(" boost state support:\n")); printf(_(" Supported: %s\n"), support ? _("yes") : _("no")); printf(_(" Active: %s\n"), active ? 
_("yes") : _("no")); if (cpupower_cpu_info.vendor == X86_VENDOR_AMD && cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATE) { return 0; } else if ((cpupower_cpu_info.vendor == X86_VENDOR_AMD && cpupower_cpu_info.family >= 0x10) || cpupower_cpu_info.vendor == X86_VENDOR_HYGON) { ret = decode_pstates(cpu, b_states, pstates, &pstate_no); if (ret) return ret; printf(_(" Boost States: %d\n"), b_states); printf(_(" Total States: %d\n"), pstate_no); for (i = 0; i < pstate_no; i++) { if (!pstates[i]) continue; if (i < b_states) printf(_(" Pstate-Pb%d: %luMHz (boost state)" "\n"), i, pstates[i]); else printf(_(" Pstate-P%d: %luMHz\n"), i - b_states, pstates[i]); } } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_HAS_TURBO_RATIO) { double bclk; unsigned long long intel_turbo_ratio = 0; unsigned int ratio; /* Any way to autodetect this ? */ if (cpupower_cpu_info.caps & CPUPOWER_CAP_IS_SNB) bclk = 100.00; else bclk = 133.33; intel_turbo_ratio = msr_intel_get_turbo_ratio(cpu); dprint (" Ratio: 0x%llx - bclk: %f\n", intel_turbo_ratio, bclk); ratio = (intel_turbo_ratio >> 24) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 4 active cores\n"), ratio * bclk); ratio = (intel_turbo_ratio >> 16) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 3 active cores\n"), ratio * bclk); ratio = (intel_turbo_ratio >> 8) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 2 active cores\n"), ratio * bclk); ratio = (intel_turbo_ratio >> 0) & 0xFF; if (ratio) printf(_(" %.0f MHz max turbo 1 active cores\n"), ratio * bclk); } return 0; } /* --boost / -b */ static int get_boost_mode(unsigned int cpu) { struct cpufreq_available_frequencies *freqs; if (cpupower_cpu_info.vendor == X86_VENDOR_AMD || cpupower_cpu_info.vendor == X86_VENDOR_HYGON || cpupower_cpu_info.vendor == X86_VENDOR_INTEL) return get_boost_mode_x86(cpu); freqs = cpufreq_get_boost_frequencies(cpu); if (freqs) { printf(_(" boost frequency steps: ")); while (freqs->next) { print_speed(freqs->frequency, no_rounding); printf(", "); freqs = 
freqs->next; } print_speed(freqs->frequency, no_rounding); printf("\n"); cpufreq_put_available_frequencies(freqs); } return 0; } /* --freq / -f */ static int get_freq_kernel(unsigned int cpu, unsigned int human) { unsigned long freq = cpufreq_get_freq_kernel(cpu); printf(_(" current CPU frequency: ")); if (!freq) { printf(_(" Unable to call to kernel\n")); return -EINVAL; } if (human) { print_speed(freq, no_rounding); } else printf("%lu", freq); printf(_(" (asserted by call to kernel)\n")); return 0; } /* --hwfreq / -w */ static int get_freq_hardware(unsigned int cpu, unsigned int human) { unsigned long freq = cpufreq_get_freq_hardware(cpu); printf(_(" current CPU frequency: ")); if (!freq) { printf("Unable to call hardware\n"); return -EINVAL; } if (human) { print_speed(freq, no_rounding); } else printf("%lu", freq); printf(_(" (asserted by call to hardware)\n")); return 0; } /* --hwlimits / -l */ static int get_hardware_limits(unsigned int cpu, unsigned int human) { unsigned long min, max; if (cpufreq_get_hardware_limits(cpu, &min, &max)) { printf(_("Not Available\n")); return -EINVAL; } if (human) { printf(_(" hardware limits: ")); print_speed(min, no_rounding); printf(" - "); print_speed(max, no_rounding); printf("\n"); } else { printf("%lu %lu\n", min, max); } return 0; } /* --driver / -d */ static int get_driver(unsigned int cpu) { char *driver = cpufreq_get_driver(cpu); if (!driver) { printf(_(" no or unknown cpufreq driver is active on this CPU\n")); return -EINVAL; } printf(" driver: %s\n", driver); cpufreq_put_driver(driver); return 0; } /* --policy / -p */ static int get_policy(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_get_policy(cpu); if (!policy) { printf(_(" Unable to determine current policy\n")); return -EINVAL; } printf(_(" current policy: frequency should be within ")); print_speed(policy->min, no_rounding); printf(_(" and ")); print_speed(policy->max, no_rounding); printf(".\n "); printf(_("The governor \"%s\" may decide which 
speed to use\n" " within this range.\n"), policy->governor); cpufreq_put_policy(policy); return 0; } /* --governors / -g */ static int get_available_governors(unsigned int cpu) { struct cpufreq_available_governors *governors = cpufreq_get_available_governors(cpu); printf(_(" available cpufreq governors: ")); if (!governors) { printf(_("Not Available\n")); return -EINVAL; } while (governors->next) { printf("%s ", governors->governor); governors = governors->next; } printf("%s\n", governors->governor); cpufreq_put_available_governors(governors); return 0; } /* --affected-cpus / -a */ static int get_affected_cpus(unsigned int cpu) { struct cpufreq_affected_cpus *cpus = cpufreq_get_affected_cpus(cpu); printf(_(" CPUs which need to have their frequency coordinated by software: ")); if (!cpus) { printf(_("Not Available\n")); return -EINVAL; } while (cpus->next) { printf("%d ", cpus->cpu); cpus = cpus->next; } printf("%d\n", cpus->cpu); cpufreq_put_affected_cpus(cpus); return 0; } /* --related-cpus / -r */ static int get_related_cpus(unsigned int cpu) { struct cpufreq_affected_cpus *cpus = cpufreq_get_related_cpus(cpu); printf(_(" CPUs which run at the same hardware frequency: ")); if (!cpus) { printf(_("Not Available\n")); return -EINVAL; } while (cpus->next) { printf("%d ", cpus->cpu); cpus = cpus->next; } printf("%d\n", cpus->cpu); cpufreq_put_related_cpus(cpus); return 0; } /* --stats / -s */ static int get_freq_stats(unsigned int cpu, unsigned int human) { unsigned long total_trans = cpufreq_get_transitions(cpu); unsigned long long total_time; struct cpufreq_stats *stats = cpufreq_get_stats(cpu, &total_time); while (stats) { if (human) { print_speed(stats->frequency, no_rounding); printf(":%.2f%%", (100.0 * stats->time_in_state) / total_time); } else printf("%lu:%llu", stats->frequency, stats->time_in_state); stats = stats->next; if (stats) printf(", "); } cpufreq_put_stats(stats); if (total_trans) printf(" (%lu)\n", total_trans); return 0; } /* --latency / -y */ 
static int get_latency(unsigned int cpu, unsigned int human) { unsigned long latency = cpufreq_get_transition_latency(cpu); printf(_(" maximum transition latency: ")); if (!latency || latency == UINT_MAX) { printf(_(" Cannot determine or is not supported.\n")); return -EINVAL; } if (human) { print_duration(latency); printf("\n"); } else printf("%lu\n", latency); return 0; } /* --performance / -c */ static int get_perf_cap(unsigned int cpu) { if (cpupower_cpu_info.vendor == X86_VENDOR_AMD && cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATE) amd_pstate_show_perf_and_freq(cpu, no_rounding); return 0; } static void debug_output_one(unsigned int cpu) { struct cpufreq_available_frequencies *freqs; get_driver(cpu); get_related_cpus(cpu); get_affected_cpus(cpu); get_latency(cpu, 1); get_hardware_limits(cpu, 1); freqs = cpufreq_get_available_frequencies(cpu); if (freqs) { printf(_(" available frequency steps: ")); while (freqs->next) { print_speed(freqs->frequency, no_rounding); printf(", "); freqs = freqs->next; } print_speed(freqs->frequency, no_rounding); printf("\n"); cpufreq_put_available_frequencies(freqs); } get_available_governors(cpu); get_policy(cpu); if (get_freq_hardware(cpu, 1) < 0) get_freq_kernel(cpu, 1); get_boost_mode(cpu); get_perf_cap(cpu); } static struct option info_opts[] = { {"debug", no_argument, NULL, 'e'}, {"boost", no_argument, NULL, 'b'}, {"freq", no_argument, NULL, 'f'}, {"hwfreq", no_argument, NULL, 'w'}, {"hwlimits", no_argument, NULL, 'l'}, {"driver", no_argument, NULL, 'd'}, {"policy", no_argument, NULL, 'p'}, {"governors", no_argument, NULL, 'g'}, {"related-cpus", no_argument, NULL, 'r'}, {"affected-cpus", no_argument, NULL, 'a'}, {"stats", no_argument, NULL, 's'}, {"latency", no_argument, NULL, 'y'}, {"proc", no_argument, NULL, 'o'}, {"human", no_argument, NULL, 'm'}, {"no-rounding", no_argument, NULL, 'n'}, {"performance", no_argument, NULL, 'c'}, { }, }; int cmd_freq_info(int argc, char **argv) { extern char *optarg; extern int optind, 
opterr, optopt; int ret = 0, cont = 1; unsigned int cpu = 0; unsigned int human = 0; int output_param = 0; do { ret = getopt_long(argc, argv, "oefwldpgrasmybnc", info_opts, NULL); switch (ret) { case '?': output_param = '?'; cont = 0; break; case -1: cont = 0; break; case 'b': case 'o': case 'a': case 'r': case 'g': case 'p': case 'd': case 'l': case 'w': case 'f': case 'e': case 's': case 'y': case 'c': if (output_param) { output_param = -1; cont = 0; break; } output_param = ret; break; case 'm': if (human) { output_param = -1; cont = 0; break; } human = 1; break; case 'n': no_rounding = 1; break; default: fprintf(stderr, "invalid or unknown argument\n"); return EXIT_FAILURE; } } while (cont); switch (output_param) { case 'o': if (!bitmask_isallclear(cpus_chosen)) { printf(_("The argument passed to this tool can't be " "combined with passing a --cpu argument\n")); return -EINVAL; } break; case 0: output_param = 'e'; } ret = 0; /* Default is: show output of base_cpu only */ if (bitmask_isallclear(cpus_chosen)) bitmask_setbit(cpus_chosen, base_cpu); switch (output_param) { case -1: printf(_("You can't specify more than one --cpu parameter and/or\n" "more than one output-specific argument\n")); return -EINVAL; case '?': printf(_("invalid or unknown argument\n")); return -EINVAL; case 'o': proc_cpufreq_output(); return EXIT_SUCCESS; } for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu)) continue; printf(_("analyzing CPU %d:\n"), cpu); if (sysfs_is_cpu_online(cpu) != 1) { printf(_(" *is offline\n")); printf("\n"); continue; } switch (output_param) { case 'b': get_boost_mode(cpu); break; case 'e': debug_output_one(cpu); break; case 'a': ret = get_affected_cpus(cpu); break; case 'r': ret = get_related_cpus(cpu); break; case 'g': ret = get_available_governors(cpu); break; case 'p': ret = get_policy(cpu); break; case 'd': ret = get_driver(cpu); break; case 'l': ret = get_hardware_limits(cpu, human); 
break; case 'w': ret = get_freq_hardware(cpu, human); break; case 'f': ret = get_freq_kernel(cpu, human); break; case 's': ret = get_freq_stats(cpu, human); break; case 'y': ret = get_latency(cpu, human); break; case 'c': ret = get_perf_cap(cpu); break; } if (ret) return ret; } return ret; }
linux-master
tools/power/cpupower/utils/cpufreq-info.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc. * * Ideas taken over from the perf userspace tool (included in the Linus * kernel git repo): subcommand builtins and param parsing. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <sched.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/utsname.h> #include "builtin.h" #include "helpers/helpers.h" #include "helpers/bitmask.h" #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) static int cmd_help(int argc, const char **argv); /* Global cpu_info object available for all binaries * Info only retrieved from CPU 0 * * Values will be zero/unknown on non X86 archs */ struct cpupower_cpu_info cpupower_cpu_info; int run_as_root; int base_cpu; /* Affected cpus chosen by -c/--cpu param */ struct bitmask *cpus_chosen; struct bitmask *online_cpus; struct bitmask *offline_cpus; #ifdef DEBUG int be_verbose; #endif static void print_help(void); struct cmd_struct { const char *cmd; int (*main)(int, const char **); int needs_root; }; static struct cmd_struct commands[] = { { "frequency-info", cmd_freq_info, 0 }, { "frequency-set", cmd_freq_set, 1 }, { "idle-info", cmd_idle_info, 0 }, { "idle-set", cmd_idle_set, 1 }, { "powercap-info", cmd_cap_info, 0 }, { "set", cmd_set, 1 }, { "info", cmd_info, 0 }, { "monitor", cmd_monitor, 0 }, { "help", cmd_help, 0 }, /* { "bench", cmd_bench, 1 }, */ }; static void print_help(void) { unsigned int i; #ifdef DEBUG printf(_("Usage:\tcpupower [-d|--debug] [-c|--cpu cpulist ] <command> [<args>]\n")); #else printf(_("Usage:\tcpupower [-c|--cpu cpulist ] <command> [<args>]\n")); #endif printf(_("Supported commands are:\n")); for (i = 0; i < ARRAY_SIZE(commands); i++) printf("\t%s\n", commands[i].cmd); printf(_("\nNot all commands can make use of the -c cpulist option.\n")); printf(_("\nUse 'cpupower help <command>' for getting help for above commands.\n")); } static int 
print_man_page(const char *subpage) { int len; char *page; len = 10; /* enough for "cpupower-" */ if (subpage != NULL) len += strlen(subpage); page = malloc(len); if (!page) return -ENOMEM; sprintf(page, "cpupower"); if ((subpage != NULL) && strcmp(subpage, "help")) { strcat(page, "-"); strcat(page, subpage); } execlp("man", "man", page, NULL); /* should not be reached */ return -EINVAL; } static int cmd_help(int argc, const char **argv) { if (argc > 1) { print_man_page(argv[1]); /* exits within execlp() */ return EXIT_FAILURE; } print_help(); return EXIT_SUCCESS; } static void print_version(void) { printf(PACKAGE " " VERSION "\n"); printf(_("Report errors and bugs to %s, please.\n"), PACKAGE_BUGREPORT); } static void handle_options(int *argc, const char ***argv) { int ret, x, new_argc = 0; if (*argc < 1) return; for (x = 0; x < *argc && ((*argv)[x])[0] == '-'; x++) { const char *param = (*argv)[x]; if (!strcmp(param, "-h") || !strcmp(param, "--help")) { print_help(); exit(EXIT_SUCCESS); } else if (!strcmp(param, "-c") || !strcmp(param, "--cpu")) { if (*argc < 2) { print_help(); exit(EXIT_FAILURE); } if (!strcmp((*argv)[x+1], "all")) bitmask_setall(cpus_chosen); else { ret = bitmask_parselist( (*argv)[x+1], cpus_chosen); if (ret < 0) { fprintf(stderr, _("Error parsing cpu " "list\n")); exit(EXIT_FAILURE); } } x += 1; /* Cut out param: cpupower -c 1 info -> cpupower info */ new_argc += 2; continue; } else if (!strcmp(param, "-v") || !strcmp(param, "--version")) { print_version(); exit(EXIT_SUCCESS); #ifdef DEBUG } else if (!strcmp(param, "-d") || !strcmp(param, "--debug")) { be_verbose = 1; new_argc++; continue; #endif } else { fprintf(stderr, "Unknown option: %s\n", param); print_help(); exit(EXIT_FAILURE); } } *argc -= new_argc; *argv += new_argc; } int main(int argc, const char *argv[]) { const char *cmd; unsigned int i, ret; struct stat statbuf; struct utsname uts; char pathname[32]; cpus_chosen = bitmask_alloc(sysconf(_SC_NPROCESSORS_CONF)); online_cpus = 
bitmask_alloc(sysconf(_SC_NPROCESSORS_CONF)); offline_cpus = bitmask_alloc(sysconf(_SC_NPROCESSORS_CONF)); argc--; argv += 1; handle_options(&argc, &argv); cmd = argv[0]; if (argc < 1) { print_help(); return EXIT_FAILURE; } setlocale(LC_ALL, ""); textdomain(PACKAGE); /* Turn "perf cmd --help" into "perf help cmd" */ if (argc > 1 && !strcmp(argv[1], "--help")) { argv[1] = argv[0]; argv[0] = cmd = "help"; } base_cpu = sched_getcpu(); if (base_cpu < 0) { fprintf(stderr, _("No valid cpus found.\n")); return EXIT_FAILURE; } get_cpu_info(&cpupower_cpu_info); run_as_root = !geteuid(); if (run_as_root) { ret = uname(&uts); sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); if (!ret && !strcmp(uts.machine, "x86_64") && stat(pathname, &statbuf) != 0) { if (system("modprobe msr") == -1) fprintf(stderr, _("MSR access not available.\n")); } } for (i = 0; i < ARRAY_SIZE(commands); i++) { struct cmd_struct *p = commands + i; if (strcmp(p->cmd, cmd)) continue; if (!run_as_root && p->needs_root) { fprintf(stderr, _("Subcommand %s needs root " "privileges\n"), cmd); return EXIT_FAILURE; } ret = p->main(argc, argv); if (cpus_chosen) bitmask_free(cpus_chosen); if (online_cpus) bitmask_free(online_cpus); if (offline_cpus) bitmask_free(offline_cpus); return ret; } print_help(); return EXIT_FAILURE; }
linux-master
tools/power/cpupower/utils/cpupower.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2011 Thomas Renninger <[email protected]>, Novell Inc. */ #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <getopt.h> #include <sys/utsname.h> #include "helpers/helpers.h" #include "helpers/sysfs.h" static struct option set_opts[] = { {"perf-bias", optional_argument, NULL, 'b'}, { }, }; static void print_wrong_arg_exit(void) { printf(_("invalid or unknown argument\n")); exit(EXIT_FAILURE); } int cmd_info(int argc, char **argv) { extern char *optarg; extern int optind, opterr, optopt; unsigned int cpu; struct utsname uts; union { struct { int perf_bias:1; }; int params; } params = {}; int ret = 0; ret = uname(&uts); if (!ret && (!strcmp(uts.machine, "ppc64le") || !strcmp(uts.machine, "ppc64"))) { fprintf(stderr, _("Subcommand not supported on POWER.\n")); return ret; } setlocale(LC_ALL, ""); textdomain(PACKAGE); /* parameter parsing */ while ((ret = getopt_long(argc, argv, "b", set_opts, NULL)) != -1) { switch (ret) { case 'b': if (params.perf_bias) print_wrong_arg_exit(); params.perf_bias = 1; break; default: print_wrong_arg_exit(); } } if (!params.params) params.params = 0x7; /* Default is: show output of base_cpu only */ if (bitmask_isallclear(cpus_chosen)) bitmask_setbit(cpus_chosen, base_cpu); /* Add more per cpu options here */ if (!params.perf_bias) return ret; if (params.perf_bias) { if (!run_as_root) { params.perf_bias = 0; printf(_("Intel's performance bias setting needs root privileges\n")); } else if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) { printf(_("System does not support Intel's performance" " bias setting\n")); params.perf_bias = 0; } } /* loop over CPUs */ for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu)) continue; printf(_("analyzing CPU %d:\n"), cpu); if (sysfs_is_cpu_online(cpu) != 1){ printf(_(" *is offline\n")); continue; } if (params.perf_bias) { ret = 
cpupower_intel_get_perf_bias(cpu); if (ret < 0) { fprintf(stderr, _("Could not read perf-bias value[%d]\n"), ret); exit(EXIT_FAILURE); } else printf(_("perf-bias: %d\n"), ret); } } return 0; }
linux-master
tools/power/cpupower/utils/cpupower-info.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> */ #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include <ctype.h> #include <getopt.h> #include "cpufreq.h" #include "cpuidle.h" #include "helpers/helpers.h" #define NORM_FREQ_LEN 32 static struct option set_opts[] = { {"min", required_argument, NULL, 'd'}, {"max", required_argument, NULL, 'u'}, {"governor", required_argument, NULL, 'g'}, {"freq", required_argument, NULL, 'f'}, {"related", no_argument, NULL, 'r'}, { }, }; static void print_error(void) { printf(_("Error setting new values. Common errors:\n" "- Do you have proper administration rights? (super-user?)\n" "- Is the governor you requested available and modprobed?\n" "- Trying to set an invalid policy?\n" "- Trying to set a specific frequency, but userspace governor is not available,\n" " for example because of hardware which cannot be set to a specific frequency\n" " or because the userspace governor isn't loaded?\n")); }; struct freq_units { char *str_unit; int power_of_ten; }; const struct freq_units def_units[] = { {"hz", -3}, {"khz", 0}, /* default */ {"mhz", 3}, {"ghz", 6}, {"thz", 9}, {NULL, 0} }; static void print_unknown_arg(void) { printf(_("invalid or unknown argument\n")); } static unsigned long string_to_frequency(const char *str) { char normalized[NORM_FREQ_LEN]; const struct freq_units *unit; const char *scan; char *end; unsigned long freq; int power = 0, match_count = 0, i, cp, pad; while (*str == '0') str++; for (scan = str; isdigit(*scan) || *scan == '.'; scan++) { if (*scan == '.' && match_count == 0) match_count = 1; else if (*scan == '.' 
&& match_count == 1) return 0; } if (*scan) { match_count = 0; for (unit = def_units; unit->str_unit; unit++) { for (i = 0; scan[i] && tolower(scan[i]) == unit->str_unit[i]; ++i) continue; if (scan[i]) continue; match_count++; power = unit->power_of_ten; } if (match_count != 1) return 0; } /* count the number of digits to be copied */ for (cp = 0; isdigit(str[cp]); cp++) continue; if (str[cp] == '.') { while (power > -1 && isdigit(str[cp+1])) { cp++; power--; } } if (power >= -1) { /* not enough => pad */ pad = power + 1; } else { /* too much => strip */ pad = 0; cp += power + 1; } /* check bounds */ if (cp <= 0 || cp + pad > NORM_FREQ_LEN - 1) return 0; /* copy digits */ for (i = 0; i < cp; i++, str++) { if (*str == '.') str++; normalized[i] = *str; } /* and pad */ for (; i < cp + pad; i++) normalized[i] = '0'; /* round up, down ? */ match_count = (normalized[i-1] >= '5'); /* and drop the decimal part */ normalized[i-1] = 0; /* cp > 0 && pad >= 0 ==> i > 0 */ /* final conversion (and applying rounding) */ errno = 0; freq = strtoul(normalized, &end, 10); if (errno) return 0; else { if (match_count && freq != ULONG_MAX) freq++; return freq; } } static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol) { struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu); int ret; if (!cur_pol) { printf(_("wrong, unknown or unhandled CPU?\n")); return -EINVAL; } if (!new_pol->min) new_pol->min = cur_pol->min; if (!new_pol->max) new_pol->max = cur_pol->max; if (!new_pol->governor) new_pol->governor = cur_pol->governor; ret = cpufreq_set_policy(cpu, new_pol); cpufreq_put_policy(cur_pol); return ret; } static int do_one_cpu(unsigned int cpu, struct cpufreq_policy *new_pol, unsigned long freq, unsigned int pc) { switch (pc) { case 0: return cpufreq_set_frequency(cpu, freq); case 1: /* if only one value of a policy is to be changed, we can * use a "fast path". 
*/ if (new_pol->min) return cpufreq_modify_policy_min(cpu, new_pol->min); else if (new_pol->max) return cpufreq_modify_policy_max(cpu, new_pol->max); else if (new_pol->governor) return cpufreq_modify_policy_governor(cpu, new_pol->governor); default: /* slow path */ return do_new_policy(cpu, new_pol); } } int cmd_freq_set(int argc, char **argv) { extern char *optarg; extern int optind, opterr, optopt; int ret = 0, cont = 1; int double_parm = 0, related = 0, policychange = 0; unsigned long freq = 0; char gov[20]; unsigned int cpu; struct cpufreq_policy new_pol = { .min = 0, .max = 0, .governor = NULL, }; /* parameter parsing */ do { ret = getopt_long(argc, argv, "d:u:g:f:r", set_opts, NULL); switch (ret) { case '?': print_unknown_arg(); return -EINVAL; case -1: cont = 0; break; case 'r': if (related) double_parm++; related++; break; case 'd': if (new_pol.min) double_parm++; policychange++; new_pol.min = string_to_frequency(optarg); if (new_pol.min == 0) { print_unknown_arg(); return -EINVAL; } break; case 'u': if (new_pol.max) double_parm++; policychange++; new_pol.max = string_to_frequency(optarg); if (new_pol.max == 0) { print_unknown_arg(); return -EINVAL; } break; case 'f': if (freq) double_parm++; freq = string_to_frequency(optarg); if (freq == 0) { print_unknown_arg(); return -EINVAL; } break; case 'g': if (new_pol.governor) double_parm++; policychange++; if ((strlen(optarg) < 3) || (strlen(optarg) > 18)) { print_unknown_arg(); return -EINVAL; } if ((sscanf(optarg, "%19s", gov)) != 1) { print_unknown_arg(); return -EINVAL; } new_pol.governor = gov; break; } } while (cont); /* parameter checking */ if (double_parm) { printf("the same parameter was passed more than once\n"); return -EINVAL; } if (freq && policychange) { printf(_("the -f/--freq parameter cannot be combined with -d/--min, -u/--max or\n" "-g/--governor parameters\n")); return -EINVAL; } if (!freq && !policychange) { printf(_("At least one parameter out of -f/--freq, -d/--min, -u/--max, and\n" 
"-g/--governor must be passed\n")); return -EINVAL; } /* Default is: set all CPUs */ if (bitmask_isallclear(cpus_chosen)) bitmask_setall(cpus_chosen); /* Also set frequency settings for related CPUs if -r is passed */ if (related) { for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { struct cpufreq_affected_cpus *cpus; if (!bitmask_isbitset(cpus_chosen, cpu) || cpupower_is_cpu_online(cpu) != 1) continue; cpus = cpufreq_get_related_cpus(cpu); if (!cpus) break; while (cpus->next) { bitmask_setbit(cpus_chosen, cpus->cpu); cpus = cpus->next; } /* Set the last cpu in related cpus list */ bitmask_setbit(cpus_chosen, cpus->cpu); cpufreq_put_related_cpus(cpus); } } get_cpustate(); /* loop over CPUs */ for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu) || cpupower_is_cpu_online(cpu) != 1) continue; printf(_("Setting cpu: %d\n"), cpu); ret = do_one_cpu(cpu, &new_pol, freq, policychange); if (ret) { print_error(); return ret; } } print_offline_cpus(); return 0; }
linux-master
tools/power/cpupower/utils/cpufreq-set.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2016 SUSE Software Solutions GmbH * Thomas Renninger <[email protected]> */ #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <getopt.h> #include "powercap.h" #include "helpers/helpers.h" int powercap_show_all; static struct option info_opts[] = { { "all", no_argument, NULL, 'a'}, { }, }; static int powercap_print_one_zone(struct powercap_zone *zone) { int mode, i, ret = 0; char pr_prefix[1024] = ""; for (i = 0; i < zone->tree_depth && i < POWERCAP_MAX_TREE_DEPTH; i++) strcat(pr_prefix, "\t"); printf("%sZone: %s", pr_prefix, zone->name); ret = powercap_zone_get_enabled(zone, &mode); if (ret < 0) return ret; printf(" (%s)\n", mode ? "enabled" : "disabled"); if (zone->has_power_uw) printf(_("%sPower can be monitored in micro Jules\n"), pr_prefix); if (zone->has_energy_uj) printf(_("%sPower can be monitored in micro Watts\n"), pr_prefix); printf("\n"); if (ret != 0) return ret; return ret; } static int powercap_show(void) { struct powercap_zone *root_zone; char line[MAX_LINE_LEN] = ""; int ret, val; ret = powercap_get_driver(line, MAX_LINE_LEN); if (ret < 0) { printf(_("No powercapping driver loaded\n")); return ret; } printf("Driver: %s\n", line); ret = powercap_get_enabled(&val); if (ret < 0) return ret; if (!val) { printf(_("Powercapping is disabled\n")); return -1; } printf(_("Powercap domain hierarchy:\n\n")); root_zone = powercap_init_zones(); if (root_zone == NULL) { printf(_("No powercap info found\n")); return 1; } powercap_walk_zones(root_zone, powercap_print_one_zone); return 0; } int cmd_cap_set(int argc, char **argv) { return 0; }; int cmd_cap_info(int argc, char **argv) { int ret = 0, cont = 1; do { ret = getopt_long(argc, argv, "a", info_opts, NULL); switch (ret) { case '?': cont = 0; break; case -1: cont = 0; break; case 'a': powercap_show_all = 1; break; default: fprintf(stderr, _("invalid or unknown argument\n")); return 
EXIT_FAILURE; } } while (cont); powercap_show(); return 0; }
linux-master
tools/power/cpupower/utils/powercap-info.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2011 Thomas Renninger <[email protected]>, Novell Inc. */ #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <getopt.h> #include <sys/utsname.h> #include "helpers/helpers.h" #include "helpers/sysfs.h" #include "helpers/bitmask.h" static struct option set_opts[] = { {"perf-bias", required_argument, NULL, 'b'}, {"epp", required_argument, NULL, 'e'}, {"amd-pstate-mode", required_argument, NULL, 'm'}, {"turbo-boost", required_argument, NULL, 't'}, { }, }; static void print_wrong_arg_exit(void) { printf(_("invalid or unknown argument\n")); exit(EXIT_FAILURE); } int cmd_set(int argc, char **argv) { extern char *optarg; extern int optind, opterr, optopt; unsigned int cpu; struct utsname uts; union { struct { int perf_bias:1; int epp:1; int mode:1; int turbo_boost:1; }; int params; } params; int perf_bias = 0, turbo_boost = 1; int ret = 0; char epp[30], mode[20]; ret = uname(&uts); if (!ret && (!strcmp(uts.machine, "ppc64le") || !strcmp(uts.machine, "ppc64"))) { fprintf(stderr, _("Subcommand not supported on POWER.\n")); return ret; } setlocale(LC_ALL, ""); textdomain(PACKAGE); params.params = 0; /* parameter parsing */ while ((ret = getopt_long(argc, argv, "b:e:m:", set_opts, NULL)) != -1) { switch (ret) { case 'b': if (params.perf_bias) print_wrong_arg_exit(); perf_bias = atoi(optarg); if (perf_bias < 0 || perf_bias > 15) { printf(_("--perf-bias param out " "of range [0-%d]\n"), 15); print_wrong_arg_exit(); } params.perf_bias = 1; break; case 'e': if (params.epp) print_wrong_arg_exit(); if (sscanf(optarg, "%29s", epp) != 1) { print_wrong_arg_exit(); return -EINVAL; } params.epp = 1; break; case 'm': if (cpupower_cpu_info.vendor != X86_VENDOR_AMD) print_wrong_arg_exit(); if (params.mode) print_wrong_arg_exit(); if (sscanf(optarg, "%19s", mode) != 1) { print_wrong_arg_exit(); return -EINVAL; } params.mode = 1; break; case 't': if (params.turbo_boost) 
print_wrong_arg_exit(); turbo_boost = atoi(optarg); if (turbo_boost < 0 || turbo_boost > 1) { printf("--turbo-boost param out of range [0-1]\n"); print_wrong_arg_exit(); } params.turbo_boost = 1; break; default: print_wrong_arg_exit(); } } if (!params.params) print_wrong_arg_exit(); if (params.mode) { ret = cpupower_set_amd_pstate_mode(mode); if (ret) fprintf(stderr, "Error setting mode\n"); } if (params.turbo_boost) { ret = cpupower_set_turbo_boost(turbo_boost); if (ret) fprintf(stderr, "Error setting turbo-boost\n"); } /* Default is: set all CPUs */ if (bitmask_isallclear(cpus_chosen)) bitmask_setall(cpus_chosen); /* loop over CPUs */ for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu)) continue; if (sysfs_is_cpu_online(cpu) != 1){ fprintf(stderr, _("Cannot set values on CPU %d:"), cpu); fprintf(stderr, _(" *is offline\n")); continue; } if (params.perf_bias) { ret = cpupower_intel_set_perf_bias(cpu, perf_bias); if (ret) { fprintf(stderr, _("Error setting perf-bias " "value on CPU %d\n"), cpu); break; } } if (params.epp) { ret = cpupower_set_epp(cpu, epp); if (ret) { fprintf(stderr, "Error setting epp value on CPU %d\n", cpu); break; } } } return ret; }
linux-master
tools/power/cpupower/utils/cpupower-set.c
// SPDX-License-Identifier: GPL-2.0 #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include <ctype.h> #include <getopt.h> #include <cpufreq.h> #include <cpuidle.h> #include "helpers/helpers.h" static struct option info_opts[] = { {"disable", required_argument, NULL, 'd'}, {"enable", required_argument, NULL, 'e'}, {"disable-by-latency", required_argument, NULL, 'D'}, {"enable-all", no_argument, NULL, 'E'}, { }, }; int cmd_idle_set(int argc, char **argv) { extern char *optarg; extern int optind, opterr, optopt; int ret = 0, cont = 1, param = 0, disabled; unsigned long long latency = 0, state_latency; unsigned int cpu = 0, idlestate = 0, idlestates = 0; char *endptr; do { ret = getopt_long(argc, argv, "d:e:ED:", info_opts, NULL); if (ret == -1) break; switch (ret) { case '?': param = '?'; cont = 0; break; case 'd': case 'e': if (param) { param = -1; cont = 0; break; } param = ret; strtol(optarg, &endptr, 10); if (*endptr != '\0') { printf(_("Bad value: %s, Integer expected\n"), optarg); exit(EXIT_FAILURE); } else { idlestate = atoi(optarg); } break; case 'D': if (param) { param = -1; cont = 0; break; } param = ret; latency = strtoull(optarg, &endptr, 10); if (*endptr != '\0') { printf(_("Bad latency value: %s\n"), optarg); exit(EXIT_FAILURE); } break; case 'E': if (param) { param = -1; cont = 0; break; } param = ret; break; case -1: cont = 0; break; } } while (cont); switch (param) { case -1: printf(_("You can't specify more than one " "output-specific argument\n")); exit(EXIT_FAILURE); case '?': printf(_("invalid or unknown argument\n")); exit(EXIT_FAILURE); } get_cpustate(); /* Default is: set all CPUs */ if (bitmask_isallclear(cpus_chosen)) bitmask_setall(cpus_chosen); for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (!bitmask_isbitset(cpus_chosen, cpu)) continue; if (cpupower_is_cpu_online(cpu) != 1) continue; idlestates = cpuidle_state_count(cpu); if 
(idlestates <= 0) continue; switch (param) { case 'd': ret = cpuidle_state_disable(cpu, idlestate, 1); if (ret == 0) printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); else if (ret == -1) printf(_("Idlestate %u not available on CPU %u\n"), idlestate, cpu); else if (ret == -2) printf(_("Idlestate disabling not supported by kernel\n")); else printf(_("Idlestate %u not disabled on CPU %u\n"), idlestate, cpu); break; case 'e': ret = cpuidle_state_disable(cpu, idlestate, 0); if (ret == 0) printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); else if (ret == -1) printf(_("Idlestate %u not available on CPU %u\n"), idlestate, cpu); else if (ret == -2) printf(_("Idlestate enabling not supported by kernel\n")); else printf(_("Idlestate %u not enabled on CPU %u\n"), idlestate, cpu); break; case 'D': for (idlestate = 0; idlestate < idlestates; idlestate++) { disabled = cpuidle_is_state_disabled (cpu, idlestate); state_latency = cpuidle_state_latency (cpu, idlestate); if (disabled == 1) { if (latency > state_latency){ ret = cpuidle_state_disable (cpu, idlestate, 0); if (ret == 0) printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); } continue; } if (latency <= state_latency){ ret = cpuidle_state_disable (cpu, idlestate, 1); if (ret == 0) printf(_("Idlestate %u disabled on CPU %u\n"), idlestate, cpu); } } break; case 'E': for (idlestate = 0; idlestate < idlestates; idlestate++) { disabled = cpuidle_is_state_disabled (cpu, idlestate); if (disabled == 1) { ret = cpuidle_state_disable (cpu, idlestate, 0); if (ret == 0) printf(_("Idlestate %u enabled on CPU %u\n"), idlestate, cpu); } } break; default: /* Not reachable with proper args checking */ printf(_("Invalid or unknown argument\n")); exit(EXIT_FAILURE); break; } } print_offline_cpus(); return EXIT_SUCCESS; }
linux-master
tools/power/cpupower/utils/cpuidle-set.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <limits.h> #include <cpuidle.h> #include "helpers/helpers.h" #include "idle_monitor/cpupower-monitor.h" #define CPUIDLE_STATES_MAX 10 static cstate_t cpuidle_cstates[CPUIDLE_STATES_MAX]; struct cpuidle_monitor cpuidle_sysfs_monitor; static unsigned long long **previous_count; static unsigned long long **current_count; static struct timespec start_time; static unsigned long long timediff; static int cpuidle_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { unsigned long long statediff = current_count[cpu][id] - previous_count[cpu][id]; dprint("%s: - diff: %llu - percent: %f (%u)\n", cpuidle_cstates[id].name, timediff, *percent, cpu); if (timediff == 0) *percent = 0.0; else *percent = ((100.0 * statediff) / timediff); dprint("%s: - timediff: %llu - statediff: %llu - percent: %f (%u)\n", cpuidle_cstates[id].name, timediff, statediff, *percent, cpu); return 0; } static int cpuidle_start(void) { int cpu, state; clock_gettime(CLOCK_REALTIME, &start_time); for (cpu = 0; cpu < cpu_count; cpu++) { for (state = 0; state < cpuidle_sysfs_monitor.hw_states_num; state++) { previous_count[cpu][state] = cpuidle_state_time(cpu, state); dprint("CPU %d - State: %d - Val: %llu\n", cpu, state, previous_count[cpu][state]); } } return 0; } static int cpuidle_stop(void) { int cpu, state; struct timespec end_time; clock_gettime(CLOCK_REALTIME, &end_time); timediff = timespec_diff_us(start_time, end_time); for (cpu = 0; cpu < cpu_count; cpu++) { for (state = 0; state < cpuidle_sysfs_monitor.hw_states_num; state++) { current_count[cpu][state] = cpuidle_state_time(cpu, state); dprint("CPU %d - State: %d - Val: %llu\n", cpu, state, previous_count[cpu][state]); } } return 0; } void fix_up_intel_idle_driver_name(char *tmp, int num) { /* fix up cpuidle name for intel idle driver 
*/ if (!strncmp(tmp, "NHM-", 4)) { switch (num) { case 1: strcpy(tmp, "C1"); break; case 2: strcpy(tmp, "C3"); break; case 3: strcpy(tmp, "C6"); break; } } else if (!strncmp(tmp, "SNB-", 4)) { switch (num) { case 1: strcpy(tmp, "C1"); break; case 2: strcpy(tmp, "C3"); break; case 3: strcpy(tmp, "C6"); break; case 4: strcpy(tmp, "C7"); break; } } else if (!strncmp(tmp, "ATM-", 4)) { switch (num) { case 1: strcpy(tmp, "C1"); break; case 2: strcpy(tmp, "C2"); break; case 3: strcpy(tmp, "C4"); break; case 4: strcpy(tmp, "C6"); break; } } } #ifdef __powerpc__ void map_power_idle_state_name(char *tmp) { if (!strncmp(tmp, "stop0_lite", CSTATE_NAME_LEN)) strcpy(tmp, "stop0L"); else if (!strncmp(tmp, "stop1_lite", CSTATE_NAME_LEN)) strcpy(tmp, "stop1L"); else if (!strncmp(tmp, "stop2_lite", CSTATE_NAME_LEN)) strcpy(tmp, "stop2L"); } #else void map_power_idle_state_name(char *tmp) { } #endif static struct cpuidle_monitor *cpuidle_register(void) { int num; char *tmp; int this_cpu; this_cpu = sched_getcpu(); /* Assume idle state count is the same for all CPUs */ cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu); if (cpuidle_sysfs_monitor.hw_states_num <= 0) return NULL; for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { tmp = cpuidle_state_name(this_cpu, num); if (tmp == NULL) continue; map_power_idle_state_name(tmp); fix_up_intel_idle_driver_name(tmp, num); strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1); free(tmp); tmp = cpuidle_state_desc(this_cpu, num); if (tmp == NULL) continue; strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1); free(tmp); cpuidle_cstates[num].range = RANGE_THREAD; cpuidle_cstates[num].id = num; cpuidle_cstates[num].get_count_percent = cpuidle_get_count_percent; } /* Free this at program termination */ previous_count = malloc(sizeof(long long *) * cpu_count); current_count = malloc(sizeof(long long *) * cpu_count); for (num = 0; num < cpu_count; num++) { previous_count[num] = malloc(sizeof(long 
long) * cpuidle_sysfs_monitor.hw_states_num); current_count[num] = malloc(sizeof(long long) * cpuidle_sysfs_monitor.hw_states_num); } cpuidle_sysfs_monitor.name_len = strlen(cpuidle_sysfs_monitor.name); return &cpuidle_sysfs_monitor; } void cpuidle_unregister(void) { int num; for (num = 0; num < cpu_count; num++) { free(previous_count[num]); free(current_count[num]); } free(previous_count); free(current_count); } struct cpuidle_monitor cpuidle_sysfs_monitor = { .name = "Idle_Stats", .hw_states = cpuidle_cstates, .start = cpuidle_start, .stop = cpuidle_stop, .do_register = cpuidle_register, .unregister = cpuidle_unregister, .flags.needs_root = 0, .overflow_s = UINT_MAX, };
linux-master
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc.
 */
#if defined(__i386__) || defined(__x86_64__)

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <cpufreq.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"

#define MSR_APERF	0xE8
#define MSR_MPERF	0xE7

/* RDPRU opcode bytes; reads APERF/MPERF from user space on AMD Zen2+. */
#define RDPRU ".byte 0x0f, 0x01, 0xfd"
#define RDPRU_ECX_MPERF	0
#define RDPRU_ECX_APERF	1

#define MSR_TSC	0x10

#define MSR_AMD_HWCR 0xc0010015

enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT };

static int mperf_get_count_percent(unsigned int self_id, double *percent,
				   unsigned int cpu);
static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
				unsigned int cpu);
static struct timespec time_start, time_end;

/* Pseudo c-states exported by this monitor: C0, Cx and average frequency. */
static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
	{
		.name			= "C0",
		.desc			= N_("Processor Core not idle"),
		.id			= C0,
		.range			= RANGE_THREAD,
		.get_count_percent	= mperf_get_count_percent,
	},
	{
		.name			= "Cx",
		.desc			= N_("Processor Core in an idle state"),
		.id			= Cx,
		.range			= RANGE_THREAD,
		.get_count_percent	= mperf_get_count_percent,
	},
	{
		.name			= "Freq",
		.desc			= N_("Average Frequency (including boost) in MHz"),
		.id			= AVG_FREQ,
		.range			= RANGE_THREAD,
		.get_count		= mperf_get_count_freq,
	},
};

enum MAX_FREQ_MODE { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF };
static int max_freq_mode;
/*
 * The max frequency mperf is ticking at (in C0), either retrieved via:
 *   1) calculated after measurements if we know TSC ticks at mperf/P0 frequency
 *   2) cpufreq /sys/devices/.../cpu0/cpufreq/cpuinfo_max_freq at init time
 * 1. Is preferred as it also works without cpufreq subsystem (e.g. on Xen)
 */
static unsigned long max_frequency;

/* Per-CPU counter snapshots taken at interval start/end. */
static unsigned long long *tsc_at_measure_start;
static unsigned long long *tsc_at_measure_end;
static unsigned long long *mperf_previous_count;
static unsigned long long *aperf_previous_count;
static unsigned long long *mperf_current_count;
static unsigned long long *aperf_current_count;

/* valid flag for all CPUs. If a MSR read failed it will be zero */
static int *is_valid;

/* Read the TSC MSR of base_cpu; returns read_msr()'s status. */
static int mperf_get_tsc(unsigned long long *tsc)
{
	int ret;

	ret = read_msr(base_cpu, MSR_TSC, tsc);
	if (ret)
		dprint("Reading TSC MSR failed, returning %llu\n", *tsc);
	return ret;
}

/*
 * Read APERF and MPERF for @cpu, via RDPRU where supported,
 * otherwise via the MSR interface. Returns non-zero on failure.
 */
static int get_aperf_mperf(int cpu, unsigned long long *aval,
			   unsigned long long *mval)
{
	unsigned long low_a, high_a;
	unsigned long low_m, high_m;
	int ret;

	/*
	 * Running on the cpu from which we read the registers will
	 * prevent APERF/MPERF from going out of sync because of IPI
	 * latency introduced by read_msr()s.
	 */
	if (mperf_monitor.flags.per_cpu_schedule) {
		if (bind_cpu(cpu))
			return 1;
	}

	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_RDPRU) {
		asm volatile(RDPRU
			     : "=a" (low_a), "=d" (high_a)
			     : "c" (RDPRU_ECX_APERF));
		asm volatile(RDPRU
			     : "=a" (low_m), "=d" (high_m)
			     : "c" (RDPRU_ECX_MPERF));

		*aval = ((low_a) | (high_a) << 32);
		*mval = ((low_m) | (high_m) << 32);

		return 0;
	}

	ret = read_msr(cpu, MSR_APERF, aval);
	ret |= read_msr(cpu, MSR_MPERF, mval);

	return ret;
}

/* Snapshot APERF/MPERF at interval start; marks the CPU invalid on failure. */
static int mperf_init_stats(unsigned int cpu)
{
	unsigned long long aval, mval;
	int ret;

	ret = get_aperf_mperf(cpu, &aval, &mval);
	aperf_previous_count[cpu] = aval;
	mperf_previous_count[cpu] = mval;
	is_valid[cpu] = !ret;

	return 0;
}

/* Snapshot APERF/MPERF at interval end; marks the CPU invalid on failure. */
static int mperf_measure_stats(unsigned int cpu)
{
	unsigned long long aval, mval;
	int ret;

	ret = get_aperf_mperf(cpu, &aval, &mval);
	aperf_current_count[cpu] = aval;
	mperf_current_count[cpu] = mval;
	is_valid[cpu] = !ret;

	return 0;
}

/*
 * Percent of the interval spent in C0 (mperf ticking) resp. Cx (100 - C0),
 * measured against either the TSC delta or sysfs max frequency * wall time,
 * depending on max_freq_mode.
 */
static int mperf_get_count_percent(unsigned int id, double *percent,
				   unsigned int cpu)
{
	unsigned long long aperf_diff, mperf_diff, tsc_diff;
	unsigned long long timediff;

	if (!is_valid[cpu])
		return -1;

	if (id != C0 && id != Cx)
		return -1;

	mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
	aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];

	if (max_freq_mode == MAX_FREQ_TSC_REF) {
		tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
		*percent = 100.0 * mperf_diff / tsc_diff;
		dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
		       mperf_cstates[id].name, mperf_diff, tsc_diff);
	} else if (max_freq_mode == MAX_FREQ_SYSFS) {
		timediff = max_frequency * timespec_diff_us(time_start, time_end);
		*percent = 100.0 * mperf_diff / timediff;
		dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n",
		       mperf_cstates[id].name, mperf_diff, timediff);
	} else
		return -1;

	if (id == Cx)
		*percent = 100.0 - *percent;

	dprint("%s: previous: %llu - current: %llu - (%u)\n",
	       mperf_cstates[id].name, mperf_diff, aperf_diff, cpu);
	dprint("%s: %f\n", mperf_cstates[id].name, *percent);
	return 0;
}

/*
 * Average running frequency over the interval:
 * max_frequency * (aperf_diff / mperf_diff), in MHz.
 * With TSC reference, max_frequency itself is derived from the TSC delta.
 */
static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
				unsigned int cpu)
{
	unsigned long long aperf_diff, mperf_diff, time_diff, tsc_diff;

	if (id != AVG_FREQ)
		return 1;

	if (!is_valid[cpu])
		return -1;

	mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
	aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];

	if (max_freq_mode == MAX_FREQ_TSC_REF) {
		/* Calculate max_freq from TSC count */
		tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
		time_diff = timespec_diff_us(time_start, time_end);
		max_frequency = tsc_diff / time_diff;
	}

	*count = max_frequency * ((double)aperf_diff / mperf_diff);
	dprint("%s: Average freq based on %s maximum frequency:\n",
	       mperf_cstates[id].name,
	       (max_freq_mode == MAX_FREQ_TSC_REF) ? "TSC calculated" : "sysfs read");
	dprint("max_frequency: %lu\n", max_frequency);
	dprint("aperf_diff: %llu\n", aperf_diff);
	dprint("mperf_diff: %llu\n", mperf_diff);
	dprint("avg freq: %llu\n", *count);
	return 0;
}

/* Record start timestamps and start-of-interval counters for all CPUs. */
static int mperf_start(void)
{
	int cpu;

	clock_gettime(CLOCK_REALTIME, &time_start);

	for (cpu = 0; cpu < cpu_count; cpu++) {
		mperf_get_tsc(&tsc_at_measure_start[cpu]);
		mperf_init_stats(cpu);
	}

	return 0;
}

/* Record end-of-interval counters and the end timestamp for all CPUs. */
static int mperf_stop(void)
{
	int cpu;

	for (cpu = 0; cpu < cpu_count; cpu++) {
		mperf_measure_stats(cpu);
		mperf_get_tsc(&tsc_at_measure_end[cpu]);
	}

	clock_gettime(CLOCK_REALTIME, &time_end);
	return 0;
}

/*
 * Mperf register is defined to tick at P0 (maximum) frequency
 *
 * Instead of reading out P0 which can be tricky to read out from HW,
 * we use TSC counter if it reliably ticks at P0/mperf frequency.
 *
 * Still try to fall back to:
 * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq
 * on older Intel HW without invariant TSC feature.
 * Or on AMD machines where TSC does not tick at P0 (do not exist yet, but
 * it's still double checked (MSR_AMD_HWCR)).
 *
 * On these machines the user would still get useful mperf
 * stats when acpi-cpufreq driver is loaded.
 */
static int init_maxfreq_mode(void)
{
	int ret;
	unsigned long long hwcr;
	unsigned long min;

	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
		goto use_sysfs;

	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
	    cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
		/* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
		 * freq.
		 * A test whether hwcr is accessable/available would be:
		 * (cpupower_cpu_info.family > 0x10 ||
		 *   cpupower_cpu_info.family == 0x10 &&
		 *   cpupower_cpu_info.model >= 0x2))
		 * This should be the case for all aperf/mperf
		 * capable AMD machines and is therefore safe to test here.
		 * Compare with Linus kernel git commit: acf01734b1747b1ec4
		 */
		ret = read_msr(0, MSR_AMD_HWCR, &hwcr);
		/*
		 * If the MSR read failed, assume a Xen system that did
		 * not explicitly provide access to it and assume TSC works
		 */
		if (ret != 0) {
			dprint("TSC read 0x%x failed - assume TSC working\n",
			       MSR_AMD_HWCR);
			return 0;
		} else if (1 & (hwcr >> 24)) {
			max_freq_mode = MAX_FREQ_TSC_REF;
			return 0;
		} else { /* Use sysfs max frequency if available */ }
	} else if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) {
		/*
		 * On Intel we assume mperf (in C0) is ticking at same
		 * rate than TSC
		 */
		max_freq_mode = MAX_FREQ_TSC_REF;
		return 0;
	}
use_sysfs:
	if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) {
		dprint("Cannot retrieve max freq from cpufreq kernel "
		       "subsystem\n");
		return -1;
	}
	max_freq_mode = MAX_FREQ_SYSFS;
	max_frequency /= 1000; /* Default automatically to MHz value */
	return 0;
}

/*
 * This monitor provides:
 *
 * 1) Average frequency a CPU resided in
 *    This always works if the CPU has aperf/mperf capabilities
 *
 * 2) C0 and Cx (any sleep state) time a CPU resided in
 *    Works if mperf timer stops ticking in sleep states which
 *    seem to be the case on all current HW.
 * Both is directly retrieved from HW registers and is independent
 * from kernel statistics.
 */
struct cpuidle_monitor mperf_monitor;
struct cpuidle_monitor *mperf_register(void)
{
	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
		return NULL;

	if (init_maxfreq_mode())
		return NULL;

	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
		mperf_monitor.flags.per_cpu_schedule = 1;

	/* Free this at program termination */
	is_valid = calloc(cpu_count, sizeof(int));
	mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
	aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
	mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
	aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
	tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
	tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
	mperf_monitor.name_len = strlen(mperf_monitor.name);
	return &mperf_monitor;
}

/* Release all per-CPU snapshot arrays. */
void mperf_unregister(void)
{
	free(mperf_previous_count);
	free(aperf_previous_count);
	free(mperf_current_count);
	free(aperf_current_count);
	free(tsc_at_measure_start);
	free(tsc_at_measure_end);
	free(is_valid);
}

struct cpuidle_monitor mperf_monitor = {
	.name			= "Mperf",
	.hw_states_num		= MPERF_CSTATE_COUNT,
	.hw_states		= mperf_cstates,
	.start			= mperf_start,
	.stop			= mperf_stop,
	.do_register		= mperf_register,
	.unregister		= mperf_unregister,
	.flags.needs_root	= 1,
	.overflow_s		= 922000000 /* 922337203 seconds TSC overflow
					       at 20GHz */
};
#endif /* #if defined(__i386__) || defined(__x86_64__) */
linux-master
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc.
 *
 * Based on Len Brown's <[email protected]> turbostat tool.
 */
#if defined(__i386__) || defined(__x86_64__)

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "helpers/helpers.h"
#include "idle_monitor/cpupower-monitor.h"

/* Nehalem-era residency MSRs. */
#define MSR_PKG_C3_RESIDENCY	0x3F8
#define MSR_PKG_C6_RESIDENCY	0x3F9
#define MSR_CORE_C3_RESIDENCY	0x3FC
#define MSR_CORE_C6_RESIDENCY	0x3FD

#define MSR_TSC	0x10

#define NHM_CSTATE_COUNT 4

/* Residency counter ids; the first four double as array indices 0..3. */
enum intel_nhm_id { C3 = 0, C6, PC3, PC6, TSC = 0xFFFF };

static int nhm_get_count_percent(unsigned int self_id, double *percent,
				 unsigned int cpu);

static cstate_t nhm_cstates[NHM_CSTATE_COUNT] = {
	{
		.name			= "C3",
		.desc			= N_("Processor Core C3"),
		.id			= C3,
		.range			= RANGE_CORE,
		.get_count_percent	= nhm_get_count_percent,
	},
	{
		.name			= "C6",
		.desc			= N_("Processor Core C6"),
		.id			= C6,
		.range			= RANGE_CORE,
		.get_count_percent	= nhm_get_count_percent,
	},
	{
		.name			= "PC3",
		.desc			= N_("Processor Package C3"),
		.id			= PC3,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= nhm_get_count_percent,
	},
	{
		.name			= "PC6",
		.desc			= N_("Processor Package C6"),
		.id			= PC6,
		.range			= RANGE_PACKAGE,
		.get_count_percent	= nhm_get_count_percent,
	},
};

/* TSC snapshots (base_cpu only) bracketing the measurement interval. */
static unsigned long long tsc_at_measure_start;
static unsigned long long tsc_at_measure_end;
/* Residency snapshots indexed [state][cpu]. */
static unsigned long long *previous_count[NHM_CSTATE_COUNT];
static unsigned long long *current_count[NHM_CSTATE_COUNT];
/* valid flag for all CPUs. If a MSR read failed it will be zero */
static int *is_valid;

/* Read the residency (or TSC) MSR selected by @id into @val for @cpu. */
static int nhm_get_count(enum intel_nhm_id id,
			 unsigned long long *val,
			 unsigned int cpu)
{
	int msr;

	switch (id) {
	case C3:
		msr = MSR_CORE_C3_RESIDENCY;
		break;
	case C6:
		msr = MSR_CORE_C6_RESIDENCY;
		break;
	case PC3:
		msr = MSR_PKG_C3_RESIDENCY;
		break;
	case PC6:
		msr = MSR_PKG_C6_RESIDENCY;
		break;
	case TSC:
		msr = MSR_TSC;
		break;
	default:
		return -1;
	}
	if (read_msr(cpu, msr, val))
		return -1;
	return 0;
}

/*
 * Percent of the interval CPU @cpu spent in state @id: residency delta
 * divided by the TSC delta (residency MSRs count at TSC rate).
 */
static int nhm_get_count_percent(unsigned int id, double *percent,
				 unsigned int cpu)
{
	*percent = 0.0;

	if (!is_valid[cpu])
		return -1;

	*percent = (100.0 *
		    (current_count[id][cpu] - previous_count[id][cpu])) /
		(tsc_at_measure_end - tsc_at_measure_start);

	dprint("%s: previous: %llu - current: %llu - (%u)\n",
	       nhm_cstates[id].name, previous_count[id][cpu],
	       current_count[id][cpu], cpu);

	dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n",
	       nhm_cstates[id].name,
	       (unsigned long long) tsc_at_measure_end - tsc_at_measure_start,
	       current_count[id][cpu] - previous_count[id][cpu],
	       *percent, cpu);

	return 0;
}

/* Snapshot all residency counters and the TSC at interval start. */
static int nhm_start(void)
{
	int num, cpu;
	unsigned long long dbg, val;

	nhm_get_count(TSC, &tsc_at_measure_start, base_cpu);

	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			is_valid[cpu] = !nhm_get_count(num, &val, cpu);
			previous_count[num][cpu] = val;
		}
	}
	nhm_get_count(TSC, &dbg, base_cpu);
	dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
	return 0;
}

/* Snapshot all residency counters and the TSC at interval end. */
static int nhm_stop(void)
{
	unsigned long long val;
	unsigned long long dbg;
	int num, cpu;

	nhm_get_count(TSC, &tsc_at_measure_end, base_cpu);

	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		for (cpu = 0; cpu < cpu_count; cpu++) {
			is_valid[cpu] = !nhm_get_count(num, &val, cpu);
			current_count[num][cpu] = val;
		}
	}
	nhm_get_count(TSC, &dbg, base_cpu);
	dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);

	return 0;
}

struct cpuidle_monitor intel_nhm_monitor;

/*
 * Register this monitor on Intel CPUs with invariant TSC and
 * aperf/mperf support; allocate the per-state per-CPU snapshot arrays.
 */
struct cpuidle_monitor *intel_nhm_register(void)
{
	int num;

	if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
		return NULL;

	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
		return NULL;

	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF))
		return NULL;

	/* Free this at program termination */
	is_valid = calloc(cpu_count, sizeof(int));
	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		previous_count[num] = calloc(cpu_count,
					     sizeof(unsigned long long));
		current_count[num]  = calloc(cpu_count,
					     sizeof(unsigned long long));
	}
	intel_nhm_monitor.name_len = strlen(intel_nhm_monitor.name);
	return &intel_nhm_monitor;
}

/* Release the snapshot arrays allocated in intel_nhm_register(). */
void intel_nhm_unregister(void)
{
	int num;

	for (num = 0; num < NHM_CSTATE_COUNT; num++) {
		free(previous_count[num]);
		free(current_count[num]);
	}
	free(is_valid);
}

struct cpuidle_monitor intel_nhm_monitor = {
	.name			= "Nehalem",
	.hw_states_num		= NHM_CSTATE_COUNT,
	.hw_states		= nhm_cstates,
	.start			= nhm_start,
	.stop			= nhm_stop,
	.do_register		= intel_nhm_register,
	.unregister		= intel_nhm_unregister,
	.flags.needs_root	= 1,
	.overflow_s		= 922000000 /* 922337203 seconds TSC overflow
					       at 20GHz */
};
#endif
linux-master
tools/power/cpupower/utils/idle_monitor/nhm_idle.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc. * * Output format inspired by Len Brown's <[email protected]> turbostat tool. */ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <signal.h> #include <sys/types.h> #include <sys/wait.h> #include <libgen.h> #include "idle_monitor/cpupower-monitor.h" #include "idle_monitor/idle_monitors.h" #include "helpers/helpers.h" /* Define pointers to all monitors. */ #define DEF(x) & x ## _monitor , struct cpuidle_monitor *all_monitors[] = { #include "idle_monitors.def" 0 }; int cpu_count; static struct cpuidle_monitor *monitors[MONITORS_MAX]; static unsigned int avail_monitors; static char *progname; enum operation_mode_e { list = 1, show, show_all }; static int mode; static int interval = 1; static char *show_monitors_param; static struct cpupower_topology cpu_top; static unsigned int wake_cpus; /* ToDo: Document this in the manpage */ static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; static void print_wrong_arg_exit(void) { printf(_("invalid or unknown argument\n")); exit(EXIT_FAILURE); } long long timespec_diff_us(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0) { temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else { temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000); } void print_n_spaces(int n) { int x; for (x = 0; x < n; x++) printf(" "); } /*s is filled with left and right spaces *to make its length atleast n+1 */ int fill_string_with_spaces(char *s, int n) { char *temp; int len = strlen(s); if (len >= n) return -1; temp = malloc(sizeof(char) * (n+1)); for (; len < n; len++) s[len] = ' '; s[len] = '\0'; snprintf(temp, n+1, " %s", s); strcpy(s, temp); free(temp); return 0; } #define 
MAX_COL_WIDTH 6 void print_header(int topology_depth) { int unsigned mon; int state, need_len; cstate_t s; char buf[128] = ""; fill_string_with_spaces(buf, topology_depth * 5 - 1); printf("%s|", buf); for (mon = 0; mon < avail_monitors; mon++) { need_len = monitors[mon]->hw_states_num * (MAX_COL_WIDTH + 1) - 1; if (mon != 0) printf("||"); sprintf(buf, "%s", monitors[mon]->name); fill_string_with_spaces(buf, need_len); printf("%s", buf); } printf("\n"); if (topology_depth > 2) printf(" PKG|"); if (topology_depth > 1) printf("CORE|"); if (topology_depth > 0) printf(" CPU|"); for (mon = 0; mon < avail_monitors; mon++) { if (mon != 0) printf("||"); for (state = 0; state < monitors[mon]->hw_states_num; state++) { if (state != 0) printf("|"); s = monitors[mon]->hw_states[state]; sprintf(buf, "%s", s.name); fill_string_with_spaces(buf, MAX_COL_WIDTH); printf("%s", buf); } printf(" "); } printf("\n"); } void print_results(int topology_depth, int cpu) { unsigned int mon; int state, ret; double percent; unsigned long long result; cstate_t s; /* Be careful CPUs may got resorted for pkg value do not just use cpu */ if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu)) return; if (!cpu_top.core_info[cpu].is_online && cpu_top.core_info[cpu].pkg == -1) return; if (topology_depth > 2) printf("%4d|", cpu_top.core_info[cpu].pkg); if (topology_depth > 1) printf("%4d|", cpu_top.core_info[cpu].core); if (topology_depth > 0) printf("%4d|", cpu_top.core_info[cpu].cpu); for (mon = 0; mon < avail_monitors; mon++) { if (mon != 0) printf("||"); for (state = 0; state < monitors[mon]->hw_states_num; state++) { if (state != 0) printf("|"); s = monitors[mon]->hw_states[state]; if (s.get_count_percent) { ret = s.get_count_percent(s.id, &percent, cpu_top.core_info[cpu].cpu); if (ret) printf("******"); else if (percent >= 100.0) printf("%6.1f", percent); else printf("%6.2f", percent); } else if (s.get_count) { ret = s.get_count(s.id, &result, cpu_top.core_info[cpu].cpu); if (ret) 
printf("******"); else printf("%6llu", result); } else { printf(_("Monitor %s, Counter %s has no count " "function. Implementation error\n"), monitors[mon]->name, s.name); exit(EXIT_FAILURE); } } } /* * The monitor could still provide useful data, for example * AMD HW counters partly sit in PCI config space. * It's up to the monitor plug-in to check .is_online, this one * is just for additional info. */ if (!cpu_top.core_info[cpu].is_online && cpu_top.core_info[cpu].pkg != -1) { printf(_(" *is offline\n")); return; } else printf("\n"); } /* param: string passed by -m param (The list of monitors to show) * * Monitors must have been registered already, matching monitors * are picked out and available monitors array is overridden * with matching ones * * Monitors get sorted in the same order the user passes them */ static void parse_monitor_param(char *param) { unsigned int num; int mon, hits = 0; char *tmp = param, *token; struct cpuidle_monitor *tmp_mons[MONITORS_MAX]; for (mon = 0; mon < MONITORS_MAX; mon++, tmp = NULL) { token = strtok(tmp, ","); if (token == NULL) break; if (strlen(token) >= MONITOR_NAME_LEN) { printf(_("%s: max monitor name length" " (%d) exceeded\n"), token, MONITOR_NAME_LEN); continue; } for (num = 0; num < avail_monitors; num++) { if (!strcmp(monitors[num]->name, token)) { dprint("Found requested monitor: %s\n", token); tmp_mons[hits] = monitors[num]; hits++; } } } if (hits == 0) { printf(_("No matching monitor found in %s, " "try -l option\n"), param); exit(EXIT_FAILURE); } /* Override detected/registerd monitors array with requested one */ memcpy(monitors, tmp_mons, sizeof(struct cpuidle_monitor *) * MONITORS_MAX); avail_monitors = hits; } void list_monitors(void) { unsigned int mon; int state; cstate_t s; for (mon = 0; mon < avail_monitors; mon++) { printf(_("Monitor \"%s\" (%d states) - Might overflow after %u " "s\n"), monitors[mon]->name, monitors[mon]->hw_states_num, monitors[mon]->overflow_s); for (state = 0; state < 
monitors[mon]->hw_states_num; state++) { s = monitors[mon]->hw_states[state]; /* * ToDo show more state capabilities: * percent, time (granlarity) */ printf("%s\t[%c] -> %s\n", s.name, range_abbr[s.range], gettext(s.desc)); } } } int fork_it(char **argv) { int status; unsigned int num; unsigned long long timediff; pid_t child_pid; struct timespec start, end; child_pid = fork(); clock_gettime(CLOCK_REALTIME, &start); for (num = 0; num < avail_monitors; num++) monitors[num]->start(); if (!child_pid) { /* child */ execvp(argv[0], argv); } else { /* parent */ if (child_pid == -1) { perror("fork"); exit(1); } signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); if (waitpid(child_pid, &status, 0) == -1) { perror("wait"); exit(1); } } clock_gettime(CLOCK_REALTIME, &end); for (num = 0; num < avail_monitors; num++) monitors[num]->stop(); timediff = timespec_diff_us(start, end); if (WIFEXITED(status)) printf(_("%s took %.5f seconds and exited with status %d\n"), argv[0], timediff / (1000.0 * 1000), WEXITSTATUS(status)); return 0; } int do_interval_measure(int i) { unsigned int num; int cpu; if (wake_cpus) for (cpu = 0; cpu < cpu_count; cpu++) bind_cpu(cpu); for (num = 0; num < avail_monitors; num++) { dprint("HW C-state residency monitor: %s - States: %d\n", monitors[num]->name, monitors[num]->hw_states_num); monitors[num]->start(); } sleep(i); if (wake_cpus) for (cpu = 0; cpu < cpu_count; cpu++) bind_cpu(cpu); for (num = 0; num < avail_monitors; num++) monitors[num]->stop(); return 0; } static void cmdline(int argc, char *argv[]) { int opt; progname = basename(argv[0]); while ((opt = getopt(argc, argv, "+lci:m:")) != -1) { switch (opt) { case 'l': if (mode) print_wrong_arg_exit(); mode = list; break; case 'i': /* only allow -i with -m or no option */ if (mode && mode != show) print_wrong_arg_exit(); interval = atoi(optarg); break; case 'm': if (mode) print_wrong_arg_exit(); mode = show; show_monitors_param = optarg; break; case 'c': wake_cpus = 1; break; default: 
print_wrong_arg_exit(); } } if (!mode) mode = show_all; } int cmd_monitor(int argc, char **argv) { unsigned int num; struct cpuidle_monitor *test_mon; int cpu; cmdline(argc, argv); cpu_count = get_cpu_topology(&cpu_top); if (cpu_count < 0) { printf(_("Cannot read number of available processors\n")); return EXIT_FAILURE; } if (!cpu_top.core_info[0].is_online) printf("WARNING: at least one cpu is offline\n"); /* Default is: monitor all CPUs */ if (bitmask_isallclear(cpus_chosen)) bitmask_setall(cpus_chosen); dprint("System has up to %d CPU cores\n", cpu_count); for (num = 0; all_monitors[num]; num++) { dprint("Try to register: %s\n", all_monitors[num]->name); test_mon = all_monitors[num]->do_register(); if (test_mon) { if (test_mon->flags.needs_root && !run_as_root) { fprintf(stderr, _("Available monitor %s needs " "root access\n"), test_mon->name); continue; } monitors[avail_monitors] = test_mon; dprint("%s registered\n", all_monitors[num]->name); avail_monitors++; } } if (avail_monitors == 0) { printf(_("No HW Cstate monitors found\n")); return 1; } if (mode == list) { list_monitors(); exit(EXIT_SUCCESS); } if (mode == show) parse_monitor_param(show_monitors_param); dprint("Packages: %d - Cores: %d - CPUs: %d\n", cpu_top.pkgs, cpu_top.cores, cpu_count); /* * if any params left, it must be a command to fork */ if (argc - optind) fork_it(argv + optind); else do_interval_measure(interval); /* ToDo: Topology parsing needs fixing first to do this more generically */ if (cpu_top.pkgs > 1) print_header(3); else print_header(1); for (cpu = 0; cpu < cpu_count; cpu++) { if (cpu_top.pkgs > 1) print_results(3, cpu); else print_results(1, cpu); } for (num = 0; num < avail_monitors; num++) { if (monitors[num]->unregister) monitors[num]->unregister(); } cpu_topology_release(cpu_top); return 0; }
linux-master
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2016 SUSE Software Solutions GmbH * Thomas Renninger <[email protected]> */ #if defined(__i386__) || defined(__x86_64__) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <time.h> #include <string.h> #include <pci/pci.h> #include "idle_monitor/cpupower-monitor.h" #include "helpers/helpers.h" #include "powercap.h" #define MAX_RAPL_ZONES 10 int rapl_zone_count; cstate_t rapl_zones[MAX_RAPL_ZONES]; struct powercap_zone *rapl_zones_pt[MAX_RAPL_ZONES] = { 0 }; unsigned long long rapl_zone_previous_count[MAX_RAPL_ZONES]; unsigned long long rapl_zone_current_count[MAX_RAPL_ZONES]; unsigned long long rapl_max_count; static int rapl_get_count_uj(unsigned int id, unsigned long long *count, unsigned int cpu) { if (rapl_zones_pt[id] == NULL) /* error */ return -1; *count = rapl_zone_current_count[id] - rapl_zone_previous_count[id]; return 0; } static int powercap_count_zones(struct powercap_zone *zone) { uint64_t val; int uj; if (rapl_zone_count >= MAX_RAPL_ZONES) return -1; if (!zone->has_energy_uj) return 0; printf("%s\n", zone->sys_name); uj = powercap_get_energy_uj(zone, &val); printf("%d\n", uj); strncpy(rapl_zones[rapl_zone_count].name, zone->name, CSTATE_NAME_LEN - 1); strcpy(rapl_zones[rapl_zone_count].desc, ""); rapl_zones[rapl_zone_count].id = rapl_zone_count; rapl_zones[rapl_zone_count].range = RANGE_MACHINE; rapl_zones[rapl_zone_count].get_count = rapl_get_count_uj; rapl_zones_pt[rapl_zone_count] = zone; rapl_zone_count++; return 0; } static int rapl_start(void) { int i, ret; uint64_t uj_val; for (i = 0; i < rapl_zone_count; i++) { ret = powercap_get_energy_uj(rapl_zones_pt[i], &uj_val); if (ret) return ret; rapl_zone_previous_count[i] = uj_val; } return 0; } static int rapl_stop(void) { int i; uint64_t uj_val; for (i = 0; i < rapl_zone_count; i++) { int ret; ret = powercap_get_energy_uj(rapl_zones_pt[i], &uj_val); if (ret) return ret; rapl_zone_current_count[i] = uj_val; if (rapl_max_count < 
uj_val) rapl_max_count = uj_val - rapl_zone_previous_count[i]; } return 0; } struct cpuidle_monitor *rapl_register(void) { struct powercap_zone *root_zone; char line[MAX_LINE_LEN] = ""; int ret, val; ret = powercap_get_driver(line, MAX_LINE_LEN); if (ret < 0) { dprint("No powercapping driver loaded\n"); return NULL; } dprint("Driver: %s\n", line); ret = powercap_get_enabled(&val); if (ret < 0) return NULL; if (!val) { dprint("Powercapping is disabled\n"); return NULL; } dprint("Powercap domain hierarchy:\n\n"); root_zone = powercap_init_zones(); if (root_zone == NULL) { dprint("No powercap info found\n"); return NULL; } powercap_walk_zones(root_zone, powercap_count_zones); rapl_monitor.hw_states_num = rapl_zone_count; return &rapl_monitor; } struct cpuidle_monitor rapl_monitor = { .name = "RAPL", .hw_states = rapl_zones, .hw_states_num = 0, .start = rapl_start, .stop = rapl_stop, .do_register = rapl_register, .flags.needs_root = 0, .overflow_s = 60 * 60 * 24 * 100, /* To be implemented */ }; #endif
linux-master
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc. * * PCI initialization based on example code from: * Andreas Herrmann <[email protected]> */ #if defined(__i386__) || defined(__x86_64__) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <time.h> #include <string.h> #include <pci/pci.h> #include "idle_monitor/cpupower-monitor.h" #include "helpers/helpers.h" #define PCI_NON_PC0_OFFSET 0xb0 #define PCI_PC1_OFFSET 0xb4 #define PCI_PC6_OFFSET 0xb8 #define PCI_MONITOR_ENABLE_REG 0xe0 #define PCI_NON_PC0_ENABLE_BIT 0 #define PCI_PC1_ENABLE_BIT 1 #define PCI_PC6_ENABLE_BIT 2 #define PCI_NBP1_STAT_OFFSET 0x98 #define PCI_NBP1_ACTIVE_BIT 2 #define PCI_NBP1_ENTERED_BIT 1 #define PCI_NBP1_CAP_OFFSET 0x90 #define PCI_NBP1_CAPABLE_BIT 31 #define OVERFLOW_MS 343597 /* 32 bit register filled at 12500 HZ (1 tick per 80ns) */ enum amd_fam14h_states {NON_PC0 = 0, PC1, PC6, NBP1, AMD_FAM14H_STATE_NUM}; static int fam14h_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static int fam14h_nbp1_count(unsigned int id, unsigned long long *count, unsigned int cpu); static cstate_t amd_fam14h_cstates[AMD_FAM14H_STATE_NUM] = { { .name = "!PC0", .desc = N_("Package in sleep state (PC1 or deeper)"), .id = NON_PC0, .range = RANGE_PACKAGE, .get_count_percent = fam14h_get_count_percent, }, { .name = "PC1", .desc = N_("Processor Package C1"), .id = PC1, .range = RANGE_PACKAGE, .get_count_percent = fam14h_get_count_percent, }, { .name = "PC6", .desc = N_("Processor Package C6"), .id = PC6, .range = RANGE_PACKAGE, .get_count_percent = fam14h_get_count_percent, }, { .name = "NBP1", .desc = N_("North Bridge P1 boolean counter (returns 0 or 1)"), .id = NBP1, .range = RANGE_PACKAGE, .get_count = fam14h_nbp1_count, }, }; static struct pci_access *pci_acc; static struct pci_dev *amd_fam14h_pci_dev; static int nbp1_entered; static struct timespec start_time; static unsigned long long timediff; #ifdef 
DEBUG struct timespec dbg_time; long dbg_timediff; #endif static unsigned long long *previous_count[AMD_FAM14H_STATE_NUM]; static unsigned long long *current_count[AMD_FAM14H_STATE_NUM]; static int amd_fam14h_get_pci_info(struct cstate *state, unsigned int *pci_offset, unsigned int *enable_bit, unsigned int cpu) { switch (state->id) { case NON_PC0: *enable_bit = PCI_NON_PC0_ENABLE_BIT; *pci_offset = PCI_NON_PC0_OFFSET; break; case PC1: *enable_bit = PCI_PC1_ENABLE_BIT; *pci_offset = PCI_PC1_OFFSET; break; case PC6: *enable_bit = PCI_PC6_ENABLE_BIT; *pci_offset = PCI_PC6_OFFSET; break; case NBP1: *enable_bit = PCI_NBP1_ENTERED_BIT; *pci_offset = PCI_NBP1_STAT_OFFSET; break; default: return -1; } return 0; } static int amd_fam14h_init(cstate_t *state, unsigned int cpu) { int enable_bit, pci_offset, ret; uint32_t val; ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); if (ret) return ret; /* NBP1 needs extra treating -> write 1 to D18F6x98 bit 1 for init */ if (state->id == NBP1) { val = pci_read_long(amd_fam14h_pci_dev, pci_offset); val |= 1 << enable_bit; val = pci_write_long(amd_fam14h_pci_dev, pci_offset, val); return ret; } /* Enable monitor */ val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG); dprint("Init %s: read at offset: 0x%x val: %u\n", state->name, PCI_MONITOR_ENABLE_REG, (unsigned int) val); val |= 1 << enable_bit; pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val); dprint("Init %s: offset: 0x%x enable_bit: %d - val: %u (%u)\n", state->name, PCI_MONITOR_ENABLE_REG, enable_bit, (unsigned int) val, cpu); /* Set counter to zero */ pci_write_long(amd_fam14h_pci_dev, pci_offset, 0); previous_count[state->id][cpu] = 0; return 0; } static int amd_fam14h_disable(cstate_t *state, unsigned int cpu) { int enable_bit, pci_offset, ret; uint32_t val; ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); if (ret) return ret; val = pci_read_long(amd_fam14h_pci_dev, pci_offset); dprint("%s: offset: 0x%x %u\n", 
state->name, pci_offset, val); if (state->id == NBP1) { /* was the bit whether NBP1 got entered set? */ nbp1_entered = (val & (1 << PCI_NBP1_ACTIVE_BIT)) | (val & (1 << PCI_NBP1_ENTERED_BIT)); dprint("NBP1 was %sentered - 0x%x - enable_bit: " "%d - pci_offset: 0x%x\n", nbp1_entered ? "" : "not ", val, enable_bit, pci_offset); return ret; } current_count[state->id][cpu] = val; dprint("%s: Current - %llu (%u)\n", state->name, current_count[state->id][cpu], cpu); dprint("%s: Previous - %llu (%u)\n", state->name, previous_count[state->id][cpu], cpu); val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG); val &= ~(1 << enable_bit); pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val); return 0; } static int fam14h_nbp1_count(unsigned int id, unsigned long long *count, unsigned int cpu) { if (id == NBP1) { if (nbp1_entered) *count = 1; else *count = 0; return 0; } return -1; } static int fam14h_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { unsigned long diff; if (id >= AMD_FAM14H_STATE_NUM) return -1; /* residency count in 80ns -> divide through 12.5 to get us residency */ diff = current_count[id][cpu] - previous_count[id][cpu]; if (timediff == 0) *percent = 0.0; else *percent = 100.0 * diff / timediff / 12.5; dprint("Timediff: %llu - res~: %lu us - percent: %.2f %%\n", timediff, diff * 10 / 125, *percent); return 0; } static int amd_fam14h_start(void) { int num, cpu; clock_gettime(CLOCK_REALTIME, &start_time); for (num = 0; num < AMD_FAM14H_STATE_NUM; num++) { for (cpu = 0; cpu < cpu_count; cpu++) amd_fam14h_init(&amd_fam14h_cstates[num], cpu); } #ifdef DEBUG clock_gettime(CLOCK_REALTIME, &dbg_time); dbg_timediff = timespec_diff_us(start_time, dbg_time); dprint("Enabling counters took: %lu us\n", dbg_timediff); #endif return 0; } static int amd_fam14h_stop(void) { int num, cpu; struct timespec end_time; clock_gettime(CLOCK_REALTIME, &end_time); for (num = 0; num < AMD_FAM14H_STATE_NUM; num++) { for (cpu = 0; cpu < 
cpu_count; cpu++) amd_fam14h_disable(&amd_fam14h_cstates[num], cpu); } #ifdef DEBUG clock_gettime(CLOCK_REALTIME, &dbg_time); dbg_timediff = timespec_diff_us(end_time, dbg_time); dprint("Disabling counters took: %lu ns\n", dbg_timediff); #endif timediff = timespec_diff_us(start_time, end_time); if (timediff / 1000 > OVERFLOW_MS) print_overflow_err((unsigned int)timediff / 1000000, OVERFLOW_MS / 1000); return 0; } static int is_nbp1_capable(void) { uint32_t val; val = pci_read_long(amd_fam14h_pci_dev, PCI_NBP1_CAP_OFFSET); return val & (1 << 31); } struct cpuidle_monitor *amd_fam14h_register(void) { int num; if (cpupower_cpu_info.vendor != X86_VENDOR_AMD) return NULL; if (cpupower_cpu_info.family == 0x14) strncpy(amd_fam14h_monitor.name, "Fam_14h", MONITOR_NAME_LEN - 1); else if (cpupower_cpu_info.family == 0x12) strncpy(amd_fam14h_monitor.name, "Fam_12h", MONITOR_NAME_LEN - 1); else return NULL; /* We do not alloc for nbp1 machine wide counter */ for (num = 0; num < AMD_FAM14H_STATE_NUM - 1; num++) { previous_count[num] = calloc(cpu_count, sizeof(unsigned long long)); current_count[num] = calloc(cpu_count, sizeof(unsigned long long)); } /* We need PCI device: Slot 18, Func 6, compare with BKDG for fam 12h/14h */ amd_fam14h_pci_dev = pci_slot_func_init(&pci_acc, 0x18, 6); if (amd_fam14h_pci_dev == NULL || pci_acc == NULL) return NULL; if (!is_nbp1_capable()) amd_fam14h_monitor.hw_states_num = AMD_FAM14H_STATE_NUM - 1; amd_fam14h_monitor.name_len = strlen(amd_fam14h_monitor.name); return &amd_fam14h_monitor; } static void amd_fam14h_unregister(void) { int num; for (num = 0; num < AMD_FAM14H_STATE_NUM - 1; num++) { free(previous_count[num]); free(current_count[num]); } pci_cleanup(pci_acc); } struct cpuidle_monitor amd_fam14h_monitor = { .name = "", .hw_states = amd_fam14h_cstates, .hw_states_num = AMD_FAM14H_STATE_NUM, .start = amd_fam14h_start, .stop = amd_fam14h_stop, .do_register = amd_fam14h_register, .unregister = amd_fam14h_unregister, .flags.needs_root = 1, 
.overflow_s = OVERFLOW_MS / 1000, }; #endif /* #if defined(__i386__) || defined(__x86_64__) */
linux-master
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc. * * Based on SandyBridge monitor. Implements the new package C-states * (PC8, PC9, PC10) coming with a specific Haswell (family 0x45) CPU. */ #if defined(__i386__) || defined(__x86_64__) #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include "helpers/helpers.h" #include "idle_monitor/cpupower-monitor.h" #define MSR_PKG_C8_RESIDENCY 0x00000630 #define MSR_PKG_C9_RESIDENCY 0x00000631 #define MSR_PKG_C10_RESIDENCY 0x00000632 #define MSR_TSC 0x10 enum intel_hsw_ext_id { PC8 = 0, PC9, PC10, HSW_EXT_CSTATE_COUNT, TSC = 0xFFFF }; static int hsw_ext_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = { { .name = "PC8", .desc = N_("Processor Package C8"), .id = PC8, .range = RANGE_PACKAGE, .get_count_percent = hsw_ext_get_count_percent, }, { .name = "PC9", .desc = N_("Processor Package C9"), .id = PC9, .range = RANGE_PACKAGE, .get_count_percent = hsw_ext_get_count_percent, }, { .name = "PC10", .desc = N_("Processor Package C10"), .id = PC10, .range = RANGE_PACKAGE, .get_count_percent = hsw_ext_get_count_percent, }, }; static unsigned long long tsc_at_measure_start; static unsigned long long tsc_at_measure_end; static unsigned long long *previous_count[HSW_EXT_CSTATE_COUNT]; static unsigned long long *current_count[HSW_EXT_CSTATE_COUNT]; /* valid flag for all CPUs. 
If a MSR read failed it will be zero */ static int *is_valid; static int hsw_ext_get_count(enum intel_hsw_ext_id id, unsigned long long *val, unsigned int cpu) { int msr; switch (id) { case PC8: msr = MSR_PKG_C8_RESIDENCY; break; case PC9: msr = MSR_PKG_C9_RESIDENCY; break; case PC10: msr = MSR_PKG_C10_RESIDENCY; break; case TSC: msr = MSR_TSC; break; default: return -1; } if (read_msr(cpu, msr, val)) return -1; return 0; } static int hsw_ext_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { *percent = 0.0; if (!is_valid[cpu]) return -1; *percent = (100.0 * (current_count[id][cpu] - previous_count[id][cpu])) / (tsc_at_measure_end - tsc_at_measure_start); dprint("%s: previous: %llu - current: %llu - (%u)\n", hsw_ext_cstates[id].name, previous_count[id][cpu], current_count[id][cpu], cpu); dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n", hsw_ext_cstates[id].name, (unsigned long long) tsc_at_measure_end - tsc_at_measure_start, current_count[id][cpu] - previous_count[id][cpu], *percent, cpu); return 0; } static int hsw_ext_start(void) { int num, cpu; unsigned long long val; for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { for (cpu = 0; cpu < cpu_count; cpu++) { hsw_ext_get_count(num, &val, cpu); previous_count[num][cpu] = val; } } hsw_ext_get_count(TSC, &tsc_at_measure_start, base_cpu); return 0; } static int hsw_ext_stop(void) { unsigned long long val; int num, cpu; hsw_ext_get_count(TSC, &tsc_at_measure_end, base_cpu); for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { for (cpu = 0; cpu < cpu_count; cpu++) { is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu); current_count[num][cpu] = val; } } return 0; } struct cpuidle_monitor intel_hsw_ext_monitor; static struct cpuidle_monitor *hsw_ext_register(void) { int num; if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL || cpupower_cpu_info.family != 6) return NULL; switch (cpupower_cpu_info.model) { case 0x45: /* HSW */ break; default: return NULL; } is_valid = calloc(cpu_count, 
sizeof(int)); for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { previous_count[num] = calloc(cpu_count, sizeof(unsigned long long)); current_count[num] = calloc(cpu_count, sizeof(unsigned long long)); } intel_hsw_ext_monitor.name_len = strlen(intel_hsw_ext_monitor.name); return &intel_hsw_ext_monitor; } void hsw_ext_unregister(void) { int num; free(is_valid); for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) { free(previous_count[num]); free(current_count[num]); } } struct cpuidle_monitor intel_hsw_ext_monitor = { .name = "HaswellExtended", .hw_states = hsw_ext_cstates, .hw_states_num = HSW_EXT_CSTATE_COUNT, .start = hsw_ext_start, .stop = hsw_ext_stop, .do_register = hsw_ext_register, .unregister = hsw_ext_unregister, .flags.needs_root = 1, .overflow_s = 922000000 /* 922337203 seconds TSC overflow at 20GHz */ }; #endif /* defined(__i386__) || defined(__x86_64__) */
linux-master
tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc. * * Based on Len Brown's <[email protected]> turbostat tool. */ #if defined(__i386__) || defined(__x86_64__) #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include "helpers/helpers.h" #include "idle_monitor/cpupower-monitor.h" #define MSR_PKG_C2_RESIDENCY 0x60D #define MSR_PKG_C7_RESIDENCY 0x3FA #define MSR_CORE_C7_RESIDENCY 0x3FE #define MSR_TSC 0x10 enum intel_snb_id { C7 = 0, PC2, PC7, SNB_CSTATE_COUNT, TSC = 0xFFFF }; static int snb_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static cstate_t snb_cstates[SNB_CSTATE_COUNT] = { { .name = "C7", .desc = N_("Processor Core C7"), .id = C7, .range = RANGE_CORE, .get_count_percent = snb_get_count_percent, }, { .name = "PC2", .desc = N_("Processor Package C2"), .id = PC2, .range = RANGE_PACKAGE, .get_count_percent = snb_get_count_percent, }, { .name = "PC7", .desc = N_("Processor Package C7"), .id = PC7, .range = RANGE_PACKAGE, .get_count_percent = snb_get_count_percent, }, }; static unsigned long long tsc_at_measure_start; static unsigned long long tsc_at_measure_end; static unsigned long long *previous_count[SNB_CSTATE_COUNT]; static unsigned long long *current_count[SNB_CSTATE_COUNT]; /* valid flag for all CPUs. 
If a MSR read failed it will be zero */ static int *is_valid; static int snb_get_count(enum intel_snb_id id, unsigned long long *val, unsigned int cpu) { int msr; switch (id) { case C7: msr = MSR_CORE_C7_RESIDENCY; break; case PC2: msr = MSR_PKG_C2_RESIDENCY; break; case PC7: msr = MSR_PKG_C7_RESIDENCY; break; case TSC: msr = MSR_TSC; break; default: return -1; } if (read_msr(cpu, msr, val)) return -1; return 0; } static int snb_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { *percent = 0.0; if (!is_valid[cpu]) return -1; *percent = (100.0 * (current_count[id][cpu] - previous_count[id][cpu])) / (tsc_at_measure_end - tsc_at_measure_start); dprint("%s: previous: %llu - current: %llu - (%u)\n", snb_cstates[id].name, previous_count[id][cpu], current_count[id][cpu], cpu); dprint("%s: tsc_diff: %llu - count_diff: %llu - percent: %2.f (%u)\n", snb_cstates[id].name, (unsigned long long) tsc_at_measure_end - tsc_at_measure_start, current_count[id][cpu] - previous_count[id][cpu], *percent, cpu); return 0; } static int snb_start(void) { int num, cpu; unsigned long long val; for (num = 0; num < SNB_CSTATE_COUNT; num++) { for (cpu = 0; cpu < cpu_count; cpu++) { snb_get_count(num, &val, cpu); previous_count[num][cpu] = val; } } snb_get_count(TSC, &tsc_at_measure_start, base_cpu); return 0; } static int snb_stop(void) { unsigned long long val; int num, cpu; snb_get_count(TSC, &tsc_at_measure_end, base_cpu); for (num = 0; num < SNB_CSTATE_COUNT; num++) { for (cpu = 0; cpu < cpu_count; cpu++) { is_valid[cpu] = !snb_get_count(num, &val, cpu); current_count[num][cpu] = val; } } return 0; } struct cpuidle_monitor intel_snb_monitor; static struct cpuidle_monitor *snb_register(void) { int num; if (cpupower_cpu_info.vendor != X86_VENDOR_INTEL || cpupower_cpu_info.family != 6) return NULL; switch (cpupower_cpu_info.model) { case 0x2A: /* SNB */ case 0x2D: /* SNB Xeon */ case 0x3A: /* IVB */ case 0x3E: /* IVB Xeon */ case 0x3C: /* HSW */ case 0x3F: /* HSW */ case 
0x45: /* HSW */ case 0x46: /* HSW */ break; default: return NULL; } is_valid = calloc(cpu_count, sizeof(int)); for (num = 0; num < SNB_CSTATE_COUNT; num++) { previous_count[num] = calloc(cpu_count, sizeof(unsigned long long)); current_count[num] = calloc(cpu_count, sizeof(unsigned long long)); } intel_snb_monitor.name_len = strlen(intel_snb_monitor.name); return &intel_snb_monitor; } void snb_unregister(void) { int num; free(is_valid); for (num = 0; num < SNB_CSTATE_COUNT; num++) { free(previous_count[num]); free(current_count[num]); } } struct cpuidle_monitor intel_snb_monitor = { .name = "SandyBridge", .hw_states = snb_cstates, .hw_states_num = SNB_CSTATE_COUNT, .start = snb_start, .stop = snb_stop, .do_register = snb_register, .unregister = snb_unregister, .flags.needs_root = 1, .overflow_s = 922000000 /* 922337203 seconds TSC overflow at 20GHz */ }; #endif /* defined(__i386__) || defined(__x86_64__) */
linux-master
tools/power/cpupower/utils/idle_monitor/snb_idle.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> * (C) 2011 Thomas Renninger <[email protected]> Novell Inc. */ #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include "helpers/sysfs.h" unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) { int fd; ssize_t numread; fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } /* * Detect whether a CPU is online * * Returns: * 1 -> if CPU is online * 0 -> if CPU is offline * negative errno values in error case */ int sysfs_is_cpu_online(unsigned int cpu) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numread; unsigned long long value; char linebuf[MAX_LINE_LEN]; char *endp; struct stat statbuf; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); if (stat(path, &statbuf) != 0) return 0; /* * kernel without CONFIG_HOTPLUG_CPU * -> cpuX directory exists, but not cpuX/online file */ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); if (stat(path, &statbuf) != 0) return 1; fd = open(path, O_RDONLY); if (fd == -1) return -errno; numread = read(fd, linebuf, MAX_LINE_LEN - 1); if (numread < 1) { close(fd); return -EIO; } linebuf[numread] = '\0'; close(fd); value = strtoull(linebuf, &endp, 0); if (value > 1) return -EINVAL; return value; } /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ /* * helper function to check whether a file under "../cpuX/cpuidle/stateX/" dir * exists. * For example the functionality to disable c-states was introduced in later * kernel versions, this function can be used to explicitly check for this * feature. * * returns 1 if the file exists, 0 otherwise. 
*/ unsigned int sysfs_idlestate_file_exists(unsigned int cpu, unsigned int idlestate, const char *fname) { char path[SYSFS_PATH_MAX]; struct stat statbuf; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); if (stat(path, &statbuf) != 0) return 0; return 1; } /* * helper function to read file from /sys into given buffer * fname is a relative path under "cpuX/cpuidle/stateX/" dir * cstates starting with 0, C0 is not counted as cstate. * This means if you want C1 info, pass 0 as idlestate param */ unsigned int sysfs_idlestate_read_file(unsigned int cpu, unsigned int idlestate, const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numread; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } /* * helper function to write a new value to a /sys file * fname is a relative path under "../cpuX/cpuidle/cstateY/" dir * * Returns the number of bytes written or 0 on error */ static unsigned int sysfs_idlestate_write_file(unsigned int cpu, unsigned int idlestate, const char *fname, const char *value, size_t len) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numwrite; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); fd = open(path, O_WRONLY); if (fd == -1) return 0; numwrite = write(fd, value, len); if (numwrite < 1) { close(fd); return 0; } close(fd); return (unsigned int) numwrite; } /* read access to files which contain one numeric value */ enum idlestate_value { IDLESTATE_USAGE, IDLESTATE_POWER, IDLESTATE_LATENCY, IDLESTATE_TIME, IDLESTATE_DISABLE, MAX_IDLESTATE_VALUE_FILES }; static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = { [IDLESTATE_USAGE] = "usage", [IDLESTATE_POWER] = "power", [IDLESTATE_LATENCY] = 
"latency", [IDLESTATE_TIME] = "time", [IDLESTATE_DISABLE] = "disable", }; static unsigned long long sysfs_idlestate_get_one_value(unsigned int cpu, unsigned int idlestate, enum idlestate_value which) { unsigned long long value; unsigned int len; char linebuf[MAX_LINE_LEN]; char *endp; if (which >= MAX_IDLESTATE_VALUE_FILES) return 0; len = sysfs_idlestate_read_file(cpu, idlestate, idlestate_value_files[which], linebuf, sizeof(linebuf)); if (len == 0) return 0; value = strtoull(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return 0; return value; } /* read access to files which contain one string */ enum idlestate_string { IDLESTATE_DESC, IDLESTATE_NAME, MAX_IDLESTATE_STRING_FILES }; static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = { [IDLESTATE_DESC] = "desc", [IDLESTATE_NAME] = "name", }; static char *sysfs_idlestate_get_one_string(unsigned int cpu, unsigned int idlestate, enum idlestate_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= MAX_IDLESTATE_STRING_FILES) return NULL; len = sysfs_idlestate_read_file(cpu, idlestate, idlestate_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } /* * Returns: * 1 if disabled * 0 if enabled * -1 if idlestate is not available * -2 if disabling is not supported by the kernel */ int sysfs_is_idlestate_disabled(unsigned int cpu, unsigned int idlestate) { if (sysfs_get_idlestate_count(cpu) <= idlestate) return -1; if (!sysfs_idlestate_file_exists(cpu, idlestate, idlestate_value_files[IDLESTATE_DISABLE])) return -2; return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_DISABLE); } /* * Pass 1 as last argument to disable or 0 to enable the state * Returns: * 0 on success * negative values on error, for example: * -1 if idlestate is not available * -2 if disabling is not supported 
by the kernel * -3 No write access to disable/enable C-states */ int sysfs_idlestate_disable(unsigned int cpu, unsigned int idlestate, unsigned int disable) { char value[SYSFS_PATH_MAX]; int bytes_written; if (sysfs_get_idlestate_count(cpu) <= idlestate) return -1; if (!sysfs_idlestate_file_exists(cpu, idlestate, idlestate_value_files[IDLESTATE_DISABLE])) return -2; snprintf(value, SYSFS_PATH_MAX, "%u", disable); bytes_written = sysfs_idlestate_write_file(cpu, idlestate, "disable", value, sizeof(disable)); if (bytes_written) return 0; return -3; } unsigned long sysfs_get_idlestate_latency(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_LATENCY); } unsigned long sysfs_get_idlestate_usage(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_USAGE); } unsigned long long sysfs_get_idlestate_time(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_value(cpu, idlestate, IDLESTATE_TIME); } char *sysfs_get_idlestate_name(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_NAME); } char *sysfs_get_idlestate_desc(unsigned int cpu, unsigned int idlestate) { return sysfs_idlestate_get_one_string(cpu, idlestate, IDLESTATE_DESC); } /* * Returns number of supported C-states of CPU core cpu * Negativ in error case * Zero if cpuidle does not export any C-states */ unsigned int sysfs_get_idlestate_count(unsigned int cpu) { char file[SYSFS_PATH_MAX]; struct stat statbuf; int idlestates = 1; snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle"); if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) return 0; snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu); if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) return 0; while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) { snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state%d", cpu, 
idlestates); idlestates++; } idlestates--; return idlestates; } /* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/ /* * helper function to read file from /sys into given buffer * fname is a relative path under "cpu/cpuidle/" dir */ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); return sysfs_read_file(path, buf, buflen); } /* read access to files which contain one string */ enum cpuidle_string { CPUIDLE_GOVERNOR, CPUIDLE_GOVERNOR_RO, CPUIDLE_DRIVER, MAX_CPUIDLE_STRING_FILES }; static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = { [CPUIDLE_GOVERNOR] = "current_governor", [CPUIDLE_GOVERNOR_RO] = "current_governor_ro", [CPUIDLE_DRIVER] = "current_driver", }; static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= MAX_CPUIDLE_STRING_FILES) return NULL; len = sysfs_cpuidle_read_file(cpuidle_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } char *sysfs_get_cpuidle_governor(void) { char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO); if (!tmp) return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR); else return tmp; } char *sysfs_get_cpuidle_driver(void) { return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER); } /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ /* * Get sched_mc or sched_smt settings * Pass "mc" or "smt" as argument * * Returns negative value on failure */ int sysfs_get_sched(const char *smt_mc) { return -ENODEV; } /* * Get sched_mc or sched_smt settings * Pass "mc" or "smt" as argument * * Returns negative value on failure */ int sysfs_set_sched(const char *smt_mc, int val) { return -ENODEV; }
linux-master
tools/power/cpupower/utils/helpers/sysfs.c
// SPDX-License-Identifier: GPL-2.0 #if defined(__i386__) || defined(__x86_64__) #include <helpers/helpers.h> /* * pci_acc_init * * PCI access helper function depending on libpci * * **pacc : if a valid pci_dev is returned * *pacc must be passed to pci_acc_cleanup to free it * * domain: domain * bus: bus * slot: slot * func: func * vendor: vendor * device: device * Pass -1 for one of the six above to match any * * Returns : * struct pci_dev which can be used with pci_{read,write}_* functions * to access the PCI config space of matching pci devices */ struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus, int slot, int func, int vendor, int dev) { struct pci_filter filter_nb_link; struct pci_dev *device; *pacc = pci_alloc(); if (*pacc == NULL) return NULL; pci_filter_init(*pacc, &filter_nb_link); filter_nb_link.domain = domain; filter_nb_link.bus = bus; filter_nb_link.slot = slot; filter_nb_link.func = func; filter_nb_link.vendor = vendor; filter_nb_link.device = dev; pci_init(*pacc); pci_scan_bus(*pacc); for (device = (*pacc)->devices; device; device = device->next) { if (pci_filter_match(&filter_nb_link, device)) return device; } pci_cleanup(*pacc); return NULL; } /* Typically one wants to get a specific slot(device)/func of the root domain and bus */ struct pci_dev *pci_slot_func_init(struct pci_access **pacc, int slot, int func) { return pci_acc_init(pacc, 0, 0, slot, func, -1, -1); } #endif /* defined(__i386__) || defined(__x86_64__) */
linux-master
tools/power/cpupower/utils/helpers/pci.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2010,2011 Thomas Renninger <[email protected]>, Novell Inc. * * ToDo: Needs to be done more properly for AMD/Intel specifics */ /* Helper struct for qsort, must be in sync with cpupower_topology.cpu_info */ /* Be careful: Need to pass unsigned to the sort, so that offlined cores are in the end, but double check for -1 for offlined cpus at other places */ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <cpuidle.h> /* CPU topology/hierarchy parsing ******************/
linux-master
tools/power/cpupower/utils/helpers/topology.c
// SPDX-License-Identifier: GPL-2.0
#if defined(__i386__) || defined(__x86_64__)
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#include <pci/pci.h>

#include "helpers/helpers.h"
#include "cpufreq.h"
#include "acpi_cppc.h"

/* ACPI P-States Helper Functions for AMD Processors ***************/
#define MSR_AMD_PSTATE_STATUS	0xc0010063
#define MSR_AMD_PSTATE		0xc0010064
#define MSR_AMD_PSTATE_LIMIT	0xc0010061

/* Two overlapping views of a P-state MSR: the bitfield layout changed
 * with family 0x17 (Zen); 'val' gives raw 64-bit access. */
union core_pstate {
	/* pre fam 17h: */
	struct {
		unsigned fid:6;
		unsigned did:3;
		unsigned vid:7;
		unsigned res1:6;
		unsigned nbdid:1;
		unsigned res2:2;
		unsigned nbvid:7;
		unsigned iddval:8;
		unsigned idddiv:2;
		unsigned res3:21;
		unsigned en:1;
	} pstate;
	/* since fam 17h: */
	struct {
		unsigned fid:8;
		unsigned did:6;
		unsigned vid:8;
		unsigned iddval:8;
		unsigned idddiv:2;
		unsigned res1:31;
		unsigned en:1;
	} pstatedef;
	unsigned long long val;
};

/* Extract the frequency divisor ID from the P-state value, picking the
 * field layout that matches the detected CPU family. */
static int get_did(union core_pstate pstate)
{
	int t;

	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF)
		t = pstate.pstatedef.did;
	else if (cpupower_cpu_info.family == 0x12)
		/* family 0x12 keeps the DID in the low nibble of the raw value */
		t = pstate.val & 0xf;
	else
		t = pstate.pstate.did;

	return t;
}

/* Compute the core frequency (Core Operating Frequency, MHz) encoded by
 * a P-state MSR value.
 * NOTE(review): in the PSTATEDEF branch a did of 0 would divide by zero —
 * presumably enabled P-states always carry a non-zero DID; confirm. */
static int get_cof(union core_pstate pstate)
{
	int t;
	int fid, did, cof;

	did = get_did(pstate);
	if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) {
		fid = pstate.pstatedef.fid;
		cof = 200 * fid / did;
	} else {
		/* fid offset is 0x10 except on family 0x11 (0x8) */
		t = 0x10;
		fid = pstate.pstate.fid;
		if (cpupower_cpu_info.family == 0x11)
			t = 0x8;
		cof = (100 * (fid + t)) >> did;
	}
	return cof;
}

/* Needs:
 * cpu          -> the cpu that gets evaluated
 * boost_states -> how much boost states the machines support
 *
 * Fills up:
 * pstates -> a pointer to an array of size MAX_HW_PSTATES
 *            must be initialized with zeros.
 *            All available HW pstates (including boost states)
 * no      -> amount of pstates above array got filled up with
 *
 * returns zero on success, -1 on failure
 */
int decode_pstates(unsigned int cpu, int boost_states,
		   unsigned long *pstates, int *no)
{
	int i, psmax;
	union core_pstate pstate;
	unsigned long long val;

	/* Only read out frequencies from HW if HW Pstate is supported,
	 * otherwise frequencies are exported via ACPI tables.
	 */
	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_HW_PSTATE))
		return -1;

	if (read_msr(cpu, MSR_AMD_PSTATE_LIMIT, &val))
		return -1;

	/* bits [6:4] of the limit MSR hold the max enabled P-state index */
	psmax = (val >> 4) & 0x7;
	psmax += boost_states;
	for (i = 0; i <= psmax; i++) {
		if (i >= MAX_HW_PSTATES) {
			fprintf(stderr, "HW pstates [%d] exceeding max [%d]\n",
				psmax, MAX_HW_PSTATES);
			return -1;
		}
		if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
			return -1;

		/* The enabled bit (bit 63) is common for all families */
		if (!pstate.pstatedef.en)
			continue;

		pstates[i] = get_cof(pstate);
	}
	*no = i;
	return 0;
}

/*
 * Query the number of hardware boost states (and whether boosting is
 * active) via PCI config space of the northbridge function D18F4.
 * Returns 0 on success, -ENODEV if the PCI device is not found.
 */
int amd_pci_get_num_boost_states(int *active, int *states)
{
	struct pci_access *pci_acc;
	struct pci_dev *device;
	uint8_t val = 0;

	*active = *states = 0;

	device = pci_slot_func_init(&pci_acc, 0x18, 4);

	if (device == NULL)
		return -ENODEV;

	/* register 0x15c: low 2 bits = boost enable, bits [4:2] = state count */
	val = pci_read_byte(device, 0x15c);
	if (val & 3)
		*active = 1;
	else
		*active = 0;
	*states = (val >> 2) & 7;

	pci_cleanup(pci_acc);
	return 0;
}
/* ACPI P-States Helper Functions for AMD Processors ***************/

/* AMD P-State Helper Functions ************************************/
enum amd_pstate_value {
	AMD_PSTATE_HIGHEST_PERF,
	AMD_PSTATE_MAX_FREQ,
	AMD_PSTATE_LOWEST_NONLINEAR_FREQ,
	MAX_AMD_PSTATE_VALUE_READ_FILES,
};

/* per-cpu cpufreq sysfs file names, indexed by enum amd_pstate_value */
static const char *amd_pstate_value_files[MAX_AMD_PSTATE_VALUE_READ_FILES] = {
	[AMD_PSTATE_HIGHEST_PERF] = "amd_pstate_highest_perf",
	[AMD_PSTATE_MAX_FREQ] = "amd_pstate_max_freq",
	[AMD_PSTATE_LOWEST_NONLINEAR_FREQ] = "amd_pstate_lowest_nonlinear_freq",
};

/* Read one amd-pstate value from the cpufreq sysfs table for @cpu. */
static unsigned long amd_pstate_get_data(unsigned int cpu,
					 enum amd_pstate_value value)
{
	return cpufreq_get_sysfs_value_from_table(cpu,
						  amd_pstate_value_files,
						  value,
						  MAX_AMD_PSTATE_VALUE_READ_FILES);
}

/*
 * Determine boost support/activity under the amd-pstate driver:
 * boost is supported when highest_perf exceeds CPPC nominal_perf, and
 * considered active when cpuinfo_max equals the driver's max frequency.
 */
void amd_pstate_boost_init(unsigned int cpu, int *support, int *active)
{
	unsigned long highest_perf, nominal_perf, cpuinfo_min,
		      cpuinfo_max, amd_pstate_max;

	highest_perf = amd_pstate_get_data(cpu, AMD_PSTATE_HIGHEST_PERF);
	nominal_perf = acpi_cppc_get_data(cpu, NOMINAL_PERF);

	*support = highest_perf > nominal_perf ? 1 : 0;
	if (!(*support))
		return;

	cpufreq_get_hardware_limits(cpu, &cpuinfo_min, &cpuinfo_max);
	amd_pstate_max = amd_pstate_get_data(cpu, AMD_PSTATE_MAX_FREQ);

	*active = cpuinfo_max == amd_pstate_max ? 1 : 0;
}

/* Print the amd-pstate performance levels and their frequencies. */
void amd_pstate_show_perf_and_freq(unsigned int cpu, int no_rounding)
{
	printf(_(" AMD PSTATE Highest Performance: %lu. Maximum Frequency: "),
	       amd_pstate_get_data(cpu, AMD_PSTATE_HIGHEST_PERF));
	/*
	 * If boost isn't active, the cpuinfo_max doesn't indicate real max
	 * frequency. So we read it back from amd-pstate sysfs entry.
	 */
	print_speed(amd_pstate_get_data(cpu, AMD_PSTATE_MAX_FREQ), no_rounding);
	printf(".\n");

	printf(_(" AMD PSTATE Nominal Performance: %lu. Nominal Frequency: "),
	       acpi_cppc_get_data(cpu, NOMINAL_PERF));
	/* CPPC frequencies are in MHz; scale to kHz for print_speed() */
	print_speed(acpi_cppc_get_data(cpu, NOMINAL_FREQ) * 1000,
		    no_rounding);
	printf(".\n");

	printf(_(" AMD PSTATE Lowest Non-linear Performance: %lu. Lowest Non-linear Frequency: "),
	       acpi_cppc_get_data(cpu, LOWEST_NONLINEAR_PERF));
	print_speed(amd_pstate_get_data(cpu, AMD_PSTATE_LOWEST_NONLINEAR_FREQ),
		    no_rounding);
	printf(".\n");

	printf(_(" AMD PSTATE Lowest Performance: %lu. Lowest Frequency: "),
	       acpi_cppc_get_data(cpu, LOWEST_PERF));
	print_speed(acpi_cppc_get_data(cpu, LOWEST_FREQ) * 1000, no_rounding);
	printf(".\n");
}
/* AMD P-State Helper Functions ************************************/

#endif /* defined(__i386__) || defined(__x86_64__) */
linux-master
tools/power/cpupower/utils/helpers/amd.c
// SPDX-License-Identifier: GPL-2.0 #if defined(__i386__) || defined(__x86_64__) #include <fcntl.h> #include <stdio.h> #include <unistd.h> #include <stdint.h> #include "helpers/helpers.h" /* Intel specific MSRs */ #define MSR_IA32_PERF_STATUS 0x198 #define MSR_IA32_MISC_ENABLES 0x1a0 #define MSR_NEHALEM_TURBO_RATIO_LIMIT 0x1ad /* * read_msr * * Will return 0 on success and -1 on failure. * Possible errno values could be: * EFAULT -If the read/write did not fully complete * EIO -If the CPU does not support MSRs * ENXIO -If the CPU does not exist */ int read_msr(int cpu, unsigned int idx, unsigned long long *val) { int fd; char msr_file_name[64]; sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu); fd = open(msr_file_name, O_RDONLY); if (fd < 0) return -1; if (lseek(fd, idx, SEEK_CUR) == -1) goto err; if (read(fd, val, sizeof *val) != sizeof *val) goto err; close(fd); return 0; err: close(fd); return -1; } /* * write_msr * * Will return 0 on success and -1 on failure. * Possible errno values could be: * EFAULT -If the read/write did not fully complete * EIO -If the CPU does not support MSRs * ENXIO -If the CPU does not exist */ int write_msr(int cpu, unsigned int idx, unsigned long long val) { int fd; char msr_file_name[64]; sprintf(msr_file_name, "/dev/cpu/%d/msr", cpu); fd = open(msr_file_name, O_WRONLY); if (fd < 0) return -1; if (lseek(fd, idx, SEEK_CUR) == -1) goto err; if (write(fd, &val, sizeof val) != sizeof val) goto err; close(fd); return 0; err: close(fd); return -1; } unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu) { unsigned long long val; int ret; if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_HAS_TURBO_RATIO)) return -1; ret = read_msr(cpu, MSR_NEHALEM_TURBO_RATIO_LIMIT, &val); if (ret) return ret; return val; } #endif
linux-master
tools/power/cpupower/utils/helpers/msr.c
// SPDX-License-Identifier: GPL-2.0

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>

#include "helpers/helpers.h"

/* vendor_id strings as they appear in /proc/cpuinfo, indexed by
 * the X86_VENDOR_* enum values */
static const char *cpu_vendor_table[X86_VENDOR_MAX] = {
	"Unknown", "GenuineIntel", "AuthenticAMD", "HygonGenuine",
};

#if defined(__i386__) || defined(__x86_64__)

/* from gcc */
#include <cpuid.h>

/*
 * CPUID functions returning a single datum
 *
 * Define unsigned int cpuid_e[abcd]x(unsigned int op)
 */
#define cpuid_func(reg)					\
	unsigned int cpuid_##reg(unsigned int op)	\
	{						\
		unsigned int eax, ebx, ecx, edx;	\
		__cpuid(op, eax, ebx, ecx, edx);	\
		return reg;				\
	}

cpuid_func(eax);
cpuid_func(ebx);
cpuid_func(ecx);
cpuid_func(edx);

#endif /* defined(__i386__) || defined(__x86_64__) */

/* get_cpu_info
 *
 * Extract CPU vendor, family, model, stepping info from /proc/cpuinfo,
 * then fill in capability flags (cpu_info->caps) from cpuid leaves.
 *
 * Returns 0 on success or a negativ error code
 *
 * TBD: Should there be a cpuid alternative for this if /proc is not mounted?
 */
int get_cpu_info(struct cpupower_cpu_info *cpu_info)
{
	FILE *fp;
	char value[64];
	/* NOTE(review): 'proc' is only assigned once a "processor" line has
	 * been seen; it is compared before that on malformed input — verify
	 * /proc/cpuinfo always starts with a "processor" line. */
	unsigned int proc, x;
	unsigned int unknown = 0xffffff;	/* sentinel: field not parsed yet */
	unsigned int cpuid_level, ext_cpuid_level;
	int ret = -EINVAL;

	cpu_info->vendor	= X86_VENDOR_UNKNOWN;
	cpu_info->family	= unknown;
	cpu_info->model		= unknown;
	cpu_info->stepping	= unknown;
	cpu_info->caps		= 0;

	fp = fopen("/proc/cpuinfo", "r");
	if (!fp)
		return -EIO;

	while (!feof(fp)) {
		if (!fgets(value, 64, fp))
			continue;
		/* force termination; index 62 matches the upstream source */
		value[63 - 1] = '\0';

		/* Track which processor's stanza we are in */
		if (!strncmp(value, "processor\t: ", 12))
			sscanf(value, "processor\t: %u", &proc);

		/* Only parse the stanza of the CPU cpupower was bound to */
		if (proc != (unsigned int)base_cpu)
			continue;

		/* Get CPU vendor */
		if (!strncmp(value, "vendor_id", 9)) {
			for (x = 1; x < X86_VENDOR_MAX; x++) {
				if (strstr(value, cpu_vendor_table[x]))
					cpu_info->vendor = x;
			}
		/* Get CPU family, etc. */
		} else if (!strncmp(value, "cpu family\t: ", 13)) {
			sscanf(value, "cpu family\t: %u",
			       &cpu_info->family);
		} else if (!strncmp(value, "model\t\t: ", 9)) {
			sscanf(value, "model\t\t: %u",
			       &cpu_info->model);
		} else if (!strncmp(value, "stepping\t: ", 10)) {
			sscanf(value, "stepping\t: %u",
			       &cpu_info->stepping);

			/* Exit -> all values must have been set */
			if (cpu_info->vendor == X86_VENDOR_UNKNOWN ||
			    cpu_info->family == unknown ||
			    cpu_info->model == unknown ||
			    cpu_info->stepping == unknown) {
				ret = -EINVAL;
				goto out;
			}

			ret = 0;
			goto out;
		}
	}
	/* reached EOF without a complete stanza */
	ret = -ENODEV;

out:
	fclose(fp);

	/* Get some useful CPU capabilities from cpuid */
	if (cpu_info->vendor != X86_VENDOR_AMD &&
	    cpu_info->vendor != X86_VENDOR_HYGON &&
	    cpu_info->vendor != X86_VENDOR_INTEL)
		return ret;

	cpuid_level	= cpuid_eax(0);
	ext_cpuid_level	= cpuid_eax(0x80000000);

	/* Invariant TSC */
	if (ext_cpuid_level >= 0x80000007 &&
	    (cpuid_edx(0x80000007) & (1 << 8)))
		cpu_info->caps |= CPUPOWER_CAP_INV_TSC;

	/* Aperf/Mperf registers support */
	if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
		cpu_info->caps |= CPUPOWER_CAP_APERF;

	/* AMD or Hygon Boost state enable/disable register */
	if (cpu_info->vendor == X86_VENDOR_AMD ||
	    cpu_info->vendor == X86_VENDOR_HYGON) {
		if (ext_cpuid_level >= 0x80000007) {
			if (cpuid_edx(0x80000007) & (1 << 9)) {
				cpu_info->caps |= CPUPOWER_CAP_AMD_CPB;

				if (cpu_info->family >= 0x17)
					cpu_info->caps |= CPUPOWER_CAP_AMD_CPB_MSR;
			}

			if ((cpuid_edx(0x80000007) & (1 << 7)) &&
			    cpu_info->family != 0x14) {
				/* HW pstate was not implemented in family 0x14 */
				cpu_info->caps |= CPUPOWER_CAP_AMD_HW_PSTATE;

				if (cpu_info->family >= 0x17)
					cpu_info->caps |= CPUPOWER_CAP_AMD_PSTATEDEF;
			}
		}

		if (ext_cpuid_level >= 0x80000008 &&
		    cpuid_ebx(0x80000008) & (1 << 4))
			cpu_info->caps |= CPUPOWER_CAP_AMD_RDPRU;

		if (cpupower_amd_pstate_enabled()) {
			cpu_info->caps |= CPUPOWER_CAP_AMD_PSTATE;

			/*
			 * If AMD P-State is enabled, the firmware will treat
			 * AMD P-State function as high priority.
			 */
			cpu_info->caps &= ~CPUPOWER_CAP_AMD_CPB;
			cpu_info->caps &= ~CPUPOWER_CAP_AMD_CPB_MSR;
			cpu_info->caps &= ~CPUPOWER_CAP_AMD_HW_PSTATE;
			cpu_info->caps &= ~CPUPOWER_CAP_AMD_PSTATEDEF;
		}
	}

	if (cpu_info->vendor == X86_VENDOR_INTEL) {
		if (cpuid_level >= 6 &&
		    (cpuid_eax(6) & (1 << 1)))
			cpu_info->caps |= CPUPOWER_CAP_INTEL_IDA;
	}

	if (cpu_info->vendor == X86_VENDOR_INTEL) {
		/* Intel's perf-bias MSR support */
		if (cpuid_level >= 6 && (cpuid_ecx(6) & (1 << 3)))
			cpu_info->caps |= CPUPOWER_CAP_PERF_BIAS;

		/* Intel's Turbo Ratio Limit support */
		if (cpu_info->family == 6) {
			switch (cpu_info->model) {
			case 0x1A:	/* Core i7, Xeon 5500 series
					 * Bloomfield, Gainstown NHM-EP
					 */
			case 0x1E:	/* Core i7 and i5 Processor
					 * Clarksfield, Lynnfield, Jasper Forest
					 */
			case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
			case 0x25:	/* Westmere Client
					 * Clarkdale, Arrandale
					 */
			case 0x2C:	/* Westmere EP - Gulftown */
				cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
				break;
			case 0x2A:	/* SNB */
			case 0x2D:	/* SNB Xeon */
			case 0x3A:	/* IVB */
			case 0x3E:	/* IVB Xeon */
				cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
				cpu_info->caps |= CPUPOWER_CAP_IS_SNB;
				break;
			case 0x2E:	/* Nehalem-EX Xeon - Beckton */
			case 0x2F:	/* Westmere-EX Xeon - Eagleton */
			default:
				break;
			}
		}
	}

	/* printf("ID: %u - Extid: 0x%x - Caps: 0x%llx\n",
	 *	  cpuid_level, ext_cpuid_level, cpu_info->caps);
	 */
	return ret;
}
linux-master
tools/power/cpupower/utils/helpers/cpuid.c
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <helpers/bitmask.h>

/* How many bits in an unsigned long */
#define bitsperlong (8 * sizeof(unsigned long))

/* howmany(a,b) : how many elements of size b needed to hold all of a */
#define howmany(x, y) (((x)+((y)-1))/(y))

/* How many longs in mask of n bits */
#define longsperbits(n) howmany(n, bitsperlong)

#define max(a, b) ((a) > (b) ? (a) : (b))

/*
 * Allocate and free `struct bitmask *`
 */

/* Allocate a new `struct bitmask` with a size of n bits.
 * Bits start out cleared (calloc). Returns NULL (0) on allocation failure. */
struct bitmask *bitmask_alloc(unsigned int n)
{
	struct bitmask *bmp;

	bmp = malloc(sizeof(*bmp));
	if (!bmp)
		return 0;
	bmp->size = n;
	bmp->maskp = calloc(longsperbits(n), sizeof(unsigned long));
	if (!bmp->maskp) {
		free(bmp);
		return 0;
	}
	return bmp;
}

/* Free `struct bitmask`.  NULL is accepted as a no-op. */
void bitmask_free(struct bitmask *bmp)
{
	if (!bmp)
		return;
	free(bmp->maskp);
	bmp->maskp = (unsigned long *)0xdeadcdef;	/* double free tripwire */
	free(bmp);
}

/*
 * The routines _getbit() and _setbit() are the only
 * routines that actually understand the layout of bmp->maskp[].
 *
 * On little endian architectures, this could simply be an array of
 * bytes.  But the kernel layout of bitmasks _is_ visible to userspace
 * via the sched_(set/get)affinity calls in Linux 2.6, and on big
 * endian architectures, it is painfully obvious that this is an
 * array of unsigned longs.
 */

/* Return the value (0 or 1) of bit n in bitmask bmp.
 * Out-of-range indices read as 0 rather than faulting. */
static unsigned int _getbit(const struct bitmask *bmp, unsigned int n)
{
	if (n < bmp->size)
		return (bmp->maskp[n/bitsperlong] >> (n % bitsperlong)) & 1;
	else
		return 0;
}

/* Set bit n in bitmask bmp to value v (0 or 1).
 * Out-of-range indices are silently ignored. */
static void _setbit(struct bitmask *bmp, unsigned int n, unsigned int v)
{
	if (n < bmp->size) {
		if (v)
			bmp->maskp[n/bitsperlong] |= 1UL << (n % bitsperlong);
		else
			bmp->maskp[n/bitsperlong] &=
				~(1UL << (n % bitsperlong));
	}
}

/*
 * When parsing bitmask lists, only allow numbers, separated by one
 * of the allowed next characters.
 *
 * The parameter 'sret' is the return from a sscanf "%u%c".  It is
 * -1 if the sscanf input string was empty.  It is 0 if the first
 * character in the sscanf input string was not a decimal number.
 * It is 1 if the unsigned number matching the "%u" was the end of the
 * input string.  It is 2 if one or more additional characters followed
 * the matched unsigned number.  If it is 2, then 'nextc' is the first
 * character following the number.  The parameter 'ok_next_chars'
 * is the nul-terminated list of allowed next characters.
 *
 * The mask term just scanned was ok if and only if either the numbers
 * matching the %u were all of the input or if the next character in
 * the input past the numbers was one of the allowed next characters.
 */
static int scan_was_ok(int sret, char nextc, const char *ok_next_chars)
{
	return sret == 1 ||
		(sret == 2 && strchr(ok_next_chars, nextc) != NULL);
}

/* Return a pointer just past the next occurrence of 'sep' in q,
 * or NULL when there is none (or q itself is NULL). */
static const char *nexttoken(const char *q, int sep)
{
	if (q)
		q = strchr(q, sep);
	if (q)
		q++;
	return q;
}

/* Set a single bit i in bitmask */
struct bitmask *bitmask_setbit(struct bitmask *bmp, unsigned int i)
{
	_setbit(bmp, i, 1);
	return bmp;
}

/* Set all bits in bitmask: bmp = ~0 */
struct bitmask *bitmask_setall(struct bitmask *bmp)
{
	unsigned int i;
	for (i = 0; i < bmp->size; i++)
		_setbit(bmp, i, 1);
	return bmp;
}

/* Clear all bits in bitmask: bmp = 0 */
struct bitmask *bitmask_clearall(struct bitmask *bmp)
{
	unsigned int i;
	for (i = 0; i < bmp->size; i++)
		_setbit(bmp, i, 0);
	return bmp;
}

/* True if all bits are clear */
int bitmask_isallclear(const struct bitmask *bmp)
{
	unsigned int i;
	for (i = 0; i < bmp->size; i++)
		if (_getbit(bmp, i))
			return 0;
	return 1;
}

/* True if specified bit i is set */
int bitmask_isbitset(const struct bitmask *bmp, unsigned int i)
{
	return _getbit(bmp, i);
}

/* Number of lowest set bit (min).
 * Returns bmp->size when no bit is set (see bitmask_next). */
unsigned int bitmask_first(const struct bitmask *bmp)
{
	return bitmask_next(bmp, 0);
}

/* Number of highest set bit (max).
 * Returns bmp->size when no bit is set. */
unsigned int bitmask_last(const struct bitmask *bmp)
{
	unsigned int i;
	unsigned int m = bmp->size;
	for (i = 0; i < bmp->size; i++)
		if (_getbit(bmp, i))
			m = i;
	return m;
}

/* Number of next set bit at or above given bit i.
 * Returns bmp->size when no further bit is set. */
unsigned int bitmask_next(const struct bitmask *bmp, unsigned int i)
{
	unsigned int n;
	for (n = i; n < bmp->size; n++)
		if (_getbit(bmp, n))
			break;
	return n;
}

/*
 * Parses a comma-separated list of numbers and ranges of numbers,
 * with optional ':%u' strides modifying ranges, into provided bitmask.
 * Some examples of input lists and their equivalent simple list:
 *	Input		Equivalent to
 *	0-3		0,1,2,3
 *	0-7:2		0,2,4,6
 *	1,3,5-7		1,3,5,6,7
 *	0-3:2,8-15:4	0,2,8,12
 *
 * Returns 0 on success, -1 (with bmp cleared) on any parse error or
 * when a value lies beyond bmp->size.
 */
int bitmask_parselist(const char *buf, struct bitmask *bmp)
{
	const char *p, *q;

	bitmask_clearall(bmp);

	q = buf;
	/* p walks each comma-separated term; q is the term after it */
	while (p = q, q = nexttoken(q, ','), p) {
		unsigned int a;		/* begin of range */
		unsigned int b;		/* end of range */
		unsigned int s;		/* stride */
		const char *c1, *c2;	/* next tokens after '-' or ',' */
		char nextc;		/* char after sscanf %u match */
		int sret;		/* sscanf return (number of matches) */

		sret = sscanf(p, "%u%c", &a, &nextc);
		if (!scan_was_ok(sret, nextc, ",-"))
			goto err;
		b = a;
		s = 1;
		c1 = nexttoken(p, '-');
		c2 = nexttoken(p, ',');
		/* pointer comparison c1 < c2 ensures the '-' belongs to
		 * THIS term, not a later one */
		if (c1 != NULL && (c2 == NULL || c1 < c2)) {
			sret = sscanf(c1, "%u%c", &b, &nextc);
			if (!scan_was_ok(sret, nextc, ",:"))
				goto err;
			c1 = nexttoken(c1, ':');
			if (c1 != NULL && (c2 == NULL || c1 < c2)) {
				sret = sscanf(c1, "%u%c", &s, &nextc);
				if (!scan_was_ok(sret, nextc, ","))
					goto err;
			}
		}
		if (!(a <= b))
			goto err;
		if (b >= bmp->size)
			goto err;
		while (a <= b) {
			_setbit(bmp, a, 1);
			a += s;
		}
	}
	return 0;
err:
	bitmask_clearall(bmp);
	return -1;
}

/*
 * emit(buf, buflen, rbot, rtop, len)
 *
 * Helper routine for bitmask_displaylist().  Write decimal number
 * or range to buf+len, suppressing output past buf+buflen, with optional
 * comma-prefix.  Return len of what would be written to buf, if it
 * all fit.
 */
static inline int emit(char *buf, int buflen, int rbot, int rtop, int len)
{
	if (len > 0)
		len += snprintf(buf + len, max(buflen - len, 0), ",");
	if (rbot == rtop)
		len += snprintf(buf + len, max(buflen - len, 0), "%d", rbot);
	else
		len += snprintf(buf + len, max(buflen - len, 0), "%d-%d",
				rbot, rtop);
	return len;
}

/*
 * Write decimal list representation of bmp to buf.
 *
 * Output format is a comma-separated list of decimal numbers and
 * ranges.  Consecutively set bits are shown as two hyphen-separated
 * decimal numbers, the smallest and largest bit numbers set in
 * the range.  Output format is compatible with the format
 * accepted as input by bitmap_parselist().
 *
 * The return value is the number of characters which would be
 * generated for the given input, excluding the trailing '\0', as
 * per ISO C99.
 */
int bitmask_displaylist(char *buf, int buflen, const struct bitmask *bmp)
{
	int len = 0;
	/* current bit is 'cur', most recently seen range is [rbot, rtop] */
	unsigned int cur, rbot, rtop;

	if (buflen > 0)
		*buf = 0;
	rbot = cur = bitmask_first(bmp);
	while (cur < bmp->size) {
		rtop = cur;
		cur = bitmask_next(bmp, cur+1);
		/* range ended: either ran off the mask or a gap appeared */
		if (cur >= bmp->size || cur > rtop + 1) {
			len = emit(buf, buflen, rbot, rtop, len);
			rbot = cur;
		}
	}
	return len;
}
linux-master
tools/power/cpupower/utils/helpers/bitmask.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include "helpers/helpers.h" #include "helpers/sysfs.h" #include "cpufreq.h" #if defined(__i386__) || defined(__x86_64__) #include "cpupower_intern.h" #define MSR_AMD_HWCR 0xc0010015 int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active, int *states) { int ret; unsigned long long val; *support = *active = *states = 0; if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_CPB) { *support = 1; /* AMD Family 0x17 does not utilize PCI D18F4 like prior * families and has no fixed discrete boost states but * has Hardware determined variable increments instead. */ if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_CPB_MSR) { if (!read_msr(cpu, MSR_AMD_HWCR, &val)) { if (!(val & CPUPOWER_AMD_CPBDIS)) *active = 1; } } else { ret = amd_pci_get_num_boost_states(active, states); if (ret) return ret; } } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATE) { amd_pstate_boost_init(cpu, support, active); } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA) *support = *active = 1; return 0; } int cpupower_intel_get_perf_bias(unsigned int cpu) { char linebuf[MAX_LINE_LEN]; char path[SYSFS_PATH_MAX]; unsigned long val; char *endp; if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) return -1; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu); if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) return -1; val = strtol(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; return val; } int cpupower_intel_set_perf_bias(unsigned int cpu, unsigned int val) { char path[SYSFS_PATH_MAX]; char linebuf[3] = {}; if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) return -1; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu); snprintf(linebuf, sizeof(linebuf), "%d", val); if (cpupower_write_sysfs(path, linebuf, 3) <= 0) return -1; return 0; } int cpupower_set_epp(unsigned int 
cpu, char *epp) { char path[SYSFS_PATH_MAX]; char linebuf[30] = {}; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/energy_performance_preference", cpu); if (!is_valid_path(path)) return -1; snprintf(linebuf, sizeof(linebuf), "%s", epp); if (cpupower_write_sysfs(path, linebuf, 30) <= 0) return -1; return 0; } int cpupower_set_amd_pstate_mode(char *mode) { char path[SYSFS_PATH_MAX]; char linebuf[20] = {}; snprintf(path, sizeof(path), PATH_TO_CPU "amd_pstate/status"); if (!is_valid_path(path)) return -1; snprintf(linebuf, sizeof(linebuf), "%s\n", mode); if (cpupower_write_sysfs(path, linebuf, 20) <= 0) return -1; return 0; } int cpupower_set_turbo_boost(int turbo_boost) { char path[SYSFS_PATH_MAX]; char linebuf[2] = {}; snprintf(path, sizeof(path), PATH_TO_CPU "cpufreq/boost"); if (!is_valid_path(path)) return -1; snprintf(linebuf, sizeof(linebuf), "%d", turbo_boost); if (cpupower_write_sysfs(path, linebuf, 2) <= 0) return -1; return 0; } bool cpupower_amd_pstate_enabled(void) { char *driver = cpufreq_get_driver(0); bool ret = false; if (!driver) return ret; if (!strncmp(driver, "amd", 3)) ret = true; cpufreq_put_driver(driver); return ret; } #endif /* #if defined(__i386__) || defined(__x86_64__) */ /* get_cpustate * * Gather the information of all online CPUs into bitmask struct */ void get_cpustate(void) { unsigned int cpu = 0; bitmask_clearall(online_cpus); bitmask_clearall(offline_cpus); for (cpu = bitmask_first(cpus_chosen); cpu <= bitmask_last(cpus_chosen); cpu++) { if (cpupower_is_cpu_online(cpu) == 1) bitmask_setbit(online_cpus, cpu); else bitmask_setbit(offline_cpus, cpu); continue; } } /* print_online_cpus * * Print the CPU numbers of all CPUs that are online currently */ void print_online_cpus(void) { int str_len = 0; char *online_cpus_str = NULL; str_len = online_cpus->size * 5; online_cpus_str = (void *)malloc(sizeof(char) * str_len); if (!bitmask_isallclear(online_cpus)) { bitmask_displaylist(online_cpus_str, str_len, online_cpus); 
printf(_("Following CPUs are online:\n%s\n"), online_cpus_str); } } /* print_offline_cpus * * Print the CPU numbers of all CPUs that are offline currently */ void print_offline_cpus(void) { int str_len = 0; char *offline_cpus_str = NULL; str_len = offline_cpus->size * 5; offline_cpus_str = (void *)malloc(sizeof(char) * str_len); if (!bitmask_isallclear(offline_cpus)) { bitmask_displaylist(offline_cpus_str, str_len, offline_cpus); printf(_("Following CPUs are offline:\n%s\n"), offline_cpus_str); printf(_("cpupower set operation was not performed on them\n")); } } /* * print_speed * * Print the exact CPU frequency with appropriate unit */ void print_speed(unsigned long speed, int no_rounding) { unsigned long tmp; if (no_rounding) { if (speed > 1000000) printf("%u.%06u GHz", ((unsigned int)speed / 1000000), ((unsigned int)speed % 1000000)); else if (speed > 1000) printf("%u.%03u MHz", ((unsigned int)speed / 1000), (unsigned int)(speed % 1000)); else printf("%lu kHz", speed); } else { if (speed > 1000000) { tmp = speed % 10000; if (tmp >= 5000) speed += 10000; printf("%u.%02u GHz", ((unsigned int)speed / 1000000), ((unsigned int)(speed % 1000000) / 10000)); } else if (speed > 100000) { tmp = speed % 1000; if (tmp >= 500) speed += 1000; printf("%u MHz", ((unsigned int)speed / 1000)); } else if (speed > 1000) { tmp = speed % 100; if (tmp >= 50) speed += 100; printf("%u.%01u MHz", ((unsigned int)speed / 1000), ((unsigned int)(speed % 1000) / 100)); } } }
linux-master
tools/power/cpupower/utils/helpers/misc.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> */ #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include "cpupower.h" #include "cpupower_intern.h" int is_valid_path(const char *path) { if (access(path, F_OK) == -1) return 0; return 1; } unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen) { ssize_t numread; int fd; fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } unsigned int cpupower_write_sysfs(const char *path, char *buf, size_t buflen) { ssize_t numwritten; int fd; fd = open(path, O_WRONLY); if (fd == -1) return 0; numwritten = write(fd, buf, buflen - 1); if (numwritten < 1) { perror(path); close(fd); return -1; } close(fd); return (unsigned int) numwritten; } /* * Detect whether a CPU is online * * Returns: * 1 -> if CPU is online * 0 -> if CPU is offline * negative errno values in error case */ int cpupower_is_cpu_online(unsigned int cpu) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numread; unsigned long long value; char linebuf[MAX_LINE_LEN]; char *endp; struct stat statbuf; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); if (stat(path, &statbuf) != 0) return 0; /* * kernel without CONFIG_HOTPLUG_CPU * -> cpuX directory exists, but not cpuX/online file */ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); if (stat(path, &statbuf) != 0) return 1; fd = open(path, O_RDONLY); if (fd == -1) return -errno; numread = read(fd, linebuf, MAX_LINE_LEN - 1); if (numread < 1) { close(fd); return -EIO; } linebuf[numread] = '\0'; close(fd); value = strtoull(linebuf, &endp, 0); if (value > 1) return -EINVAL; return value; } /* returns -1 on failure, 0 on success */ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int 
*result) { char linebuf[MAX_LINE_LEN]; char *endp; char path[SYSFS_PATH_MAX]; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", cpu, fname); if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) return -1; *result = strtol(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; return 0; } static int __compare(const void *t1, const void *t2) { struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; struct cpuid_core_info *top2 = (struct cpuid_core_info *)t2; if (top1->pkg < top2->pkg) return -1; else if (top1->pkg > top2->pkg) return 1; else if (top1->core < top2->core) return -1; else if (top1->core > top2->core) return 1; else if (top1->cpu < top2->cpu) return -1; else if (top1->cpu > top2->cpu) return 1; else return 0; } /* * Returns amount of cpus, negative on error, cpu_top must be * passed to cpu_topology_release to free resources * * Array is sorted after ->pkg, ->core, then ->cpu */ int get_cpu_topology(struct cpupower_topology *cpu_top) { int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus); if (cpu_top->core_info == NULL) return -ENOMEM; cpu_top->pkgs = cpu_top->cores = 0; for (cpu = 0; cpu < cpus; cpu++) { cpu_top->core_info[cpu].cpu = cpu; cpu_top->core_info[cpu].is_online = cpupower_is_cpu_online(cpu); if(sysfs_topology_read_file( cpu, "physical_package_id", &(cpu_top->core_info[cpu].pkg)) < 0) { cpu_top->core_info[cpu].pkg = -1; cpu_top->core_info[cpu].core = -1; continue; } if(sysfs_topology_read_file( cpu, "core_id", &(cpu_top->core_info[cpu].core)) < 0) { cpu_top->core_info[cpu].pkg = -1; cpu_top->core_info[cpu].core = -1; continue; } } qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), __compare); /* Count the number of distinct pkgs values. This works because the primary sort of the core_info struct was just done by pkg value. 
*/ last_pkg = cpu_top->core_info[0].pkg; for(cpu = 1; cpu < cpus; cpu++) { if (cpu_top->core_info[cpu].pkg != last_pkg && cpu_top->core_info[cpu].pkg != -1) { last_pkg = cpu_top->core_info[cpu].pkg; cpu_top->pkgs++; } } if (!(cpu_top->core_info[0].pkg == -1)) cpu_top->pkgs++; /* Intel's cores count is not consecutively numbered, there may * be a core_id of 3, but none of 2. Assume there always is 0 * Get amount of cores by counting duplicates in a package for (cpu = 0; cpu_top->core_info[cpu].pkg = 0 && cpu < cpus; cpu++) { if (cpu_top->core_info[cpu].core == 0) cpu_top->cores++; */ return cpus; } void cpu_topology_release(struct cpupower_topology cpu_top) { free(cpu_top.core_info); }
linux-master
tools/power/cpupower/lib/cpupower.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> * (C) 2011 Thomas Renninger <[email protected]> Novell Inc. */ #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include "cpuidle.h" #include "cpupower_intern.h" /* * helper function to check whether a file under "../cpuX/cpuidle/stateX/" dir * exists. * For example the functionality to disable c-states was introduced in later * kernel versions, this function can be used to explicitly check for this * feature. * * returns 1 if the file exists, 0 otherwise. */ static unsigned int cpuidle_state_file_exists(unsigned int cpu, unsigned int idlestate, const char *fname) { char path[SYSFS_PATH_MAX]; struct stat statbuf; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); if (stat(path, &statbuf) != 0) return 0; return 1; } /* * helper function to read file from /sys into given buffer * fname is a relative path under "cpuX/cpuidle/stateX/" dir * cstates starting with 0, C0 is not counted as cstate. 
* This means if you want C1 info, pass 0 as idlestate param */ static unsigned int cpuidle_state_read_file(unsigned int cpu, unsigned int idlestate, const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numread; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } /* * helper function to write a new value to a /sys file * fname is a relative path under "../cpuX/cpuidle/cstateY/" dir * * Returns the number of bytes written or 0 on error */ static unsigned int cpuidle_state_write_file(unsigned int cpu, unsigned int idlestate, const char *fname, const char *value, size_t len) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numwrite; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpuidle/state%u/%s", cpu, idlestate, fname); fd = open(path, O_WRONLY); if (fd == -1) return 0; numwrite = write(fd, value, len); if (numwrite < 1) { close(fd); return 0; } close(fd); return (unsigned int) numwrite; } /* read access to files which contain one numeric value */ enum idlestate_value { IDLESTATE_USAGE, IDLESTATE_POWER, IDLESTATE_LATENCY, IDLESTATE_TIME, IDLESTATE_DISABLE, MAX_IDLESTATE_VALUE_FILES }; static const char *idlestate_value_files[MAX_IDLESTATE_VALUE_FILES] = { [IDLESTATE_USAGE] = "usage", [IDLESTATE_POWER] = "power", [IDLESTATE_LATENCY] = "latency", [IDLESTATE_TIME] = "time", [IDLESTATE_DISABLE] = "disable", }; static unsigned long long cpuidle_state_get_one_value(unsigned int cpu, unsigned int idlestate, enum idlestate_value which) { unsigned long long value; unsigned int len; char linebuf[MAX_LINE_LEN]; char *endp; if (which >= MAX_IDLESTATE_VALUE_FILES) return 0; len = cpuidle_state_read_file(cpu, idlestate, idlestate_value_files[which], linebuf, sizeof(linebuf)); if (len == 0) return 0; value = 
strtoull(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return 0; return value; } /* read access to files which contain one string */ enum idlestate_string { IDLESTATE_DESC, IDLESTATE_NAME, MAX_IDLESTATE_STRING_FILES }; static const char *idlestate_string_files[MAX_IDLESTATE_STRING_FILES] = { [IDLESTATE_DESC] = "desc", [IDLESTATE_NAME] = "name", }; static char *cpuidle_state_get_one_string(unsigned int cpu, unsigned int idlestate, enum idlestate_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= MAX_IDLESTATE_STRING_FILES) return NULL; len = cpuidle_state_read_file(cpu, idlestate, idlestate_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } /* * Returns: * 1 if disabled * 0 if enabled * -1 if idlestate is not available * -2 if disabling is not supported by the kernel */ int cpuidle_is_state_disabled(unsigned int cpu, unsigned int idlestate) { if (cpuidle_state_count(cpu) <= idlestate) return -1; if (!cpuidle_state_file_exists(cpu, idlestate, idlestate_value_files[IDLESTATE_DISABLE])) return -2; return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_DISABLE); } /* * Pass 1 as last argument to disable or 0 to enable the state * Returns: * 0 on success * negative values on error, for example: * -1 if idlestate is not available * -2 if disabling is not supported by the kernel * -3 No write access to disable/enable C-states */ int cpuidle_state_disable(unsigned int cpu, unsigned int idlestate, unsigned int disable) { char value[SYSFS_PATH_MAX]; int bytes_written; if (cpuidle_state_count(cpu) <= idlestate) return -1; if (!cpuidle_state_file_exists(cpu, idlestate, idlestate_value_files[IDLESTATE_DISABLE])) return -2; snprintf(value, SYSFS_PATH_MAX, "%u", disable); bytes_written = cpuidle_state_write_file(cpu, idlestate, "disable", value, 
sizeof(disable)); if (bytes_written) return 0; return -3; } unsigned long cpuidle_state_latency(unsigned int cpu, unsigned int idlestate) { return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_LATENCY); } unsigned long cpuidle_state_usage(unsigned int cpu, unsigned int idlestate) { return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_USAGE); } unsigned long long cpuidle_state_time(unsigned int cpu, unsigned int idlestate) { return cpuidle_state_get_one_value(cpu, idlestate, IDLESTATE_TIME); } char *cpuidle_state_name(unsigned int cpu, unsigned int idlestate) { return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_NAME); } char *cpuidle_state_desc(unsigned int cpu, unsigned int idlestate) { return cpuidle_state_get_one_string(cpu, idlestate, IDLESTATE_DESC); } /* * Returns number of supported C-states of CPU core cpu * Negativ in error case * Zero if cpuidle does not export any C-states */ unsigned int cpuidle_state_count(unsigned int cpu) { char file[SYSFS_PATH_MAX]; struct stat statbuf; int idlestates = 1; snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle"); if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) return 0; snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu); if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) return 0; while (stat(file, &statbuf) == 0 && S_ISDIR(statbuf.st_mode)) { snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state%d", cpu, idlestates); idlestates++; } idlestates--; return idlestates; } /* CPUidle general /sys/devices/system/cpu/cpuidle/ sysfs access ********/ /* * helper function to read file from /sys into given buffer * fname is a relative path under "cpu/cpuidle/" dir */ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); return cpupower_read_sysfs(path, buf, buflen); } /* read access to files which contain one string */ enum 
cpuidle_string { CPUIDLE_GOVERNOR, CPUIDLE_GOVERNOR_RO, CPUIDLE_DRIVER, MAX_CPUIDLE_STRING_FILES }; static const char *cpuidle_string_files[MAX_CPUIDLE_STRING_FILES] = { [CPUIDLE_GOVERNOR] = "current_governor", [CPUIDLE_GOVERNOR_RO] = "current_governor_ro", [CPUIDLE_DRIVER] = "current_driver", }; static char *sysfs_cpuidle_get_one_string(enum cpuidle_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= MAX_CPUIDLE_STRING_FILES) return NULL; len = sysfs_cpuidle_read_file(cpuidle_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } char *cpuidle_get_governor(void) { char *tmp = sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR_RO); if (!tmp) return sysfs_cpuidle_get_one_string(CPUIDLE_GOVERNOR); else return tmp; } char *cpuidle_get_driver(void) { return sysfs_cpuidle_get_one_string(CPUIDLE_DRIVER); } /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */
linux-master
tools/power/cpupower/lib/cpuidle.c
// SPDX-License-Identifier: GPL-2.0-only #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include "cpupower_intern.h" #include "acpi_cppc.h" /* ACPI CPPC sysfs access ***********************************************/ static int acpi_cppc_read_file(unsigned int cpu, const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/acpi_cppc/%s", cpu, fname); return cpupower_read_sysfs(path, buf, buflen); } static const char * const acpi_cppc_value_files[] = { [HIGHEST_PERF] = "highest_perf", [LOWEST_PERF] = "lowest_perf", [NOMINAL_PERF] = "nominal_perf", [LOWEST_NONLINEAR_PERF] = "lowest_nonlinear_perf", [LOWEST_FREQ] = "lowest_freq", [NOMINAL_FREQ] = "nominal_freq", [REFERENCE_PERF] = "reference_perf", [WRAPAROUND_TIME] = "wraparound_time" }; unsigned long acpi_cppc_get_data(unsigned int cpu, enum acpi_cppc_value which) { unsigned long long value; unsigned int len; char linebuf[MAX_LINE_LEN]; char *endp; if (which >= MAX_CPPC_VALUE_FILES) return 0; len = acpi_cppc_read_file(cpu, acpi_cppc_value_files[which], linebuf, sizeof(linebuf)); if (len == 0) return 0; value = strtoull(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return 0; return value; }
linux-master
tools/power/cpupower/lib/acpi_cppc.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2016 SUSE Software Solutions GmbH * Thomas Renninger <[email protected]> */ #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <stdio.h> #include <dirent.h> #include "powercap.h" static unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) { int fd; ssize_t numread; fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } static int sysfs_get_enabled(char *path, int *mode) { int fd; char yes_no; int ret = 0; *mode = 0; fd = open(path, O_RDONLY); if (fd == -1) { ret = -1; goto out; } if (read(fd, &yes_no, 1) != 1) { ret = -1; goto out_close; } if (yes_no == '1') { *mode = 1; goto out_close; } else if (yes_no == '0') { goto out_close; } else { ret = -1; goto out_close; } out_close: close(fd); out: return ret; } int powercap_get_enabled(int *mode) { char path[SYSFS_PATH_MAX] = PATH_TO_POWERCAP "/intel-rapl/enabled"; return sysfs_get_enabled(path, mode); } /* * Hardcoded, because rapl is the only powercap implementation - * this needs to get more generic if more powercap implementations * should show up */ int powercap_get_driver(char *driver, int buflen) { char file[SYSFS_PATH_MAX] = PATH_TO_RAPL; struct stat statbuf; if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) { driver = ""; return -1; } else if (buflen > 10) { strcpy(driver, "intel-rapl"); return 0; } else return -1; } enum powercap_get64 { GET_ENERGY_UJ, GET_MAX_ENERGY_RANGE_UJ, GET_POWER_UW, GET_MAX_POWER_RANGE_UW, MAX_GET_64_FILES }; static const char *powercap_get64_files[MAX_GET_64_FILES] = { [GET_POWER_UW] = "power_uw", [GET_MAX_POWER_RANGE_UW] = "max_power_range_uw", [GET_ENERGY_UJ] = "energy_uj", [GET_MAX_ENERGY_RANGE_UJ] = "max_energy_range_uj", }; static int sysfs_powercap_get64_val(struct powercap_zone 
*zone, enum powercap_get64 which, uint64_t *val) { char file[SYSFS_PATH_MAX] = PATH_TO_POWERCAP "/"; int ret; char buf[MAX_LINE_LEN]; strcat(file, zone->sys_name); strcat(file, "/"); strcat(file, powercap_get64_files[which]); ret = sysfs_read_file(file, buf, MAX_LINE_LEN); if (ret < 0) return ret; if (ret == 0) return -1; *val = strtoll(buf, NULL, 10); return 0; } int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val) { return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val); } int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val) { return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val); } int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val) { return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val); } int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val) { return sysfs_powercap_get64_val(zone, GET_POWER_UW, val); } int powercap_zone_get_enabled(struct powercap_zone *zone, int *mode) { char path[SYSFS_PATH_MAX] = PATH_TO_POWERCAP; if ((strlen(PATH_TO_POWERCAP) + strlen(zone->sys_name)) + strlen("/enabled") + 1 >= SYSFS_PATH_MAX) return -1; strcat(path, "/"); strcat(path, zone->sys_name); strcat(path, "/enabled"); return sysfs_get_enabled(path, mode); } int powercap_zone_set_enabled(struct powercap_zone *zone, int mode) { /* To be done if needed */ return 0; } int powercap_read_zone(struct powercap_zone *zone) { struct dirent *dent; DIR *zone_dir; char sysfs_dir[SYSFS_PATH_MAX] = PATH_TO_POWERCAP; struct powercap_zone *child_zone; char file[SYSFS_PATH_MAX] = PATH_TO_POWERCAP; int i, ret = 0; uint64_t val = 0; strcat(sysfs_dir, "/"); strcat(sysfs_dir, zone->sys_name); zone_dir = opendir(sysfs_dir); if (zone_dir == NULL) return -1; strcat(file, "/"); strcat(file, zone->sys_name); strcat(file, "/name"); sysfs_read_file(file, zone->name, MAX_LINE_LEN); if (zone->parent) zone->tree_depth = zone->parent->tree_depth + 1; ret = powercap_get_energy_uj(zone, &val); if 
(ret == 0) zone->has_energy_uj = 1; ret = powercap_get_power_uw(zone, &val); if (ret == 0) zone->has_power_uw = 1; while ((dent = readdir(zone_dir)) != NULL) { struct stat st; if (strcmp(dent->d_name, ".") == 0 || strcmp(dent->d_name, "..") == 0) continue; if (stat(dent->d_name, &st) != 0 || !S_ISDIR(st.st_mode)) if (fstatat(dirfd(zone_dir), dent->d_name, &st, 0) < 0) continue; if (strncmp(dent->d_name, "intel-rapl:", 11) != 0) continue; child_zone = calloc(1, sizeof(struct powercap_zone)); if (child_zone == NULL) return -1; for (i = 0; i < POWERCAP_MAX_CHILD_ZONES; i++) { if (zone->children[i] == NULL) { zone->children[i] = child_zone; break; } if (i == POWERCAP_MAX_CHILD_ZONES - 1) { free(child_zone); fprintf(stderr, "Reached POWERCAP_MAX_CHILD_ZONES %d\n", POWERCAP_MAX_CHILD_ZONES); return -1; } } strcpy(child_zone->sys_name, zone->sys_name); strcat(child_zone->sys_name, "/"); strcat(child_zone->sys_name, dent->d_name); child_zone->parent = zone; if (zone->tree_depth >= POWERCAP_MAX_TREE_DEPTH) { fprintf(stderr, "Maximum zone hierarchy depth[%d] reached\n", POWERCAP_MAX_TREE_DEPTH); ret = -1; break; } powercap_read_zone(child_zone); } closedir(zone_dir); return ret; } struct powercap_zone *powercap_init_zones(void) { int enabled; struct powercap_zone *root_zone; int ret; char file[SYSFS_PATH_MAX] = PATH_TO_RAPL "/enabled"; ret = sysfs_get_enabled(file, &enabled); if (ret) return NULL; if (!enabled) return NULL; root_zone = calloc(1, sizeof(struct powercap_zone)); if (!root_zone) return NULL; strcpy(root_zone->sys_name, "intel-rapl/intel-rapl:0"); powercap_read_zone(root_zone); return root_zone; } /* Call function *f on the passed zone and all its children */ int powercap_walk_zones(struct powercap_zone *zone, int (*f)(struct powercap_zone *zone)) { int i, ret; if (!zone) return -1; ret = f(zone); if (ret) return ret; for (i = 0; i < POWERCAP_MAX_CHILD_ZONES; i++) { if (zone->children[i] != NULL) powercap_walk_zones(zone->children[i], f); } return 0; }
linux-master
tools/power/cpupower/lib/powercap.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2004-2009 Dominik Brodowski <[email protected]> */ #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include "cpufreq.h" #include "cpupower_intern.h" /* CPUFREQ sysfs access **************************************************/ /* helper function to read file from /sys into given buffer */ /* fname is a relative path under "cpuX/cpufreq" dir */ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname, char *buf, size_t buflen) { char path[SYSFS_PATH_MAX]; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", cpu, fname); return cpupower_read_sysfs(path, buf, buflen); } /* helper function to write a new value to a /sys file */ /* fname is a relative path under "cpuX/cpufreq" dir */ static unsigned int sysfs_cpufreq_write_file(unsigned int cpu, const char *fname, const char *value, size_t len) { char path[SYSFS_PATH_MAX]; int fd; ssize_t numwrite; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", cpu, fname); fd = open(path, O_WRONLY); if (fd == -1) return 0; numwrite = write(fd, value, len); if (numwrite < 1) { close(fd); return 0; } close(fd); return (unsigned int) numwrite; } /* read access to files which contain one numeric value */ enum cpufreq_value { CPUINFO_CUR_FREQ, CPUINFO_MIN_FREQ, CPUINFO_MAX_FREQ, CPUINFO_LATENCY, SCALING_CUR_FREQ, SCALING_MIN_FREQ, SCALING_MAX_FREQ, STATS_NUM_TRANSITIONS, MAX_CPUFREQ_VALUE_READ_FILES }; static const char *cpufreq_value_files[MAX_CPUFREQ_VALUE_READ_FILES] = { [CPUINFO_CUR_FREQ] = "cpuinfo_cur_freq", [CPUINFO_MIN_FREQ] = "cpuinfo_min_freq", [CPUINFO_MAX_FREQ] = "cpuinfo_max_freq", [CPUINFO_LATENCY] = "cpuinfo_transition_latency", [SCALING_CUR_FREQ] = "scaling_cur_freq", [SCALING_MIN_FREQ] = "scaling_min_freq", [SCALING_MAX_FREQ] = "scaling_max_freq", [STATS_NUM_TRANSITIONS] = "stats/total_trans" }; unsigned long 
cpufreq_get_sysfs_value_from_table(unsigned int cpu, const char **table, unsigned int index, unsigned int size) { unsigned long value; unsigned int len; char linebuf[MAX_LINE_LEN]; char *endp; if (!table || index >= size || !table[index]) return 0; len = sysfs_cpufreq_read_file(cpu, table[index], linebuf, sizeof(linebuf)); if (len == 0) return 0; value = strtoul(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return 0; return value; } static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu, enum cpufreq_value which) { return cpufreq_get_sysfs_value_from_table(cpu, cpufreq_value_files, which, MAX_CPUFREQ_VALUE_READ_FILES); } /* read access to files which contain one string */ enum cpufreq_string { SCALING_DRIVER, SCALING_GOVERNOR, MAX_CPUFREQ_STRING_FILES }; static const char *cpufreq_string_files[MAX_CPUFREQ_STRING_FILES] = { [SCALING_DRIVER] = "scaling_driver", [SCALING_GOVERNOR] = "scaling_governor", }; static char *sysfs_cpufreq_get_one_string(unsigned int cpu, enum cpufreq_string which) { char linebuf[MAX_LINE_LEN]; char *result; unsigned int len; if (which >= MAX_CPUFREQ_STRING_FILES) return NULL; len = sysfs_cpufreq_read_file(cpu, cpufreq_string_files[which], linebuf, sizeof(linebuf)); if (len == 0) return NULL; result = strdup(linebuf); if (result == NULL) return NULL; if (result[strlen(result) - 1] == '\n') result[strlen(result) - 1] = '\0'; return result; } /* write access */ enum cpufreq_write { WRITE_SCALING_MIN_FREQ, WRITE_SCALING_MAX_FREQ, WRITE_SCALING_GOVERNOR, WRITE_SCALING_SET_SPEED, MAX_CPUFREQ_WRITE_FILES }; static const char *cpufreq_write_files[MAX_CPUFREQ_WRITE_FILES] = { [WRITE_SCALING_MIN_FREQ] = "scaling_min_freq", [WRITE_SCALING_MAX_FREQ] = "scaling_max_freq", [WRITE_SCALING_GOVERNOR] = "scaling_governor", [WRITE_SCALING_SET_SPEED] = "scaling_setspeed", }; static int sysfs_cpufreq_write_one_value(unsigned int cpu, enum cpufreq_write which, const char *new_value, size_t len) { if (which >= MAX_CPUFREQ_WRITE_FILES) 
return 0; if (sysfs_cpufreq_write_file(cpu, cpufreq_write_files[which], new_value, len) != len) return -ENODEV; return 0; }; unsigned long cpufreq_get_freq_kernel(unsigned int cpu) { return sysfs_cpufreq_get_one_value(cpu, SCALING_CUR_FREQ); } unsigned long cpufreq_get_freq_hardware(unsigned int cpu) { return sysfs_cpufreq_get_one_value(cpu, CPUINFO_CUR_FREQ); } unsigned long cpufreq_get_transition_latency(unsigned int cpu) { return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY); } int cpufreq_get_hardware_limits(unsigned int cpu, unsigned long *min, unsigned long *max) { if ((!min) || (!max)) return -EINVAL; *min = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MIN_FREQ); if (!*min) return -ENODEV; *max = sysfs_cpufreq_get_one_value(cpu, CPUINFO_MAX_FREQ); if (!*max) return -ENODEV; return 0; } char *cpufreq_get_driver(unsigned int cpu) { return sysfs_cpufreq_get_one_string(cpu, SCALING_DRIVER); } void cpufreq_put_driver(char *ptr) { if (!ptr) return; free(ptr); } struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu) { struct cpufreq_policy *policy; policy = malloc(sizeof(struct cpufreq_policy)); if (!policy) return NULL; policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR); if (!policy->governor) { free(policy); return NULL; } policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ); policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ); if ((!policy->min) || (!policy->max)) { free(policy->governor); free(policy); return NULL; } return policy; } void cpufreq_put_policy(struct cpufreq_policy *policy) { if ((!policy) || (!policy->governor)) return; free(policy->governor); policy->governor = NULL; free(policy); } struct cpufreq_available_governors *cpufreq_get_available_governors(unsigned int cpu) { struct cpufreq_available_governors *first = NULL; struct cpufreq_available_governors *current = NULL; char linebuf[MAX_LINE_LEN]; unsigned int pos, i; unsigned int len; len = sysfs_cpufreq_read_file(cpu, 
"scaling_available_governors", linebuf, sizeof(linebuf)); if (len == 0) return NULL; pos = 0; for (i = 0; i < len; i++) { if (linebuf[i] == ' ' || linebuf[i] == '\n') { if (i - pos < 2) continue; if (current) { current->next = malloc(sizeof(*current)); if (!current->next) goto error_out; current = current->next; } else { first = malloc(sizeof(*first)); if (!first) return NULL; current = first; } current->first = first; current->next = NULL; current->governor = malloc(i - pos + 1); if (!current->governor) goto error_out; memcpy(current->governor, linebuf + pos, i - pos); current->governor[i - pos] = '\0'; pos = i + 1; } } return first; error_out: while (first) { current = first->next; if (first->governor) free(first->governor); free(first); first = current; } return NULL; } void cpufreq_put_available_governors(struct cpufreq_available_governors *any) { struct cpufreq_available_governors *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; if (tmp->governor) free(tmp->governor); free(tmp); tmp = next; } } struct cpufreq_available_frequencies *cpufreq_get_available_frequencies(unsigned int cpu) { struct cpufreq_available_frequencies *first = NULL; struct cpufreq_available_frequencies *current = NULL; char one_value[SYSFS_PATH_MAX]; char linebuf[MAX_LINE_LEN]; unsigned int pos, i; unsigned int len; len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies", linebuf, sizeof(linebuf)); if (len == 0) return NULL; pos = 0; for (i = 0; i < len; i++) { if (linebuf[i] == ' ' || linebuf[i] == '\n') { if (i - pos < 2) continue; if (i - pos >= SYSFS_PATH_MAX) goto error_out; if (current) { current->next = malloc(sizeof(*current)); if (!current->next) goto error_out; current = current->next; } else { first = malloc(sizeof(*first)); if (!first) return NULL; current = first; } current->first = first; current->next = NULL; memcpy(one_value, linebuf + pos, i - pos); one_value[i - pos] = '\0'; if (sscanf(one_value, "%lu", &current->frequency) != 1) 
goto error_out; pos = i + 1; } } return first; error_out: while (first) { current = first->next; free(first); first = current; } return NULL; } struct cpufreq_available_frequencies *cpufreq_get_boost_frequencies(unsigned int cpu) { struct cpufreq_available_frequencies *first = NULL; struct cpufreq_available_frequencies *current = NULL; char one_value[SYSFS_PATH_MAX]; char linebuf[MAX_LINE_LEN]; unsigned int pos, i; unsigned int len; len = sysfs_cpufreq_read_file(cpu, "scaling_boost_frequencies", linebuf, sizeof(linebuf)); if (len == 0) return NULL; pos = 0; for (i = 0; i < len; i++) { if (linebuf[i] == ' ' || linebuf[i] == '\n') { if (i - pos < 2) continue; if (i - pos >= SYSFS_PATH_MAX) goto error_out; if (current) { current->next = malloc(sizeof(*current)); if (!current->next) goto error_out; current = current->next; } else { first = malloc(sizeof(*first)); if (!first) return NULL; current = first; } current->first = first; current->next = NULL; memcpy(one_value, linebuf + pos, i - pos); one_value[i - pos] = '\0'; if (sscanf(one_value, "%lu", &current->frequency) != 1) goto error_out; pos = i + 1; } } return first; error_out: while (first) { current = first->next; free(first); first = current; } return NULL; } void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies *any) { struct cpufreq_available_frequencies *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; free(tmp); tmp = next; } } void cpufreq_put_boost_frequencies(struct cpufreq_available_frequencies *any) { cpufreq_put_available_frequencies(any); } static struct cpufreq_affected_cpus *sysfs_get_cpu_list(unsigned int cpu, const char *file) { struct cpufreq_affected_cpus *first = NULL; struct cpufreq_affected_cpus *current = NULL; char one_value[SYSFS_PATH_MAX]; char linebuf[MAX_LINE_LEN]; unsigned int pos, i; unsigned int len; len = sysfs_cpufreq_read_file(cpu, file, linebuf, sizeof(linebuf)); if (len == 0) return NULL; pos = 0; for (i = 0; i < len; i++) { 
if (i == len || linebuf[i] == ' ' || linebuf[i] == '\n') { if (i - pos < 1) continue; if (i - pos >= SYSFS_PATH_MAX) goto error_out; if (current) { current->next = malloc(sizeof(*current)); if (!current->next) goto error_out; current = current->next; } else { first = malloc(sizeof(*first)); if (!first) return NULL; current = first; } current->first = first; current->next = NULL; memcpy(one_value, linebuf + pos, i - pos); one_value[i - pos] = '\0'; if (sscanf(one_value, "%u", &current->cpu) != 1) goto error_out; pos = i + 1; } } return first; error_out: while (first) { current = first->next; free(first); first = current; } return NULL; } struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu) { return sysfs_get_cpu_list(cpu, "affected_cpus"); } void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *any) { struct cpufreq_affected_cpus *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; free(tmp); tmp = next; } } struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu) { return sysfs_get_cpu_list(cpu, "related_cpus"); } void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *any) { cpufreq_put_affected_cpus(any); } static int verify_gov(char *new_gov, char *passed_gov) { unsigned int i, j = 0; if (!passed_gov || (strlen(passed_gov) > 19)) return -EINVAL; strncpy(new_gov, passed_gov, 20); for (i = 0; i < 20; i++) { if (j) { new_gov[i] = '\0'; continue; } if ((new_gov[i] >= 'a') && (new_gov[i] <= 'z')) continue; if ((new_gov[i] >= 'A') && (new_gov[i] <= 'Z')) continue; if (new_gov[i] == '-') continue; if (new_gov[i] == '_') continue; if (new_gov[i] == '\0') { j = 1; continue; } return -EINVAL; } new_gov[19] = '\0'; return 0; } int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy) { char min[SYSFS_PATH_MAX]; char max[SYSFS_PATH_MAX]; char gov[SYSFS_PATH_MAX]; int ret; unsigned long old_min; int write_max_first; if (!policy || !(policy->governor)) return -EINVAL; if (policy->max < 
policy->min) return -EINVAL; if (verify_gov(gov, policy->governor)) return -EINVAL; snprintf(min, SYSFS_PATH_MAX, "%lu", policy->min); snprintf(max, SYSFS_PATH_MAX, "%lu", policy->max); old_min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ); write_max_first = (old_min && (policy->max < old_min) ? 0 : 1); if (write_max_first) { ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ, max, strlen(max)); if (ret) return ret; } ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, min, strlen(min)); if (ret) return ret; if (!write_max_first) { ret = sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ, max, strlen(max)); if (ret) return ret; } return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR, gov, strlen(gov)); } int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq) { char value[SYSFS_PATH_MAX]; snprintf(value, SYSFS_PATH_MAX, "%lu", min_freq); return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MIN_FREQ, value, strlen(value)); } int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq) { char value[SYSFS_PATH_MAX]; snprintf(value, SYSFS_PATH_MAX, "%lu", max_freq); return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_MAX_FREQ, value, strlen(value)); } int cpufreq_modify_policy_governor(unsigned int cpu, char *governor) { char new_gov[SYSFS_PATH_MAX]; if ((!governor) || (strlen(governor) > 19)) return -EINVAL; if (verify_gov(new_gov, governor)) return -EINVAL; return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_GOVERNOR, new_gov, strlen(new_gov)); } int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency) { struct cpufreq_policy *pol = cpufreq_get_policy(cpu); char userspace_gov[] = "userspace"; char freq[SYSFS_PATH_MAX]; int ret; if (!pol) return -ENODEV; if (strncmp(pol->governor, userspace_gov, 9) != 0) { ret = cpufreq_modify_policy_governor(cpu, userspace_gov); if (ret) { cpufreq_put_policy(pol); return ret; } } cpufreq_put_policy(pol); snprintf(freq, 
SYSFS_PATH_MAX, "%lu", target_frequency); return sysfs_cpufreq_write_one_value(cpu, WRITE_SCALING_SET_SPEED, freq, strlen(freq)); } struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu, unsigned long long *total_time) { struct cpufreq_stats *first = NULL; struct cpufreq_stats *current = NULL; char one_value[SYSFS_PATH_MAX]; char linebuf[MAX_LINE_LEN]; unsigned int pos, i; unsigned int len; len = sysfs_cpufreq_read_file(cpu, "stats/time_in_state", linebuf, sizeof(linebuf)); if (len == 0) return NULL; *total_time = 0; pos = 0; for (i = 0; i < len; i++) { if (i == strlen(linebuf) || linebuf[i] == '\n') { if (i - pos < 2) continue; if ((i - pos) >= SYSFS_PATH_MAX) goto error_out; if (current) { current->next = malloc(sizeof(*current)); if (!current->next) goto error_out; current = current->next; } else { first = malloc(sizeof(*first)); if (!first) return NULL; current = first; } current->first = first; current->next = NULL; memcpy(one_value, linebuf + pos, i - pos); one_value[i - pos] = '\0'; if (sscanf(one_value, "%lu %llu", &current->frequency, &current->time_in_state) != 2) goto error_out; *total_time = *total_time + current->time_in_state; pos = i + 1; } } return first; error_out: while (first) { current = first->next; free(first); first = current; } return NULL; } void cpufreq_put_stats(struct cpufreq_stats *any) { struct cpufreq_stats *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; free(tmp); tmp = next; } } unsigned long cpufreq_get_transitions(unsigned int cpu) { return sysfs_cpufreq_get_one_value(cpu, STATS_NUM_TRANSITIONS); }
linux-master
tools/power/cpupower/lib/cpufreq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2003 - 2004 Dominik Brodowski <[email protected]>
 *
 * Based on code found in
 * linux/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
 * and originally developed by Jeremy Fitzhardinge.
 *
 * USAGE: simply run it to decode the current settings on CPU 0,
 * or pass the CPU number as argument, or pass the MSR content
 * as argument.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>

#include <sys/types.h>
#include <sys/stat.h>

#define MCPU 32

#define MSR_IA32_PERF_STATUS	0x198

/*
 * Read MSR 'msr' of CPU 'cpu' through the msr character device.
 * Returns 0 on success, -1 on failure (bad cpu number, open, seek
 * or short read); *lo/*hi receive the low/high 32 bits of the MSR.
 */
static int rdmsr(unsigned int cpu, unsigned int msr,
		 unsigned int *lo, unsigned int *hi)
{
	int fd;
	char file[20];
	unsigned long long val;
	int retval = -1;

	*lo = *hi = 0;

	/* valid CPU numbers are 0 .. MCPU-1; was "cpu > MCPU", which
	 * inconsistently let cpu == MCPU through (main() uses >=) */
	if (cpu >= MCPU)
		goto err1;

	sprintf(file, "/dev/cpu/%d/msr", cpu);
	fd = open(file, O_RDONLY);

	if (fd < 0)
		goto err1;

	/* seek to the absolute MSR offset; use SEEK_SET instead of
	 * relying on the freshly opened fd being at position 0 */
	if (lseek(fd, msr, SEEK_SET) == -1)
		goto err2;

	if (read(fd, &val, 8) != 8)
		goto err2;

	*lo = (uint32_t )(val & 0xffffffffull);
	*hi = (uint32_t )(val>>32 & 0xffffffffull);

	retval = 0;
err2:
	close(fd);
err1:
	return retval;
}

/* Decode an IA32_PERF_STATUS value into bus multiplier and voltage. */
static void decode (unsigned int msr)
{
	unsigned int multiplier;
	unsigned int mv;

	multiplier = ((msr >> 8) & 0xFF);

	/* VID encoding: 700 mV base, 16 mV per step */
	mv = (((msr & 0xFF) * 16) + 700);

	printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv);
}

/* Read and decode the live perf-status MSR of 'cpu'. */
static int decode_live(unsigned int cpu)
{
	unsigned int lo, hi;
	int err;

	err = rdmsr(cpu, MSR_IA32_PERF_STATUS, &lo, &hi);

	if (err) {
		printf("can't get MSR_IA32_PERF_STATUS for cpu %d\n", cpu);
		printf("Possible trouble: you don't run an Enhanced SpeedStep capable cpu\n");
		printf("or you are not root, or the msr driver is not present\n");
		return 1;
	}

	decode(lo);

	return 0;
}

int main (int argc, char **argv)
{
	unsigned int cpu, mode = 0;

	if (argc < 2)
		cpu = 0;
	else {
		cpu = strtoul(argv[1], NULL, 0);
		/* arguments >= MCPU are treated as raw MSR values */
		if (cpu >= MCPU)
			mode = 1;
	}

	if (mode)
		decode(cpu);
	else
		decode_live(cpu);

	return 0;
}
linux-master
tools/power/cpupower/debug/i386/centrino-decode.c
// SPDX-License-Identifier: GPL-2.0-only // dump_psb. (c) 2004, Dave Jones, Red Hat Inc. #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #define _GNU_SOURCE #include <getopt.h> #include <sys/mman.h> #define LEN (0x100000 - 0xc0000) #define OFFSET (0xc0000) #ifndef __packed #define __packed __attribute((packed)) #endif static long relevant; static const int fid_to_mult[32] = { 110, 115, 120, 125, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 30, 190, 40, 200, 130, 135, 140, 210, 150, 225, 160, 165, 170, 180, -1, -1, }; static const int vid_to_voltage[32] = { 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, 1075, 1050, 1024, 1000, 975, 950, 925, 0, }; struct psb_header { char signature[10]; u_char version; u_char flags; u_short settlingtime; u_char res1; u_char numpst; } __packed; struct pst_header { u_int32_t cpuid; u_char fsb; u_char maxfid; u_char startvid; u_char numpstates; } __packed; static u_int fsb; static u_int sgtc; static int decode_pst(char *p, int npstates) { int i; int freq, fid, vid; for (i = 0; i < npstates; ++i) { fid = *p++; vid = *p++; freq = 100 * fid_to_mult[fid] * fsb; printf(" %2d %8dkHz FID %02x (%2d.%01d) VID %02x (%4dmV)\n", i, freq, fid, fid_to_mult[fid]/10, fid_to_mult[fid]%10, vid, vid_to_voltage[vid]); } return 0; } static void decode_psb(char *p, int numpst) { int i; struct psb_header *psb; struct pst_header *pst; psb = (struct psb_header*) p; if (psb->version != 0x12) return; printf("PSB version: %hhx flags: %hhx settling time %hhuus res1 %hhx num pst %hhu\n", psb->version, psb->flags, psb->settlingtime, psb->res1, psb->numpst); sgtc = psb->settlingtime * 100; if (sgtc < 10000) sgtc = 10000; p = ((char *) psb) + sizeof(struct psb_header); if (numpst < 0) numpst = psb->numpst; else printf("Overriding number of pst :%d\n", numpst); for (i = 0; i < numpst; i++) { pst = (struct pst_header*) p; 
if (relevant != 0) { if (relevant!= pst->cpuid) goto next_one; } printf(" PST %d cpuid %.3x fsb %hhu mfid %hhx svid %hhx numberstates %hhu\n", i+1, pst->cpuid, pst->fsb, pst->maxfid, pst->startvid, pst->numpstates); fsb = pst->fsb; decode_pst(p + sizeof(struct pst_header), pst->numpstates); next_one: p += sizeof(struct pst_header) + 2*pst->numpstates; } } static struct option info_opts[] = { {"numpst", no_argument, NULL, 'n'}, }; void print_help(void) { printf ("Usage: dump_psb [options]\n"); printf ("Options:\n"); printf (" -n, --numpst Set number of PST tables to scan\n"); printf (" -r, --relevant Only display PSTs relevant to cpuid N\n"); } int main(int argc, char *argv[]) { int fd; int numpst=-1; int ret=0, cont=1; char *mem = NULL; char *p; do { ret = getopt_long(argc, argv, "hr:n:", info_opts, NULL); switch (ret){ case '?': case 'h': print_help(); cont = 0; break; case 'r': relevant = strtol(optarg, NULL, 16); break; case 'n': numpst = strtol(optarg, NULL, 10); break; case -1: cont = 0; break; } } while(cont); fd = open("/dev/mem", O_RDONLY); if (fd < 0) { printf ("Couldn't open /dev/mem. Are you root?\n"); exit(1); } mem = mmap(mem, 0x100000 - 0xc0000, PROT_READ, MAP_SHARED, fd, 0xc0000); close(fd); for (p = mem; p - mem < LEN; p+=16) { if (memcmp(p, "AMDK7PNOW!", 10) == 0) { decode_psb(p, numpst); break; } } munmap(mem, LEN); return 0; }
linux-master
tools/power/cpupower/debug/i386/dump_psb.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) 2003 Bruno Ducrot * (C) 2004 Dominik Brodowski <[email protected]> * * Based on code found in * linux/include/asm-i386/ist.h and linux/arch/i386/kernel/setup.c * and originally developed by Andy Grover <[email protected]> */ #include <stdio.h> #include <string.h> #include <lrmi.h> int main (void) { struct LRMI_regs r; int retval; if (!LRMI_init()) return 0; memset(&r, 0, sizeof(r)); r.eax = 0x0000E980; r.edx = 0x47534943; retval = LRMI_int(0x15, &r); if (!retval) { printf("Failed!\n"); return 0; } if (r.eax == 0x47534943) { printf("BIOS supports GSIC call:\n"); printf("\tsignature: %c%c%c%c\n", (r.eax >> 24) & 0xff, (r.eax >> 16) & 0xff, (r.eax >> 8) & 0xff, (r.eax) & 0xff); printf("\tcommand port = 0x%.4x\n", r.ebx & 0xffff); printf("\tcommand = 0x%.4x\n", (r.ebx >> 16) & 0xffff); printf("\tevent port = 0x%.8x\n", r.ecx); printf("\tflags = 0x%.8x\n", r.edx); if (((r.ebx >> 16) & 0xffff) != 0x82) { printf("non-default command value. If speedstep-smi " "doesn't work out of the box,\nyou may want to " "try out the default value by passing " "smi_cmd=0x82 to the module\n ON YOUR OWN " "RISK.\n"); } if ((r.ebx & 0xffff) != 0xb2) { printf("non-default command port. If speedstep-smi " "doesn't work out of the box,\nyou may want to " "try out the default value by passing " "smi_port=0x82 to the module\n ON YOUR OWN " "RISK.\n"); } } else { printf("BIOS DOES NOT support GSIC call. Dumping registers anyway:\n"); printf("eax = 0x%.8x\n", r.eax); printf("ebx = 0x%.8x\n", r.ebx); printf("ecx = 0x%.8x\n", r.ecx); printf("edx = 0x%.8x\n", r.edx); printf("Note also that some BIOS do not support the initial " "GSIC call, but the newer\nspeedstep-smi driver may " "work.\nFor this, you need to pass some arguments to " "the speedstep-smi driver:\n"); printf("\tsmi_cmd=0x?? smi_port=0x?? smi_sig=1\n"); printf("\nUnfortunately, you have to know what exactly are " "smi_cmd and smi_port, and this\nis system " "dependent.\n"); } return 1; }
linux-master
tools/power/cpupower/debug/i386/intel_gsic.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2004 Bruno Ducrot <[email protected]>
 *
 * Based on code found in
 * linux/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
 * and originally developed by Paul Devriendt
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>

#include <sys/types.h>
#include <sys/stat.h>

#define MCPU 32

#define MSR_FIDVID_STATUS	0xc0010042

#define MSR_S_HI_CURRENT_VID	0x0000001f
#define MSR_S_LO_CURRENT_FID	0x0000003f

/*
 * Read the current FID/VID pair of 'cpu' from MSR_FIDVID_STATUS
 * through the msr character device.
 * Returns 0 on success, 1 on failure.
 */
static int get_fidvid(uint32_t cpu, uint32_t *fid, uint32_t *vid)
{
	int err = 1;
	uint64_t msr = 0;
	int fd;
	char file[20];

	if (cpu > MCPU)
		goto out;

	sprintf(file, "/dev/cpu/%d/msr", cpu);

	fd = open(file, O_RDONLY);
	if (fd < 0)
		goto out;
	/* BUGFIX: the seek result was ignored; a failed lseek() would
	 * silently read MSR offset 0 instead of MSR_FIDVID_STATUS */
	if (lseek(fd, MSR_FIDVID_STATUS, SEEK_CUR) == -1)
		goto err1;
	if (read(fd, &msr, 8) != 8)
		goto err1;

	*fid = ((uint32_t )(msr & 0xffffffffull)) & MSR_S_LO_CURRENT_FID;
	*vid = ((uint32_t )(msr>>32 & 0xffffffffull)) & MSR_S_HI_CURRENT_VID;
	err = 0;
err1:
	close(fd);
out:
	return err;
}

/* Return a frequency in MHz, given an input fid */
static uint32_t find_freq_from_fid(uint32_t fid)
{
	return 800 + (fid * 100);
}

/* Return a voltage in miliVolts, given an input vid */
static uint32_t find_millivolts_from_vid(uint32_t vid)
{
	return 1550-vid*25;
}

int main (int argc, char *argv[])
{
	int err;
	int cpu;
	uint32_t fid, vid;

	if (argc < 2)
		cpu = 0;
	else
		cpu = strtoul(argv[1], NULL, 0);

	err = get_fidvid(cpu, &fid, &vid);

	if (err) {
		printf("can't get fid, vid from MSR\n");
		printf("Possible trouble: you don't run a powernow-k8 capable cpu\n");
		printf("or you are not root, or the msr driver is not present\n");
		exit(1);
	}

	printf("cpu %d currently at %d MHz and %d mV\n",
	       cpu, find_freq_from_fid(fid), find_millivolts_from_vid(vid));

	return 0;
}
linux-master
tools/power/cpupower/debug/i386/powernow-k8-decode.c
// SPDX-License-Identifier: GPL-2.0-only /* * test module to check whether the TSC-based delay routine continues * to work properly after cpufreq transitions. Needs ACPI to work * properly. * * Based partly on the Power Management Timer (PMTMR) code to be found * in arch/i386/kernel/timers/timer_pm.c on recent 2.6. kernels, especially * code written by John Stultz. The read_pmtmr function was copied verbatim * from that file. * * (C) 2004 Dominik Brodowski * * To use: * 1.) pass clock=tsc to the kernel on your bootloader * 2.) modprobe this module (it'll fail) * 3.) change CPU frequency * 4.) modprobe this module again * 5.) if the third value, "diff_pmtmr", changes between 2. and 4., the * TSC-based delay routine on the Linux kernel does not correctly * handle the cpufreq transition. Please report this to * [email protected] */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/acpi.h> #include <asm/io.h> static int pm_tmr_ioport = 0; /*helper function to safely read acpi pm timesource*/ static u32 read_pmtmr(void) { u32 v1=0,v2=0,v3=0; /* It has been reported that because of various broken * chipsets (ICH4, PIIX4 and PIIX4E) where the ACPI PM time * source is not latched, so you must read it multiple * times to insure a safe value is read. */ do { v1 = inl(pm_tmr_ioport); v2 = inl(pm_tmr_ioport); v3 = inl(pm_tmr_ioport); } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)); /* mask the output to 24 bits */ return (v2 & 0xFFFFFF); } static int __init cpufreq_test_tsc(void) { u32 now, then, diff; u64 now_tsc, then_tsc, diff_tsc; int i; /* the following code snipped is copied from arch/x86/kernel/acpi/boot.c of Linux v2.6.25. */ /* detect the location of the ACPI PM Timer */ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) { /* FADT rev. 
2 */ if (acpi_gbl_FADT.xpm_timer_block.space_id != ACPI_ADR_SPACE_SYSTEM_IO) return 0; pm_tmr_ioport = acpi_gbl_FADT.xpm_timer_block.address; /* * "X" fields are optional extensions to the original V1.0 * fields, so we must selectively expand V1.0 fields if the * corresponding X field is zero. */ if (!pm_tmr_ioport) pm_tmr_ioport = acpi_gbl_FADT.pm_timer_block; } else { /* FADT rev. 1 */ pm_tmr_ioport = acpi_gbl_FADT.pm_timer_block; } printk(KERN_DEBUG "start--> \n"); then = read_pmtmr(); then_tsc = rdtsc(); for (i=0;i<20;i++) { mdelay(100); now = read_pmtmr(); now_tsc = rdtsc(); diff = (now - then) & 0xFFFFFF; diff_tsc = now_tsc - then_tsc; printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc); then = now; then_tsc = now_tsc; } printk(KERN_DEBUG "<-- end \n"); return -ENODEV; } static void __exit cpufreq_none(void) { return; } module_init(cpufreq_test_tsc) module_exit(cpufreq_none) MODULE_AUTHOR("Dominik Brodowski"); MODULE_DESCRIPTION("Verify the TSC cpufreq notifier working correctly -- needs ACPI-enabled system"); MODULE_LICENSE ("GPL");
linux-master
tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
// SPDX-License-Identifier: GPL-2.0-only /* * x86_energy_perf_policy -- set the energy versus performance * policy preference bias on recent X86 processors. */ /* * Copyright (c) 2010 - 2017 Intel Corporation. * Len Brown <[email protected]> */ #define _GNU_SOURCE #include MSRHEADER #include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sched.h> #include <sys/stat.h> #include <sys/resource.h> #include <getopt.h> #include <err.h> #include <fcntl.h> #include <signal.h> #include <sys/time.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include <cpuid.h> #include <errno.h> #define OPTARG_NORMAL (INT_MAX - 1) #define OPTARG_POWER (INT_MAX - 2) #define OPTARG_BALANCE_POWER (INT_MAX - 3) #define OPTARG_BALANCE_PERFORMANCE (INT_MAX - 4) #define OPTARG_PERFORMANCE (INT_MAX - 5) struct msr_hwp_cap { unsigned char highest; unsigned char guaranteed; unsigned char efficient; unsigned char lowest; }; struct msr_hwp_request { unsigned char hwp_min; unsigned char hwp_max; unsigned char hwp_desired; unsigned char hwp_epp; unsigned int hwp_window; unsigned char hwp_use_pkg; } req_update; unsigned int debug; unsigned int verbose; unsigned int force; char *progname; int base_cpu; unsigned char update_epb; unsigned long long new_epb; unsigned char turbo_is_enabled; unsigned char update_turbo; unsigned char turbo_update_value; unsigned char update_hwp_epp; unsigned char update_hwp_min; unsigned char update_hwp_max; unsigned char update_hwp_desired; unsigned char update_hwp_window; unsigned char update_hwp_use_pkg; unsigned char update_hwp_enable; #define hwp_update_enabled() (update_hwp_enable | update_hwp_epp | update_hwp_max | update_hwp_min | update_hwp_desired | update_hwp_window | update_hwp_use_pkg) int max_cpu_num; int max_pkg_num; #define MAX_PACKAGES 64 unsigned int first_cpu_in_pkg[MAX_PACKAGES]; unsigned long long pkg_present_set; unsigned long long pkg_selected_set; cpu_set_t *cpu_present_set; cpu_set_t *cpu_selected_set; int genuine_intel; 
size_t cpu_setsize; char *proc_stat = "/proc/stat"; unsigned int has_epb; /* MSR_IA32_ENERGY_PERF_BIAS */ unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_request_pkg; /* IA32_HWP_REQUEST_PKG */ unsigned int bdx_highest_ratio; #define PATH_TO_CPU "/sys/devices/system/cpu/" #define SYSFS_PATH_MAX 255 /* * maintain compatibility with original implementation, but don't document it: */ void usage(void) { fprintf(stderr, "%s [options] [scope][field value]\n", progname); fprintf(stderr, "scope: --cpu cpu-list [--hwp-use-pkg #] | --pkg pkg-list\n"); fprintf(stderr, "field: --all | --epb | --hwp-epp | --hwp-min | --hwp-max | --hwp-desired\n"); fprintf(stderr, "other: --hwp-enable | --turbo-enable (0 | 1) | --help | --force\n"); fprintf(stderr, "value: ( # | \"normal\" | \"performance\" | \"balance-performance\" | \"balance-power\"| \"power\")\n"); fprintf(stderr, "--hwp-window usec\n"); fprintf(stderr, "Specify only Energy Performance BIAS (legacy usage):\n"); fprintf(stderr, "%s: [-c cpu] [-v] (-r | policy-value )\n", progname); exit(1); } /* * If bdx_highest_ratio is set, * then we must translate between MSR format and simple ratio * used on the cmdline. 
*/ int ratio_2_msr_perf(int ratio) { int msr_perf; if (!bdx_highest_ratio) return ratio; msr_perf = ratio * 255 / bdx_highest_ratio; if (debug) fprintf(stderr, "%d = ratio_to_msr_perf(%d)\n", msr_perf, ratio); return msr_perf; } int msr_perf_2_ratio(int msr_perf) { int ratio; double d; if (!bdx_highest_ratio) return msr_perf; d = (double)msr_perf * (double) bdx_highest_ratio / 255.0; d = d + 0.5; /* round */ ratio = (int)d; if (debug) fprintf(stderr, "%d = msr_perf_ratio(%d) {%f}\n", ratio, msr_perf, d); return ratio; } int parse_cmdline_epb(int i) { if (!has_epb) errx(1, "EPB not enabled on this platform"); update_epb = 1; switch (i) { case OPTARG_POWER: return ENERGY_PERF_BIAS_POWERSAVE; case OPTARG_BALANCE_POWER: return ENERGY_PERF_BIAS_BALANCE_POWERSAVE; case OPTARG_NORMAL: return ENERGY_PERF_BIAS_NORMAL; case OPTARG_BALANCE_PERFORMANCE: return ENERGY_PERF_BIAS_BALANCE_PERFORMANCE; case OPTARG_PERFORMANCE: return ENERGY_PERF_BIAS_PERFORMANCE; } if (i < 0 || i > ENERGY_PERF_BIAS_POWERSAVE) errx(1, "--epb must be from 0 to 15"); return i; } #define HWP_CAP_LOWEST 0 #define HWP_CAP_HIGHEST 255 /* * "performance" changes hwp_min to cap.highest * All others leave it at cap.lowest */ int parse_cmdline_hwp_min(int i) { update_hwp_min = 1; switch (i) { case OPTARG_POWER: case OPTARG_BALANCE_POWER: case OPTARG_NORMAL: case OPTARG_BALANCE_PERFORMANCE: return HWP_CAP_LOWEST; case OPTARG_PERFORMANCE: return HWP_CAP_HIGHEST; } return i; } /* * "power" changes hwp_max to cap.lowest * All others leave it at cap.highest */ int parse_cmdline_hwp_max(int i) { update_hwp_max = 1; switch (i) { case OPTARG_POWER: return HWP_CAP_LOWEST; case OPTARG_NORMAL: case OPTARG_BALANCE_POWER: case OPTARG_BALANCE_PERFORMANCE: case OPTARG_PERFORMANCE: return HWP_CAP_HIGHEST; } return i; } /* * for --hwp-des, all strings leave it in autonomous mode * If you want to change it, you need to explicitly pick a value */ int parse_cmdline_hwp_desired(int i) { update_hwp_desired = 1; switch (i) { case 
OPTARG_POWER: case OPTARG_BALANCE_POWER: case OPTARG_BALANCE_PERFORMANCE: case OPTARG_NORMAL: case OPTARG_PERFORMANCE: return 0; /* autonomous */ } return i; } int parse_cmdline_hwp_window(int i) { unsigned int exponent; update_hwp_window = 1; switch (i) { case OPTARG_POWER: case OPTARG_BALANCE_POWER: case OPTARG_NORMAL: case OPTARG_BALANCE_PERFORMANCE: case OPTARG_PERFORMANCE: return 0; } if (i < 0 || i > 1270000000) { fprintf(stderr, "--hwp-window: 0 for auto; 1 - 1270000000 usec for window duration\n"); usage(); } for (exponent = 0; ; ++exponent) { if (debug) printf("%d 10^%d\n", i, exponent); if (i <= 127) break; i = i / 10; } if (debug) fprintf(stderr, "%d*10^%d: 0x%x\n", i, exponent, (exponent << 7) | i); return (exponent << 7) | i; } int parse_cmdline_hwp_epp(int i) { update_hwp_epp = 1; switch (i) { case OPTARG_POWER: return HWP_EPP_POWERSAVE; case OPTARG_BALANCE_POWER: return HWP_EPP_BALANCE_POWERSAVE; case OPTARG_NORMAL: case OPTARG_BALANCE_PERFORMANCE: return HWP_EPP_BALANCE_PERFORMANCE; case OPTARG_PERFORMANCE: return HWP_EPP_PERFORMANCE; } if (i < 0 || i > 0xff) { fprintf(stderr, "--hwp-epp must be from 0 to 0xff\n"); usage(); } return i; } int parse_cmdline_turbo(int i) { update_turbo = 1; switch (i) { case OPTARG_POWER: return 0; case OPTARG_NORMAL: case OPTARG_BALANCE_POWER: case OPTARG_BALANCE_PERFORMANCE: case OPTARG_PERFORMANCE: return 1; } if (i < 0 || i > 1) { fprintf(stderr, "--turbo-enable: 1 to enable, 0 to disable\n"); usage(); } return i; } int parse_optarg_string(char *s) { int i; char *endptr; if (!strncmp(s, "default", 7)) return OPTARG_NORMAL; if (!strncmp(s, "normal", 6)) return OPTARG_NORMAL; if (!strncmp(s, "power", 9)) return OPTARG_POWER; if (!strncmp(s, "balance-power", 17)) return OPTARG_BALANCE_POWER; if (!strncmp(s, "balance-performance", 19)) return OPTARG_BALANCE_PERFORMANCE; if (!strncmp(s, "performance", 11)) return OPTARG_PERFORMANCE; i = strtol(s, &endptr, 0); if (s == endptr) { fprintf(stderr, "no digits in \"%s\"\n", 
s); usage(); } if (i == LONG_MIN || i == LONG_MAX) errx(-1, "%s", s); if (i > 0xFF) errx(-1, "%d (0x%x) must be < 256", i, i); if (i < 0) errx(-1, "%d (0x%x) must be >= 0", i, i); return i; } void parse_cmdline_all(char *s) { force++; update_hwp_enable = 1; req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(s)); req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(s)); req_update.hwp_epp = parse_cmdline_hwp_epp(parse_optarg_string(s)); if (has_epb) new_epb = parse_cmdline_epb(parse_optarg_string(s)); turbo_update_value = parse_cmdline_turbo(parse_optarg_string(s)); req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(s)); req_update.hwp_window = parse_cmdline_hwp_window(parse_optarg_string(s)); } void validate_cpu_selected_set(void) { int cpu; if (CPU_COUNT_S(cpu_setsize, cpu_selected_set) == 0) errx(0, "no CPUs requested"); for (cpu = 0; cpu <= max_cpu_num; ++cpu) { if (CPU_ISSET_S(cpu, cpu_setsize, cpu_selected_set)) if (!CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set)) errx(1, "Requested cpu% is not present", cpu); } } void parse_cmdline_cpu(char *s) { char *startp, *endp; int cpu = 0; if (pkg_selected_set) { usage(); errx(1, "--cpu | --pkg"); } cpu_selected_set = CPU_ALLOC((max_cpu_num + 1)); if (cpu_selected_set == NULL) err(1, "cpu_selected_set"); CPU_ZERO_S(cpu_setsize, cpu_selected_set); for (startp = s; startp && *startp;) { if (*startp == ',') { startp++; continue; } if (*startp == '-') { int end_cpu; startp++; end_cpu = strtol(startp, &endp, 10); if (startp == endp) continue; while (cpu <= end_cpu) { if (cpu > max_cpu_num) errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num); CPU_SET_S(cpu, cpu_setsize, cpu_selected_set); cpu++; } startp = endp; continue; } if (strncmp(startp, "all", 3) == 0) { for (cpu = 0; cpu <= max_cpu_num; cpu += 1) { if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set)) CPU_SET_S(cpu, cpu_setsize, cpu_selected_set); } startp += 3; if (*startp == 0) break; } /* "--cpu even" is not 
documented */ if (strncmp(startp, "even", 4) == 0) { for (cpu = 0; cpu <= max_cpu_num; cpu += 2) { if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set)) CPU_SET_S(cpu, cpu_setsize, cpu_selected_set); } startp += 4; if (*startp == 0) break; } /* "--cpu odd" is not documented */ if (strncmp(startp, "odd", 3) == 0) { for (cpu = 1; cpu <= max_cpu_num; cpu += 2) { if (CPU_ISSET_S(cpu, cpu_setsize, cpu_present_set)) CPU_SET_S(cpu, cpu_setsize, cpu_selected_set); } startp += 3; if (*startp == 0) break; } cpu = strtol(startp, &endp, 10); if (startp == endp) errx(1, "--cpu cpu-set: confused by '%s'", startp); if (cpu > max_cpu_num) errx(1, "Requested cpu%d exceeds max cpu%d", cpu, max_cpu_num); CPU_SET_S(cpu, cpu_setsize, cpu_selected_set); startp = endp; } validate_cpu_selected_set(); } void parse_cmdline_pkg(char *s) { char *startp, *endp; int pkg = 0; if (cpu_selected_set) { usage(); errx(1, "--pkg | --cpu"); } pkg_selected_set = 0; for (startp = s; startp && *startp;) { if (*startp == ',') { startp++; continue; } if (*startp == '-') { int end_pkg; startp++; end_pkg = strtol(startp, &endp, 10); if (startp == endp) continue; while (pkg <= end_pkg) { if (pkg > max_pkg_num) errx(1, "Requested pkg%d exceeds max pkg%d", pkg, max_pkg_num); pkg_selected_set |= 1 << pkg; pkg++; } startp = endp; continue; } if (strncmp(startp, "all", 3) == 0) { pkg_selected_set = pkg_present_set; return; } pkg = strtol(startp, &endp, 10); if (pkg > max_pkg_num) errx(1, "Requested pkg%d Exceeds max pkg%d", pkg, max_pkg_num); pkg_selected_set |= 1 << pkg; startp = endp; } } void for_packages(unsigned long long pkg_set, int (func)(int)) { int pkg_num; for (pkg_num = 0; pkg_num <= max_pkg_num; ++pkg_num) { if (pkg_set & (1UL << pkg_num)) func(pkg_num); } } void print_version(void) { printf("x86_energy_perf_policy 17.05.11 (C) Len Brown <[email protected]>\n"); } void cmdline(int argc, char **argv) { int opt; int option_index = 0; static struct option long_options[] = { {"all", required_argument, 0, 
'a'}, {"cpu", required_argument, 0, 'c'}, {"pkg", required_argument, 0, 'p'}, {"debug", no_argument, 0, 'd'}, {"hwp-desired", required_argument, 0, 'D'}, {"epb", required_argument, 0, 'B'}, {"force", no_argument, 0, 'f'}, {"hwp-enable", no_argument, 0, 'e'}, {"help", no_argument, 0, 'h'}, {"hwp-epp", required_argument, 0, 'P'}, {"hwp-min", required_argument, 0, 'm'}, {"hwp-max", required_argument, 0, 'M'}, {"read", no_argument, 0, 'r'}, {"turbo-enable", required_argument, 0, 't'}, {"hwp-use-pkg", required_argument, 0, 'u'}, {"version", no_argument, 0, 'v'}, {"hwp-window", required_argument, 0, 'w'}, {0, 0, 0, 0 } }; progname = argv[0]; while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:", long_options, &option_index)) != -1) { switch (opt) { case 'a': parse_cmdline_all(optarg); break; case 'B': new_epb = parse_cmdline_epb(parse_optarg_string(optarg)); break; case 'c': parse_cmdline_cpu(optarg); break; case 'e': update_hwp_enable = 1; break; case 'h': usage(); break; case 'd': debug++; verbose++; break; case 'f': force++; break; case 'D': req_update.hwp_desired = parse_cmdline_hwp_desired(parse_optarg_string(optarg)); break; case 'm': req_update.hwp_min = parse_cmdline_hwp_min(parse_optarg_string(optarg)); break; case 'M': req_update.hwp_max = parse_cmdline_hwp_max(parse_optarg_string(optarg)); break; case 'p': parse_cmdline_pkg(optarg); break; case 'P': req_update.hwp_epp = parse_cmdline_hwp_epp(parse_optarg_string(optarg)); break; case 'r': /* v1 used -r to specify read-only mode, now the default */ break; case 't': turbo_update_value = parse_cmdline_turbo(parse_optarg_string(optarg)); break; case 'u': update_hwp_use_pkg++; if (atoi(optarg) == 0) req_update.hwp_use_pkg = 0; else req_update.hwp_use_pkg = 1; break; case 'v': print_version(); exit(0); break; case 'w': req_update.hwp_window = parse_cmdline_hwp_window(parse_optarg_string(optarg)); break; default: usage(); } } /* * v1 allowed "performance"|"normal"|"power" with no policy specifier * 
to update BIAS. Continue to support that, even though no longer documented. */ if (argc == optind + 1) new_epb = parse_cmdline_epb(parse_optarg_string(argv[optind])); if (argc > optind + 1) { fprintf(stderr, "stray parameter '%s'\n", argv[optind + 1]); usage(); } } /* * Open a file, and exit on failure */ FILE *fopen_or_die(const char *path, const char *mode) { FILE *filep = fopen(path, "r"); if (!filep) err(1, "%s: open failed", path); return filep; } void err_on_hypervisor(void) { FILE *cpuinfo; char *flags, *hypervisor; char *buffer; /* On VMs /proc/cpuinfo contains a "flags" entry for hypervisor */ cpuinfo = fopen_or_die("/proc/cpuinfo", "ro"); buffer = malloc(4096); if (!buffer) { fclose(cpuinfo); err(-ENOMEM, "buffer malloc fail"); } if (!fread(buffer, 1024, 1, cpuinfo)) { fclose(cpuinfo); free(buffer); err(1, "Reading /proc/cpuinfo failed"); } flags = strstr(buffer, "flags"); rewind(cpuinfo); fseek(cpuinfo, flags - buffer, SEEK_SET); if (!fgets(buffer, 4096, cpuinfo)) { fclose(cpuinfo); free(buffer); err(1, "Reading /proc/cpuinfo failed"); } fclose(cpuinfo); hypervisor = strstr(buffer, "hypervisor"); free(buffer); if (hypervisor) err(-1, "not supported on this virtual machine"); } int get_msr(int cpu, int offset, unsigned long long *msr) { int retval; char pathname[32]; int fd; sprintf(pathname, "/dev/cpu/%d/msr", cpu); fd = open(pathname, O_RDONLY); if (fd < 0) err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname); retval = pread(fd, msr, sizeof(*msr), offset); if (retval != sizeof(*msr)) { err_on_hypervisor(); err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset); } if (debug > 1) fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr); close(fd); return 0; } int put_msr(int cpu, int offset, unsigned long long new_msr) { char pathname[32]; int retval; int fd; sprintf(pathname, "/dev/cpu/%d/msr", cpu); fd = open(pathname, O_RDWR); if (fd < 0) err(-1, "%s open failed, try chown or chmod 
+r /dev/cpu/*/msr, or run as root", pathname); retval = pwrite(fd, &new_msr, sizeof(new_msr), offset); if (retval != sizeof(new_msr)) err(-2, "pwrite(cpu%d, offset 0x%x, 0x%llx) = %d", cpu, offset, new_msr, retval); close(fd); if (debug > 1) fprintf(stderr, "put_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, new_msr); return 0; } static unsigned int read_sysfs(const char *path, char *buf, size_t buflen) { ssize_t numread; int fd; fd = open(path, O_RDONLY); if (fd == -1) return 0; numread = read(fd, buf, buflen - 1); if (numread < 1) { close(fd); return 0; } buf[numread] = '\0'; close(fd); return (unsigned int) numread; } static unsigned int write_sysfs(const char *path, char *buf, size_t buflen) { ssize_t numwritten; int fd; fd = open(path, O_WRONLY); if (fd == -1) return 0; numwritten = write(fd, buf, buflen - 1); if (numwritten < 1) { perror("write failed\n"); close(fd); return -1; } close(fd); return (unsigned int) numwritten; } void print_hwp_cap(int cpu, struct msr_hwp_cap *cap, char *str) { if (cpu != -1) printf("cpu%d: ", cpu); printf("HWP_CAP: low %d eff %d guar %d high %d\n", cap->lowest, cap->efficient, cap->guaranteed, cap->highest); } void read_hwp_cap(int cpu, struct msr_hwp_cap *cap, unsigned int msr_offset) { unsigned long long msr; get_msr(cpu, msr_offset, &msr); cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr)); cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr)); cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr)); cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr)); } void print_hwp_request(int cpu, struct msr_hwp_request *h, char *str) { if (cpu != -1) printf("cpu%d: ", cpu); if (str) printf("%s", str); printf("HWP_REQ: min %d max %d des %d epp %d window 0x%x (%d*10^%dus) use_pkg %d\n", h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp, h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7, h->hwp_use_pkg); } void print_hwp_request_pkg(int pkg, struct msr_hwp_request *h, char *str) { printf("pkg%d: ", pkg); if 
(str) printf("%s", str); printf("HWP_REQ_PKG: min %d max %d des %d epp %d window 0x%x (%d*10^%dus)\n", h->hwp_min, h->hwp_max, h->hwp_desired, h->hwp_epp, h->hwp_window, h->hwp_window & 0x7F, (h->hwp_window >> 7) & 0x7); } void read_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset) { unsigned long long msr; get_msr(cpu, msr_offset, &msr); hwp_req->hwp_min = msr_perf_2_ratio((((msr) >> 0) & 0xff)); hwp_req->hwp_max = msr_perf_2_ratio((((msr) >> 8) & 0xff)); hwp_req->hwp_desired = msr_perf_2_ratio((((msr) >> 16) & 0xff)); hwp_req->hwp_epp = (((msr) >> 24) & 0xff); hwp_req->hwp_window = (((msr) >> 32) & 0x3ff); hwp_req->hwp_use_pkg = (((msr) >> 42) & 0x1); } void write_hwp_request(int cpu, struct msr_hwp_request *hwp_req, unsigned int msr_offset) { unsigned long long msr = 0; if (debug > 1) printf("cpu%d: requesting min %d max %d des %d epp %d window 0x%0x use_pkg %d\n", cpu, hwp_req->hwp_min, hwp_req->hwp_max, hwp_req->hwp_desired, hwp_req->hwp_epp, hwp_req->hwp_window, hwp_req->hwp_use_pkg); msr |= HWP_MIN_PERF(ratio_2_msr_perf(hwp_req->hwp_min)); msr |= HWP_MAX_PERF(ratio_2_msr_perf(hwp_req->hwp_max)); msr |= HWP_DESIRED_PERF(ratio_2_msr_perf(hwp_req->hwp_desired)); msr |= HWP_ENERGY_PERF_PREFERENCE(hwp_req->hwp_epp); msr |= HWP_ACTIVITY_WINDOW(hwp_req->hwp_window); msr |= HWP_PACKAGE_CONTROL(hwp_req->hwp_use_pkg); put_msr(cpu, msr_offset, msr); } static int get_epb(int cpu) { char path[SYSFS_PATH_MAX]; char linebuf[3]; char *endp; long val; if (!has_epb) return -1; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu); if (!read_sysfs(path, linebuf, 3)) return -1; val = strtol(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; return (int)val; } static int set_epb(int cpu, int val) { char path[SYSFS_PATH_MAX]; char linebuf[3]; char *endp; int ret; if (!has_epb) return -1; snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/power/energy_perf_bias", cpu); snprintf(linebuf, sizeof(linebuf), "%d", 
val); ret = write_sysfs(path, linebuf, 3); if (ret <= 0) return -1; val = strtol(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; return (int)val; } int print_cpu_msrs(int cpu) { struct msr_hwp_request req; struct msr_hwp_cap cap; int epb; epb = get_epb(cpu); if (epb >= 0) printf("cpu%d: EPB %u\n", cpu, (unsigned int) epb); if (!has_hwp) return 0; read_hwp_request(cpu, &req, MSR_HWP_REQUEST); print_hwp_request(cpu, &req, ""); read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES); print_hwp_cap(cpu, &cap, ""); return 0; } int print_pkg_msrs(int pkg) { struct msr_hwp_request req; unsigned long long msr; if (!has_hwp) return 0; read_hwp_request(first_cpu_in_pkg[pkg], &req, MSR_HWP_REQUEST_PKG); print_hwp_request_pkg(pkg, &req, ""); if (has_hwp_notify) { get_msr(first_cpu_in_pkg[pkg], MSR_HWP_INTERRUPT, &msr); fprintf(stderr, "pkg%d: MSR_HWP_INTERRUPT: 0x%08llx (Excursion_Min-%sabled, Guaranteed_Perf_Change-%sabled)\n", pkg, msr, ((msr) & 0x2) ? "EN" : "Dis", ((msr) & 0x1) ? "EN" : "Dis"); } get_msr(first_cpu_in_pkg[pkg], MSR_HWP_STATUS, &msr); fprintf(stderr, "pkg%d: MSR_HWP_STATUS: 0x%08llx (%sExcursion_Min, %sGuaranteed_Perf_Change)\n", pkg, msr, ((msr) & 0x4) ? "" : "No-", ((msr) & 0x1) ? "" : "No-"); return 0; } /* * Assumption: All HWP systems have 100 MHz bus clock */ int ratio_2_sysfs_khz(int ratio) { int bclk_khz = 100 * 1000; /* 100,000 KHz = 100 MHz */ return ratio * bclk_khz; } /* * If HWP is enabled and cpufreq sysfs attribtes are present, * then update sysfs, so that it will not become * stale when we write to MSRs. * (intel_pstate's max_perf_pct and min_perf_pct will follow cpufreq, * so we don't have to touch that.) */ void update_cpufreq_scaling_freq(int is_max, int cpu, unsigned int ratio) { char pathname[64]; FILE *fp; int retval; int khz; sprintf(pathname, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_%s_freq", cpu, is_max ? 
"max" : "min"); fp = fopen(pathname, "w"); if (!fp) { if (debug) perror(pathname); return; } khz = ratio_2_sysfs_khz(ratio); retval = fprintf(fp, "%d", khz); if (retval < 0) if (debug) perror("fprintf"); if (debug) printf("echo %d > %s\n", khz, pathname); fclose(fp); } /* * We update all sysfs before updating any MSRs because of * bugs in cpufreq/intel_pstate where the sysfs writes * for a CPU may change the min/max values on other CPUS. */ int update_sysfs(int cpu) { if (!has_hwp) return 0; if (!hwp_update_enabled()) return 0; if (access("/sys/devices/system/cpu/cpu0/cpufreq", F_OK)) return 0; if (update_hwp_min) update_cpufreq_scaling_freq(0, cpu, req_update.hwp_min); if (update_hwp_max) update_cpufreq_scaling_freq(1, cpu, req_update.hwp_max); return 0; } int verify_hwp_req_self_consistency(int cpu, struct msr_hwp_request *req) { /* fail if min > max requested */ if (req->hwp_min > req->hwp_max) { errx(1, "cpu%d: requested hwp-min %d > hwp_max %d", cpu, req->hwp_min, req->hwp_max); } /* fail if desired > max requestd */ if (req->hwp_desired && (req->hwp_desired > req->hwp_max)) { errx(1, "cpu%d: requested hwp-desired %d > hwp_max %d", cpu, req->hwp_desired, req->hwp_max); } /* fail if desired < min requestd */ if (req->hwp_desired && (req->hwp_desired < req->hwp_min)) { errx(1, "cpu%d: requested hwp-desired %d < requested hwp_min %d", cpu, req->hwp_desired, req->hwp_min); } return 0; } int check_hwp_request_v_hwp_capabilities(int cpu, struct msr_hwp_request *req, struct msr_hwp_cap *cap) { if (update_hwp_max) { if (req->hwp_max > cap->highest) errx(1, "cpu%d: requested max %d > capabilities highest %d, use --force?", cpu, req->hwp_max, cap->highest); if (req->hwp_max < cap->lowest) errx(1, "cpu%d: requested max %d < capabilities lowest %d, use --force?", cpu, req->hwp_max, cap->lowest); } if (update_hwp_min) { if (req->hwp_min > cap->highest) errx(1, "cpu%d: requested min %d > capabilities highest %d, use --force?", cpu, req->hwp_min, cap->highest); if 
(req->hwp_min < cap->lowest) errx(1, "cpu%d: requested min %d < capabilities lowest %d, use --force?", cpu, req->hwp_min, cap->lowest); } if (update_hwp_min && update_hwp_max && (req->hwp_min > req->hwp_max)) errx(1, "cpu%d: requested min %d > requested max %d", cpu, req->hwp_min, req->hwp_max); if (update_hwp_desired && req->hwp_desired) { if (req->hwp_desired > req->hwp_max) errx(1, "cpu%d: requested desired %d > requested max %d, use --force?", cpu, req->hwp_desired, req->hwp_max); if (req->hwp_desired < req->hwp_min) errx(1, "cpu%d: requested desired %d < requested min %d, use --force?", cpu, req->hwp_desired, req->hwp_min); if (req->hwp_desired < cap->lowest) errx(1, "cpu%d: requested desired %d < capabilities lowest %d, use --force?", cpu, req->hwp_desired, cap->lowest); if (req->hwp_desired > cap->highest) errx(1, "cpu%d: requested desired %d > capabilities highest %d, use --force?", cpu, req->hwp_desired, cap->highest); } return 0; } int update_hwp_request(int cpu) { struct msr_hwp_request req; struct msr_hwp_cap cap; int msr_offset = MSR_HWP_REQUEST; read_hwp_request(cpu, &req, msr_offset); if (debug) print_hwp_request(cpu, &req, "old: "); if (update_hwp_min) req.hwp_min = req_update.hwp_min; if (update_hwp_max) req.hwp_max = req_update.hwp_max; if (update_hwp_desired) req.hwp_desired = req_update.hwp_desired; if (update_hwp_window) req.hwp_window = req_update.hwp_window; if (update_hwp_epp) req.hwp_epp = req_update.hwp_epp; req.hwp_use_pkg = req_update.hwp_use_pkg; read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES); if (debug) print_hwp_cap(cpu, &cap, ""); if (!force) check_hwp_request_v_hwp_capabilities(cpu, &req, &cap); verify_hwp_req_self_consistency(cpu, &req); write_hwp_request(cpu, &req, msr_offset); if (debug) { read_hwp_request(cpu, &req, msr_offset); print_hwp_request(cpu, &req, "new: "); } return 0; } int update_hwp_request_pkg(int pkg) { struct msr_hwp_request req; struct msr_hwp_cap cap; int cpu = first_cpu_in_pkg[pkg]; int msr_offset = 
MSR_HWP_REQUEST_PKG; read_hwp_request(cpu, &req, msr_offset); if (debug) print_hwp_request_pkg(pkg, &req, "old: "); if (update_hwp_min) req.hwp_min = req_update.hwp_min; if (update_hwp_max) req.hwp_max = req_update.hwp_max; if (update_hwp_desired) req.hwp_desired = req_update.hwp_desired; if (update_hwp_window) req.hwp_window = req_update.hwp_window; if (update_hwp_epp) req.hwp_epp = req_update.hwp_epp; read_hwp_cap(cpu, &cap, MSR_HWP_CAPABILITIES); if (debug) print_hwp_cap(cpu, &cap, ""); if (!force) check_hwp_request_v_hwp_capabilities(cpu, &req, &cap); verify_hwp_req_self_consistency(cpu, &req); write_hwp_request(cpu, &req, msr_offset); if (debug) { read_hwp_request(cpu, &req, msr_offset); print_hwp_request_pkg(pkg, &req, "new: "); } return 0; } int enable_hwp_on_cpu(int cpu) { unsigned long long msr; get_msr(cpu, MSR_PM_ENABLE, &msr); put_msr(cpu, MSR_PM_ENABLE, 1); if (verbose) printf("cpu%d: MSR_PM_ENABLE old: %d new: %d\n", cpu, (unsigned int) msr, 1); return 0; } int update_cpu_msrs(int cpu) { unsigned long long msr; int epb; if (update_epb) { epb = get_epb(cpu); set_epb(cpu, new_epb); if (verbose) printf("cpu%d: ENERGY_PERF_BIAS old: %d new: %d\n", cpu, epb, (unsigned int) new_epb); } if (update_turbo) { int turbo_is_present_and_disabled; get_msr(cpu, MSR_IA32_MISC_ENABLE, &msr); turbo_is_present_and_disabled = ((msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE) != 0); if (turbo_update_value == 1) { if (turbo_is_present_and_disabled) { msr &= ~MSR_IA32_MISC_ENABLE_TURBO_DISABLE; put_msr(cpu, MSR_IA32_MISC_ENABLE, msr); if (verbose) printf("cpu%d: turbo ENABLE\n", cpu); } } else { /* * if "turbo_is_enabled" were known to be describe this cpu * then we could use it here to skip redundant disable requests. * but cpu may be in a different package, so we always write. 
*/ msr |= MSR_IA32_MISC_ENABLE_TURBO_DISABLE; put_msr(cpu, MSR_IA32_MISC_ENABLE, msr); if (verbose) printf("cpu%d: turbo DISABLE\n", cpu); } } if (!has_hwp) return 0; if (!hwp_update_enabled()) return 0; update_hwp_request(cpu); return 0; } unsigned int get_pkg_num(int cpu) { FILE *fp; char pathname[128]; unsigned int pkg; int retval; sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu); fp = fopen_or_die(pathname, "r"); retval = fscanf(fp, "%d\n", &pkg); if (retval != 1) errx(1, "%s: failed to parse", pathname); return pkg; } int set_max_cpu_pkg_num(int cpu) { unsigned int pkg; if (max_cpu_num < cpu) max_cpu_num = cpu; pkg = get_pkg_num(cpu); if (pkg >= MAX_PACKAGES) errx(1, "cpu%d: %d >= MAX_PACKAGES (%d)", cpu, pkg, MAX_PACKAGES); if (pkg > max_pkg_num) max_pkg_num = pkg; if ((pkg_present_set & (1ULL << pkg)) == 0) { pkg_present_set |= (1ULL << pkg); first_cpu_in_pkg[pkg] = cpu; } return 0; } int mark_cpu_present(int cpu) { CPU_SET_S(cpu, cpu_setsize, cpu_present_set); return 0; } /* * run func(cpu) on every cpu in /proc/stat * return max_cpu number */ int for_all_proc_cpus(int (func)(int)) { FILE *fp; int cpu_num; int retval; fp = fopen_or_die(proc_stat, "r"); retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n"); if (retval != 0) err(1, "%s: failed to parse format", proc_stat); while (1) { retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num); if (retval != 1) break; retval = func(cpu_num); if (retval) { fclose(fp); return retval; } } fclose(fp); return 0; } void for_all_cpus_in_set(size_t set_size, cpu_set_t *cpu_set, int (func)(int)) { int cpu_num; for (cpu_num = 0; cpu_num <= max_cpu_num; ++cpu_num) if (CPU_ISSET_S(cpu_num, set_size, cpu_set)) func(cpu_num); } void init_data_structures(void) { for_all_proc_cpus(set_max_cpu_pkg_num); cpu_setsize = CPU_ALLOC_SIZE((max_cpu_num + 1)); cpu_present_set = CPU_ALLOC((max_cpu_num + 1)); if (cpu_present_set == NULL) err(3, "CPU_ALLOC"); 
	CPU_ZERO_S(cpu_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);
}

/* clear has_hwp if it is not enabled (or being enabled) */
void verify_hwp_is_enabled(void)
{
	unsigned long long msr;

	if (!has_hwp)	/* set in early_cpuid() */
		return;

	/* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */
	get_msr(base_cpu, MSR_PM_ENABLE, &msr);
	if ((msr & 1) == 0) {
		fprintf(stderr, "HWP can be enabled using '--hwp-enable'\n");
		has_hwp = 0;
		return;
	}
}

/*
 * Sanity-check the requested HWP min/max/desired combination from the
 * command line before any MSR is written.
 * Returns 0 if consistent, -EINVAL otherwise.
 */
int req_update_bounds_check(void)
{
	if (!hwp_update_enabled())
		return 0;

	/* fail if min > max requested */
	if ((update_hwp_max && update_hwp_min) &&
	    (req_update.hwp_min > req_update.hwp_max)) {
		printf("hwp-min %d > hwp_max %d\n", req_update.hwp_min, req_update.hwp_max);
		return -EINVAL;
	}
	/* fail if desired > max requested */
	if (req_update.hwp_desired && update_hwp_max &&
	    (req_update.hwp_desired > req_update.hwp_max)) {
		printf("hwp-desired cannot be greater than hwp_max\n");
		return -EINVAL;
	}
	/* fail if desired < min requested */
	if (req_update.hwp_desired && update_hwp_min &&
	    (req_update.hwp_desired < req_update.hwp_min)) {
		printf("hwp-desired cannot be less than hwp_min\n");
		return -EINVAL;
	}

	return 0;
}

/* Use the CPU we are currently running on for all MSR accesses. */
void set_base_cpu(void)
{
	base_cpu = sched_getcpu();
	if (base_cpu < 0)
		err(-ENODEV, "No valid cpus found");
}

/* Load the msr driver if /dev/cpu/<base_cpu>/msr is not already present. */
void probe_dev_msr(void)
{
	struct stat sb;
	char pathname[32];

	sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
	if (stat(pathname, &sb))
		if (system("/sbin/modprobe msr > /dev/null 2>&1"))
			err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
}

/* __get_cpuid() wrapper that exits when the requested leaf is unsupported. */
static void get_cpuid_or_exit(unsigned int leaf,
			unsigned int *eax, unsigned int *ebx,
			unsigned int *ecx, unsigned int *edx)
{
	if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
		errx(1, "Processor not supported\n");
}

/*
 * early_cpuid()
 * initialize turbo_is_enabled, has_hwp, has_epb
 * before cmdline is parsed
 */
void early_cpuid(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int fms, family, model;

	get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
	family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf; if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; if (model == 0x4F) { unsigned long long msr; get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); bdx_highest_ratio = msr & 0xFF; } get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx); turbo_is_enabled = (eax >> 1) & 1; has_hwp = (eax >> 7) & 1; has_epb = (ecx >> 3) & 1; } /* * parse_cpuid() * set * has_hwp, has_hwp_notify, has_hwp_activity_window, has_hwp_epp, has_hwp_request_pkg, has_epb */ void parse_cpuid(void) { unsigned int eax, ebx, ecx, edx, max_level; unsigned int fms, family, model, stepping; eax = ebx = ecx = edx = 0; get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx); if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) genuine_intel = 1; if (debug) fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", (char *)&ebx, (char *)&edx, (char *)&ecx); get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; if (family == 6 || family == 0xf) model += ((fms >> 16) & 0xf) << 4; if (debug) { fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n", max_level, family, model, stepping, family, model, stepping); fprintf(stderr, "CPUID(1): %s %s %s %s %s %s %s %s\n", ecx & (1 << 0) ? "SSE3" : "-", ecx & (1 << 3) ? "MONITOR" : "-", ecx & (1 << 7) ? "EIST" : "-", ecx & (1 << 8) ? "TM2" : "-", edx & (1 << 4) ? "TSC" : "-", edx & (1 << 5) ? "MSR" : "-", edx & (1 << 22) ? "ACPI-TM" : "-", edx & (1 << 29) ? 
"TM" : "-"); } if (!(edx & (1 << 5))) errx(1, "CPUID: no MSR"); get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx); /* turbo_is_enabled already set */ /* has_hwp already set */ has_hwp_notify = eax & (1 << 8); has_hwp_activity_window = eax & (1 << 9); has_hwp_epp = eax & (1 << 10); has_hwp_request_pkg = eax & (1 << 11); if (!has_hwp_request_pkg && update_hwp_use_pkg) errx(1, "--hwp-use-pkg is not available on this hardware"); /* has_epb already set */ if (debug) fprintf(stderr, "CPUID(6): %sTURBO, %sHWP, %sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", turbo_is_enabled ? "" : "No-", has_hwp ? "" : "No-", has_hwp_notify ? "" : "No-", has_hwp_activity_window ? "" : "No-", has_hwp_epp ? "" : "No-", has_hwp_request_pkg ? "" : "No-", has_epb ? "" : "No-"); return; /* success */ } int main(int argc, char **argv) { set_base_cpu(); probe_dev_msr(); init_data_structures(); early_cpuid(); /* initial cpuid parse before cmdline */ cmdline(argc, argv); if (debug) print_version(); parse_cpuid(); /* If CPU-set and PKG-set are not initialized, default to all CPUs */ if ((cpu_selected_set == 0) && (pkg_selected_set == 0)) cpu_selected_set = cpu_present_set; /* * If HWP is being enabled, do it now, so that subsequent operations * that access HWP registers can work. 
 */
	if (update_hwp_enable)
		for_all_cpus_in_set(cpu_setsize, cpu_selected_set, enable_hwp_on_cpu);

	/* If HWP present, but disabled, warn and ignore from here forward */
	verify_hwp_is_enabled();

	/* reject inconsistent min/max/desired requests up front */
	if (req_update_bounds_check())
		return -EINVAL;

	/* display information only, no updates to settings */
	if (!update_epb && !update_turbo && !hwp_update_enabled()) {
		if (cpu_selected_set)
			for_all_cpus_in_set(cpu_setsize, cpu_selected_set, print_cpu_msrs);

		if (has_hwp_request_pkg) {
			/* no packages explicitly selected: show them all */
			if (pkg_selected_set == 0)
				pkg_selected_set = pkg_present_set;

			for_packages(pkg_selected_set, print_pkg_msrs);
		}

		return 0;
	}

	/* update CPU set: sysfs first so cpufreq does not clobber the MSRs */
	if (cpu_selected_set) {
		for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_sysfs);
		for_all_cpus_in_set(cpu_setsize, cpu_selected_set, update_cpu_msrs);
	} else if (pkg_selected_set)
		for_packages(pkg_selected_set, update_hwp_request_pkg);

	return 0;
}
linux-master
tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
// SPDX-License-Identifier: GPL-2.0-only /* * turbostat -- show CPU frequency and C-state residency * on modern Intel and AMD processors. * * Copyright (c) 2023 Intel Corporation. * Len Brown <[email protected]> */ #define _GNU_SOURCE #include MSRHEADER #include INTEL_FAMILY_HEADER #include <stdarg.h> #include <stdio.h> #include <err.h> #include <unistd.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/select.h> #include <sys/resource.h> #include <fcntl.h> #include <signal.h> #include <sys/time.h> #include <stdlib.h> #include <getopt.h> #include <dirent.h> #include <string.h> #include <ctype.h> #include <sched.h> #include <time.h> #include <cpuid.h> #include <sys/capability.h> #include <errno.h> #include <math.h> #include <linux/perf_event.h> #include <asm/unistd.h> #include <stdbool.h> #define UNUSED(x) (void)(x) /* * This list matches the column headers, except * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time * 2. Core and CPU are moved to the end, we can't have strings that contain them * matching on them for --show and --hide. 
*/ /* * buffer size used by sscanf() for added column names * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters */ #define NAME_BYTES 20 #define PATH_BYTES 128 enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE }; enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC }; enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT }; struct msr_counter { unsigned int msr_num; char name[NAME_BYTES]; char path[PATH_BYTES]; unsigned int width; enum counter_type type; enum counter_format format; struct msr_counter *next; unsigned int flags; #define FLAGS_HIDE (1 << 0) #define FLAGS_SHOW (1 << 1) #define SYSFS_PERCPU (1 << 1) }; struct msr_counter bic[] = { { 0x0, "usec", "", 0, 0, 0, NULL, 0 }, { 0x0, "Time_Of_Day_Seconds", "", 0, 0, 0, NULL, 0 }, { 0x0, "Package", "", 0, 0, 0, NULL, 0 }, { 0x0, "Node", "", 0, 0, 0, NULL, 0 }, { 0x0, "Avg_MHz", "", 0, 0, 0, NULL, 0 }, { 0x0, "Busy%", "", 0, 0, 0, NULL, 0 }, { 0x0, "Bzy_MHz", "", 0, 0, 0, NULL, 0 }, { 0x0, "TSC_MHz", "", 0, 0, 0, NULL, 0 }, { 0x0, "IRQ", "", 0, 0, 0, NULL, 0 }, { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL, 0 }, { 0x0, "sysfs", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPU%c1", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPU%c3", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPU%c6", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPU%c7", "", 0, 0, 0, NULL, 0 }, { 0x0, "ThreadC", "", 0, 0, 0, NULL, 0 }, { 0x0, "CoreTmp", "", 0, 0, 0, NULL, 0 }, { 0x0, "CoreCnt", "", 0, 0, 0, NULL, 0 }, { 0x0, "PkgTmp", "", 0, 0, 0, NULL, 0 }, { 0x0, "GFX%rc6", "", 0, 0, 0, NULL, 0 }, { 0x0, "GFXMHz", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg%pc2", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg%pc3", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg%pc6", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg%pc7", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg%pc8", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg%pc9", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pk%pc10", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPU%LPI", "", 0, 0, 0, NULL, 0 }, { 0x0, "SYS%LPI", "", 0, 0, 0, NULL, 0 }, { 0x0, 
"PkgWatt", "", 0, 0, 0, NULL, 0 }, { 0x0, "CorWatt", "", 0, 0, 0, NULL, 0 }, { 0x0, "GFXWatt", "", 0, 0, 0, NULL, 0 }, { 0x0, "PkgCnt", "", 0, 0, 0, NULL, 0 }, { 0x0, "RAMWatt", "", 0, 0, 0, NULL, 0 }, { 0x0, "PKG_%", "", 0, 0, 0, NULL, 0 }, { 0x0, "RAM_%", "", 0, 0, 0, NULL, 0 }, { 0x0, "Pkg_J", "", 0, 0, 0, NULL, 0 }, { 0x0, "Cor_J", "", 0, 0, 0, NULL, 0 }, { 0x0, "GFX_J", "", 0, 0, 0, NULL, 0 }, { 0x0, "RAM_J", "", 0, 0, 0, NULL, 0 }, { 0x0, "Mod%c6", "", 0, 0, 0, NULL, 0 }, { 0x0, "Totl%C0", "", 0, 0, 0, NULL, 0 }, { 0x0, "Any%C0", "", 0, 0, 0, NULL, 0 }, { 0x0, "GFX%C0", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPUGFX%", "", 0, 0, 0, NULL, 0 }, { 0x0, "Core", "", 0, 0, 0, NULL, 0 }, { 0x0, "CPU", "", 0, 0, 0, NULL, 0 }, { 0x0, "APIC", "", 0, 0, 0, NULL, 0 }, { 0x0, "X2APIC", "", 0, 0, 0, NULL, 0 }, { 0x0, "Die", "", 0, 0, 0, NULL, 0 }, { 0x0, "GFXAMHz", "", 0, 0, 0, NULL, 0 }, { 0x0, "IPC", "", 0, 0, 0, NULL, 0 }, { 0x0, "CoreThr", "", 0, 0, 0, NULL, 0 }, { 0x0, "UncMHz", "", 0, 0, 0, NULL, 0 }, }; #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) #define BIC_USEC (1ULL << 0) #define BIC_TOD (1ULL << 1) #define BIC_Package (1ULL << 2) #define BIC_Node (1ULL << 3) #define BIC_Avg_MHz (1ULL << 4) #define BIC_Busy (1ULL << 5) #define BIC_Bzy_MHz (1ULL << 6) #define BIC_TSC_MHz (1ULL << 7) #define BIC_IRQ (1ULL << 8) #define BIC_SMI (1ULL << 9) #define BIC_sysfs (1ULL << 10) #define BIC_CPU_c1 (1ULL << 11) #define BIC_CPU_c3 (1ULL << 12) #define BIC_CPU_c6 (1ULL << 13) #define BIC_CPU_c7 (1ULL << 14) #define BIC_ThreadC (1ULL << 15) #define BIC_CoreTmp (1ULL << 16) #define BIC_CoreCnt (1ULL << 17) #define BIC_PkgTmp (1ULL << 18) #define BIC_GFX_rc6 (1ULL << 19) #define BIC_GFXMHz (1ULL << 20) #define BIC_Pkgpc2 (1ULL << 21) #define BIC_Pkgpc3 (1ULL << 22) #define BIC_Pkgpc6 (1ULL << 23) #define BIC_Pkgpc7 (1ULL << 24) #define BIC_Pkgpc8 (1ULL << 25) #define BIC_Pkgpc9 (1ULL << 26) #define BIC_Pkgpc10 (1ULL << 27) #define BIC_CPU_LPI (1ULL << 28) #define 
BIC_SYS_LPI (1ULL << 29) #define BIC_PkgWatt (1ULL << 30) #define BIC_CorWatt (1ULL << 31) #define BIC_GFXWatt (1ULL << 32) #define BIC_PkgCnt (1ULL << 33) #define BIC_RAMWatt (1ULL << 34) #define BIC_PKG__ (1ULL << 35) #define BIC_RAM__ (1ULL << 36) #define BIC_Pkg_J (1ULL << 37) #define BIC_Cor_J (1ULL << 38) #define BIC_GFX_J (1ULL << 39) #define BIC_RAM_J (1ULL << 40) #define BIC_Mod_c6 (1ULL << 41) #define BIC_Totl_c0 (1ULL << 42) #define BIC_Any_c0 (1ULL << 43) #define BIC_GFX_c0 (1ULL << 44) #define BIC_CPUGFX (1ULL << 45) #define BIC_Core (1ULL << 46) #define BIC_CPU (1ULL << 47) #define BIC_APIC (1ULL << 48) #define BIC_X2APIC (1ULL << 49) #define BIC_Die (1ULL << 50) #define BIC_GFXACTMHz (1ULL << 51) #define BIC_IPC (1ULL << 52) #define BIC_CORE_THROT_CNT (1ULL << 53) #define BIC_UNCORE_MHZ (1ULL << 54) #define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die ) #define BIC_THERMAL_PWR ( BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__) #define BIC_FREQUENCY ( BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_UNCORE_MHZ) #define BIC_IDLE ( BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX) #define BIC_OTHER ( BIC_IRQ | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC) #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC) unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) #define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME) #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 
#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) #define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT) char *proc_stat = "/proc/stat"; FILE *outf; int *fd_percpu; int *fd_instr_count_percpu; struct timeval interval_tv = { 5, 0 }; struct timespec interval_ts = { 5, 0 }; /* Save original CPU model */ unsigned int model_orig; unsigned int num_iterations; unsigned int header_iterations; unsigned int debug; unsigned int quiet; unsigned int shown; unsigned int sums_need_wide_columns; unsigned int rapl_joules; unsigned int summary_only; unsigned int list_header_only; unsigned int dump_only; unsigned int do_snb_cstates; unsigned int do_knl_cstates; unsigned int do_slm_cstates; unsigned int use_c1_residency_msr; unsigned int has_aperf; unsigned int has_epb; unsigned int has_turbo; unsigned int is_hybrid; unsigned int do_irtl_snb; unsigned int do_irtl_hsw; unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int authentic_amd; unsigned int hygon_genuine; unsigned int max_level, max_extended_level; unsigned int has_invariant_tsc; unsigned int do_nhm_platform_info; unsigned int no_MSR_MISC_PWR_MGMT; unsigned int aperf_mperf_multiplier = 1; double bclk; double base_hz; unsigned int has_base_hz; double tsc_tweak = 1.0; unsigned int show_pkg_only; unsigned int show_core_only; char *output_buffer, *outp; unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; unsigned int do_ipc; unsigned long long gfx_cur_rc6_ms; unsigned long long cpuidle_cur_cpu_lpi_us; unsigned long long cpuidle_cur_sys_lpi_us; unsigned int gfx_cur_mhz; unsigned int gfx_act_mhz; unsigned int tj_max; unsigned int tj_max_override; int tcc_offset_bits; double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; unsigned int do_core_perf_limit_reasons; unsigned int has_automatic_cstate_conversion; unsigned int 
dis_cstate_prewake; unsigned int do_gfx_perf_limit_reasons; unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; int base_cpu; double discover_bclk(unsigned int family, unsigned int model); unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ unsigned int has_hwp_notify; /* IA32_HWP_INTERRUPT */ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */ unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ unsigned int has_misc_feature_control; unsigned int first_counter_read = 1; int ignore_stdin; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ /* 0x611 MSR_PKG_ENERGY_STATUS */ #define RAPL_PKG_PERF_STATUS (1 << 1) /* 0x613 MSR_PKG_PERF_STATUS */ #define RAPL_PKG_POWER_INFO (1 << 2) /* 0x614 MSR_PKG_POWER_INFO */ #define RAPL_DRAM (1 << 3) /* 0x618 MSR_DRAM_POWER_LIMIT */ /* 0x619 MSR_DRAM_ENERGY_STATUS */ #define RAPL_DRAM_PERF_STATUS (1 << 4) /* 0x61b MSR_DRAM_PERF_STATUS */ #define RAPL_DRAM_POWER_INFO (1 << 5) /* 0x61c MSR_DRAM_POWER_INFO */ #define RAPL_CORES_POWER_LIMIT (1 << 6) /* 0x638 MSR_PP0_POWER_LIMIT */ #define RAPL_CORE_POLICY (1 << 7) /* 0x63a MSR_PP0_POLICY */ #define RAPL_GFX (1 << 8) /* 0x640 MSR_PP1_POWER_LIMIT */ /* 0x641 MSR_PP1_ENERGY_STATUS */ /* 0x642 MSR_PP1_POLICY */ #define RAPL_CORES_ENERGY_STATUS (1 << 9) /* 0x639 MSR_PP0_ENERGY_STATUS */ #define RAPL_PER_CORE_ENERGY (1 << 10) /* Indicates cores energy collection is per-core, * not per-package. */ #define RAPL_AMD_F17H (1 << 11) /* 0xc0010299 MSR_RAPL_PWR_UNIT */ /* 0xc001029a MSR_CORE_ENERGY_STAT */ /* 0xc001029b MSR_PKG_ENERGY_STAT */ #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT) #define TJMAX_DEFAULT 100 /* MSRs that are not yet in the kernel-provided header. 
*/ #define MSR_RAPL_PWR_UNIT 0xc0010299 #define MSR_CORE_ENERGY_STAT 0xc001029a #define MSR_PKG_ENERGY_STAT 0xc001029b #define MAX(a, b) ((a) > (b) ? (a) : (b)) int backwards_count; char *progname; #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... */ cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset; size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; #define MAX_ADDED_COUNTERS 8 #define MAX_ADDED_THREAD_COUNTERS 24 #define BITMASK_SIZE 32 struct thread_data { struct timeval tv_begin; struct timeval tv_end; struct timeval tv_delta; unsigned long long tsc; unsigned long long aperf; unsigned long long mperf; unsigned long long c1; unsigned long long instr_count; unsigned long long irq_count; unsigned int smi_count; unsigned int cpu_id; unsigned int apic_id; unsigned int x2apic_id; unsigned int flags; bool is_atom; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 unsigned long long counter[MAX_ADDED_THREAD_COUNTERS]; } *thread_even, *thread_odd; struct core_data { unsigned long long c3; unsigned long long c6; unsigned long long c7; unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */ unsigned int core_temp_c; unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */ unsigned int core_id; unsigned long long core_throt_cnt; unsigned long long counter[MAX_ADDED_COUNTERS]; } *core_even, *core_odd; struct pkg_data { unsigned long long pc2; unsigned long long pc3; unsigned long long pc6; unsigned long long pc7; unsigned long long pc8; unsigned long long pc9; unsigned long long pc10; unsigned long long cpu_lpi; unsigned long long sys_lpi; unsigned long long pkg_wtd_core_c0; unsigned long long pkg_any_core_c0; unsigned long long pkg_any_gfxe_c0; unsigned long long pkg_both_core_gfxe_c0; long long gfx_rc6_ms; unsigned int gfx_mhz; unsigned int gfx_act_mhz; unsigned int package_id; unsigned long long energy_pkg; /* MSR_PKG_ENERGY_STATUS */ unsigned long long energy_dram; /* 
MSR_DRAM_ENERGY_STATUS */ unsigned long long energy_cores; /* MSR_PP0_ENERGY_STATUS */ unsigned long long energy_gfx; /* MSR_PP1_ENERGY_STATUS */ unsigned long long rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */ unsigned long long rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */ unsigned int pkg_temp_c; unsigned int uncore_mhz; unsigned long long counter[MAX_ADDED_COUNTERS]; } *package_even, *package_odd; #define ODD_COUNTERS thread_odd, core_odd, package_odd #define EVEN_COUNTERS thread_even, core_even, package_even #define GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no) \ ((thread_base) + \ ((pkg_no) * \ topo.nodes_per_pkg * topo.cores_per_node * topo.threads_per_core) + \ ((node_no) * topo.cores_per_node * topo.threads_per_core) + \ ((core_no) * topo.threads_per_core) + \ (thread_no)) #define GET_CORE(core_base, core_no, node_no, pkg_no) \ ((core_base) + \ ((pkg_no) * topo.nodes_per_pkg * topo.cores_per_node) + \ ((node_no) * topo.cores_per_node) + \ (core_no)) #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) /* * The accumulated sum of MSR is defined as a monotonic * increasing MSR, it will be accumulated periodically, * despite its register's bit width. 
 */
/* Indices into per_cpu_msr_sum entries[], one per accumulated RAPL MSR. */
enum {
	IDX_PKG_ENERGY,
	IDX_DRAM_ENERGY,
	IDX_PP0_ENERGY,
	IDX_PP1_ENERGY,
	IDX_PKG_PERF,
	IDX_DRAM_PERF,
	IDX_COUNT,
};

int get_msr_sum(int cpu, off_t offset, unsigned long long *msr);

struct msr_sum_array {
	/* get_msr_sum() = sum + (get_msr() - last) */
	struct {
		/*The accumulated MSR value is updated by the timer */
		unsigned long long sum;
		/*The MSR footprint recorded in last timer */
		unsigned long long last;
	} entries[IDX_COUNT];
};

/* The percpu MSR sum array.*/
struct msr_sum_array *per_cpu_msr_sum;

/*
 * Map an accumulator index to its MSR offset.
 * IDX_PKG_ENERGY depends on do_rapl: AMD F17h uses a different MSR.
 * Returns -1 for an unknown index.
 */
off_t idx_to_offset(int idx)
{
	off_t offset;

	switch (idx) {
	case IDX_PKG_ENERGY:
		if (do_rapl & RAPL_AMD_F17H)
			offset = MSR_PKG_ENERGY_STAT;
		else
			offset = MSR_PKG_ENERGY_STATUS;
		break;
	case IDX_DRAM_ENERGY:
		offset = MSR_DRAM_ENERGY_STATUS;
		break;
	case IDX_PP0_ENERGY:
		offset = MSR_PP0_ENERGY_STATUS;
		break;
	case IDX_PP1_ENERGY:
		offset = MSR_PP1_ENERGY_STATUS;
		break;
	case IDX_PKG_PERF:
		offset = MSR_PKG_PERF_STATUS;
		break;
	case IDX_DRAM_PERF:
		offset = MSR_DRAM_PERF_STATUS;
		break;
	default:
		offset = -1;
	}
	return offset;
}

/* Inverse of idx_to_offset(); returns -1 for an unknown MSR offset. */
int offset_to_idx(off_t offset)
{
	int idx;

	switch (offset) {
	case MSR_PKG_ENERGY_STATUS:
	case MSR_PKG_ENERGY_STAT:
		idx = IDX_PKG_ENERGY;
		break;
	case MSR_DRAM_ENERGY_STATUS:
		idx = IDX_DRAM_ENERGY;
		break;
	case MSR_PP0_ENERGY_STATUS:
		idx = IDX_PP0_ENERGY;
		break;
	case MSR_PP1_ENERGY_STATUS:
		idx = IDX_PP1_ENERGY;
		break;
	case MSR_PKG_PERF_STATUS:
		idx = IDX_PKG_PERF;
		break;
	case MSR_DRAM_PERF_STATUS:
		idx = IDX_DRAM_PERF;
		break;
	default:
		idx = -1;
	}
	return idx;
}

/* Nonzero if the do_rapl feature bits say this accumulator index exists. */
int idx_valid(int idx)
{
	switch (idx) {
	case IDX_PKG_ENERGY:
		return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
	case IDX_DRAM_ENERGY:
		return do_rapl & RAPL_DRAM;
	case IDX_PP0_ENERGY:
		return do_rapl & RAPL_CORES_ENERGY_STATUS;
	case IDX_PP1_ENERGY:
		return do_rapl & RAPL_GFX;
	case IDX_PKG_PERF:
		return do_rapl & RAPL_PKG_PERF_STATUS;
	case IDX_DRAM_PERF:
		return do_rapl & RAPL_DRAM_PERF_STATUS;
	default:
		return 0;
	}
}

struct sys_counters {
	unsigned int added_thread_counters;
	unsigned int added_core_counters;
	unsigned
int added_package_counters;
	struct msr_counter *tp;	/* thread-scope added-counter list */
	struct msr_counter *cp;	/* core-scope added-counter list */
	struct msr_counter *pp;	/* package-scope added-counter list */
} sys;

/* System-wide totals, printed as the one-line summary row. */
struct system_summary {
	struct thread_data threads;
	struct core_data cores;
	struct pkg_data packages;
} average;

/* Per-logical-CPU topology record; "cpus" is indexed by Linux CPU number. */
struct cpu_topology {
	int physical_package_id;
	int die_id;
	int logical_cpu_id;
	int physical_node_id;
	int logical_node_id;	/* 0-based count within the package */
	int physical_core_id;
	int thread_id;
	cpu_set_t *put_ids;	/* Processing Unit/Thread IDs */
} *cpus;

struct topo_params {
	int num_packages;
	int num_die;
	int num_cpus;
	int num_cores;
	int max_cpu_num;
	int max_node_num;
	int nodes_per_pkg;
	int cores_per_node;
	int threads_per_core;
} topo;

struct timeval tv_even, tv_odd, tv_delta;

int *irq_column_2_cpu;		/* /proc/interrupts column numbers */
int *irqs_per_cpu;		/* indexed by cpu_num */

void setup_all_buffers(void);

/* Which low-power-idle residency file is in use (sysfs preferred). */
char *sys_lpi_file;
char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec";

int cpu_is_not_present(int cpu)
{
	return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
}

/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 */
int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pkg_data *),
		 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
	int retval, pkg_no, core_no, thread_no, node_no;

	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
			for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
				for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) {
					struct thread_data *t;
					struct core_data *c;
					struct pkg_data *p;

					t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no);

					if (cpu_is_not_present(t->cpu_id))
						continue;

					c = GET_CORE(core_base, core_no, node_no, pkg_no);
					p = GET_PKG(pkg_base, pkg_no);

					retval = func(t, c, p);
					if (retval)
return retval;
				}
			}
		}
	}
	return 0;
}

/* Pin the current task to "cpu" so subsequent MSR/perf accesses are local. */
int cpu_migrate(int cpu)
{
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
	CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
	if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
		return -1;
	else
		return 0;
}

/*
 * Return a cached fd for /dev/cpu/N/msr, opening it on first use.
 * Exits via err() when the msr device node cannot be opened.
 */
int get_msr_fd(int cpu)
{
	char pathname[32];
	int fd;

	fd = fd_percpu[cpu];

	if (fd)
		return fd;

	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
	if (fd < 0)
		err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);

	fd_percpu[cpu] = fd;

	return fd;
}

/* Thin syscall wrapper: glibc provides no perf_event_open() stub. */
static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
}

/*
 * Open a PERF_COUNT_HW_INSTRUCTIONS counter on cpu_num (all pids,
 * user + kernel).  On failure, warn, disable the IPC column, and
 * return -1.
 */
static int perf_instr_count_open(int cpu_num)
{
	struct perf_event_attr pea;
	int fd;

	memset(&pea, 0, sizeof(struct perf_event_attr));
	pea.type = PERF_TYPE_HARDWARE;
	pea.size = sizeof(struct perf_event_attr);
	pea.config = PERF_COUNT_HW_INSTRUCTIONS;

	/* counter for cpu_num, including user + kernel and all processes */
	fd = perf_event_open(&pea, -1, cpu_num, -1, 0);
	if (fd == -1) {
		warnx("capget(CAP_PERFMON) failed, try \"# setcap cap_sys_admin=ep %s\"", progname);
		BIC_NOT_PRESENT(BIC_IPC);
	}

	return fd;
}

/* Cached per-cpu fd for the instruction-count perf event. */
int get_instr_count_fd(int cpu)
{
	if (fd_instr_count_percpu[cpu])
		return fd_instr_count_percpu[cpu];

	fd_instr_count_percpu[cpu] = perf_instr_count_open(cpu);

	return fd_instr_count_percpu[cpu];
}

/* Read one 8-byte MSR at "offset" on "cpu" into *msr; exits on failure. */
int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
	ssize_t retval;

	retval = pread(get_msr_fd(cpu), msr, sizeof(*msr), offset);

	if (retval != sizeof *msr)
		err(-1, "cpu%d: msr offset 0x%llx read failed", cpu, (unsigned long long)offset);

	return 0;
}

/* Column names seen before capabilities are probed; resolved later. */
#define MAX_DEFERRED 16
char *deferred_add_names[MAX_DEFERRED];
char *deferred_skip_names[MAX_DEFERRED];
int deferred_add_index;
int deferred_skip_index;

/*
 * HIDE_LIST - hide this list of counters, show the rest [default]
 * SHOW_LIST - show this list of counters,
hide the rest */ enum show_hide_mode { SHOW_LIST, HIDE_LIST } global_show_hide_mode = HIDE_LIST; void help(void) { fprintf(outf, "Usage: turbostat [OPTIONS][(--interval seconds) | COMMAND ...]\n" "\n" "Turbostat forks the specified COMMAND and prints statistics\n" "when COMMAND completes.\n" "If no COMMAND is specified, turbostat wakes every 5-seconds\n" "to print statistics, until interrupted.\n" " -a, --add add a counter\n" " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n" " -c, --cpu cpu-set limit output to summary plus cpu-set:\n" " {core | package | j,k,l..m,n-p }\n" " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n" " -D, --Dump displays the raw counter values\n" " -e, --enable [all | column]\n" " shows all or the specified disabled column\n" " -H, --hide [column|column,column,...]\n" " hide the specified column(s)\n" " -i, --interval sec.subsec\n" " Override default 5-second measurement interval\n" " -J, --Joules displays energy in Joules instead of Watts\n" " -l, --list list column headers only\n" " -n, --num_iterations num\n" " number of the measurement iterations\n" " -N, --header_iterations num\n" " print header every num iterations\n" " -o, --out file\n" " create or truncate \"file\" for all output\n" " -q, --quiet skip decoding system configuration header\n" " -s, --show [column|column,column,...]\n" " show only the specified column(s)\n" " -S, --Summary\n" " limits output to 1-line system summary per interval\n" " -T, --TCC temperature\n" " sets the Thermal Control Circuit temperature in\n" " degrees Celsius\n" " -h, --help print this help message\n" " -v, --version print version information\n" "\n" "For more help, run \"man turbostat\"\n"); } /* * bic_lookup * for all the strings in comma separate name_list, * set the approprate bit in return value. 
*/ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) { unsigned int i; unsigned long long retval = 0; while (name_list) { char *comma; comma = strchr(name_list, ','); if (comma) *comma = '\0'; for (i = 0; i < MAX_BIC; ++i) { if (!strcmp(name_list, bic[i].name)) { retval |= (1ULL << i); break; } if (!strcmp(name_list, "all")) { retval |= ~0; break; } else if (!strcmp(name_list, "topology")) { retval |= BIC_TOPOLOGY; break; } else if (!strcmp(name_list, "power")) { retval |= BIC_THERMAL_PWR; break; } else if (!strcmp(name_list, "idle")) { retval |= BIC_IDLE; break; } else if (!strcmp(name_list, "frequency")) { retval |= BIC_FREQUENCY; break; } else if (!strcmp(name_list, "other")) { retval |= BIC_OTHER; break; } } if (i == MAX_BIC) { if (mode == SHOW_LIST) { deferred_add_names[deferred_add_index++] = name_list; if (deferred_add_index >= MAX_DEFERRED) { fprintf(stderr, "More than max %d un-recognized --add options '%s'\n", MAX_DEFERRED, name_list); help(); exit(1); } } else { deferred_skip_names[deferred_skip_index++] = name_list; if (debug) fprintf(stderr, "deferred \"%s\"\n", name_list); if (deferred_skip_index >= MAX_DEFERRED) { fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n", MAX_DEFERRED, name_list); help(); exit(1); } } } name_list = comma; if (name_list) name_list++; } return retval; } void print_header(char *delim) { struct msr_counter *mp; int printed = 0; if (DO_BIC(BIC_USEC)) outp += sprintf(outp, "%susec", (printed++ ? delim : "")); if (DO_BIC(BIC_TOD)) outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); if (DO_BIC(BIC_Package)) outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); if (DO_BIC(BIC_Die)) outp += sprintf(outp, "%sDie", (printed++ ? delim : "")); if (DO_BIC(BIC_Node)) outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); if (DO_BIC(BIC_Core)) outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU)) outp += sprintf(outp, "%sCPU", (printed++ ? 
delim : "")); if (DO_BIC(BIC_APIC)) outp += sprintf(outp, "%sAPIC", (printed++ ? delim : "")); if (DO_BIC(BIC_X2APIC)) outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : "")); if (DO_BIC(BIC_Avg_MHz)) outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : "")); if (DO_BIC(BIC_Busy)) outp += sprintf(outp, "%sBusy%%", (printed++ ? delim : "")); if (DO_BIC(BIC_Bzy_MHz)) outp += sprintf(outp, "%sBzy_MHz", (printed++ ? delim : "")); if (DO_BIC(BIC_TSC_MHz)) outp += sprintf(outp, "%sTSC_MHz", (printed++ ? delim : "")); if (DO_BIC(BIC_IPC)) outp += sprintf(outp, "%sIPC", (printed++ ? delim : "")); if (DO_BIC(BIC_IRQ)) { if (sums_need_wide_columns) outp += sprintf(outp, "%s IRQ", (printed++ ? delim : "")); else outp += sprintf(outp, "%sIRQ", (printed++ ? delim : "")); } if (DO_BIC(BIC_SMI)) outp += sprintf(outp, "%sSMI", (printed++ ? delim : "")); for (mp = sys.tp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name); else outp += sprintf(outp, "%s%10.10s", (printed++ ? delim : ""), mp->name); } else { if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) outp += sprintf(outp, "%s%8s", (printed++ ? delim : ""), mp->name); else outp += sprintf(outp, "%s%s", (printed++ ? delim : ""), mp->name); } } if (DO_BIC(BIC_CPU_c1)) outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU_c3)) outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU_c6)) outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU_c7)) outp += sprintf(outp, "%sCPU%%c7", (printed++ ? delim : "")); if (DO_BIC(BIC_Mod_c6)) outp += sprintf(outp, "%sMod%%c6", (printed++ ? delim : "")); if (DO_BIC(BIC_CoreTmp)) outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : "")); if (DO_BIC(BIC_CORE_THROT_CNT)) outp += sprintf(outp, "%sCoreThr", (printed++ ? 
delim : "")); if (do_rapl && !rapl_joules) { if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); } else if (do_rapl && rapl_joules) { if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); } for (mp = sys.cp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) outp += sprintf(outp, "%s%18.18s", delim, mp->name); else outp += sprintf(outp, "%s%10.10s", delim, mp->name); } else { if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) outp += sprintf(outp, "%s%8s", delim, mp->name); else outp += sprintf(outp, "%s%s", delim, mp->name); } } if (DO_BIC(BIC_PkgTmp)) outp += sprintf(outp, "%sPkgTmp", (printed++ ? delim : "")); if (DO_BIC(BIC_GFX_rc6)) outp += sprintf(outp, "%sGFX%%rc6", (printed++ ? delim : "")); if (DO_BIC(BIC_GFXMHz)) outp += sprintf(outp, "%sGFXMHz", (printed++ ? delim : "")); if (DO_BIC(BIC_GFXACTMHz)) outp += sprintf(outp, "%sGFXAMHz", (printed++ ? delim : "")); if (DO_BIC(BIC_Totl_c0)) outp += sprintf(outp, "%sTotl%%C0", (printed++ ? delim : "")); if (DO_BIC(BIC_Any_c0)) outp += sprintf(outp, "%sAny%%C0", (printed++ ? delim : "")); if (DO_BIC(BIC_GFX_c0)) outp += sprintf(outp, "%sGFX%%C0", (printed++ ? delim : "")); if (DO_BIC(BIC_CPUGFX)) outp += sprintf(outp, "%sCPUGFX%%", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc2)) outp += sprintf(outp, "%sPkg%%pc2", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc3)) outp += sprintf(outp, "%sPkg%%pc3", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc6)) outp += sprintf(outp, "%sPkg%%pc6", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc7)) outp += sprintf(outp, "%sPkg%%pc7", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc8)) outp += sprintf(outp, "%sPkg%%pc8", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc9)) outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc10)) outp += sprintf(outp, "%sPk%%pc10", (printed++ ? 
delim : "")); if (DO_BIC(BIC_CPU_LPI)) outp += sprintf(outp, "%sCPU%%LPI", (printed++ ? delim : "")); if (DO_BIC(BIC_SYS_LPI)) outp += sprintf(outp, "%sSYS%%LPI", (printed++ ? delim : "")); if (do_rapl && !rapl_joules) { if (DO_BIC(BIC_PkgWatt)) outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : "")); if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : "")); if (DO_BIC(BIC_GFXWatt)) outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : "")); if (DO_BIC(BIC_RAMWatt)) outp += sprintf(outp, "%sRAMWatt", (printed++ ? delim : "")); if (DO_BIC(BIC_PKG__)) outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); if (DO_BIC(BIC_RAM__)) outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); } else if (do_rapl && rapl_joules) { if (DO_BIC(BIC_Pkg_J)) outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : "")); if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, "%sCor_J", (printed++ ? delim : "")); if (DO_BIC(BIC_GFX_J)) outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : "")); if (DO_BIC(BIC_RAM_J)) outp += sprintf(outp, "%sRAM_J", (printed++ ? delim : "")); if (DO_BIC(BIC_PKG__)) outp += sprintf(outp, "%sPKG_%%", (printed++ ? delim : "")); if (DO_BIC(BIC_RAM__)) outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : "")); } if (DO_BIC(BIC_UNCORE_MHZ)) outp += sprintf(outp, "%sUncMHz", (printed++ ? 
delim : "")); for (mp = sys.pp; mp; mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 64) outp += sprintf(outp, "%s%18.18s", delim, mp->name); else outp += sprintf(outp, "%s%10.10s", delim, mp->name); } else { if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) outp += sprintf(outp, "%s%8s", delim, mp->name); else outp += sprintf(outp, "%s%s", delim, mp->name); } } outp += sprintf(outp, "\n"); } int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; struct msr_counter *mp; outp += sprintf(outp, "t %p, c %p, p %p\n", t, c, p); if (t) { outp += sprintf(outp, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags); outp += sprintf(outp, "TSC: %016llX\n", t->tsc); outp += sprintf(outp, "aperf: %016llX\n", t->aperf); outp += sprintf(outp, "mperf: %016llX\n", t->mperf); outp += sprintf(outp, "c1: %016llX\n", t->c1); if (DO_BIC(BIC_IPC)) outp += sprintf(outp, "IPC: %lld\n", t->instr_count); if (DO_BIC(BIC_IRQ)) outp += sprintf(outp, "IRQ: %lld\n", t->irq_count); if (DO_BIC(BIC_SMI)) outp += sprintf(outp, "SMI: %d\n", t->smi_count); for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { outp += sprintf(outp, "tADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, t->counter[i]); } } if (c) { outp += sprintf(outp, "core: %d\n", c->core_id); outp += sprintf(outp, "c3: %016llX\n", c->c3); outp += sprintf(outp, "c6: %016llX\n", c->c6); outp += sprintf(outp, "c7: %016llX\n", c->c7); outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c); outp += sprintf(outp, "cpu_throt_count: %016llX\n", c->core_throt_cnt); outp += sprintf(outp, "Joules: %0X\n", c->core_energy); for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, c->counter[i]); } outp += sprintf(outp, "mc6_us: %016llX\n", c->mc6_us); } if (p) { outp += sprintf(outp, "package: %d\n", p->package_id); outp += sprintf(outp, "Weighted cores: %016llX\n", p->pkg_wtd_core_c0); outp += sprintf(outp, "Any cores: %016llX\n", 
p->pkg_any_core_c0); outp += sprintf(outp, "Any GFX: %016llX\n", p->pkg_any_gfxe_c0); outp += sprintf(outp, "CPU + GFX: %016llX\n", p->pkg_both_core_gfxe_c0); outp += sprintf(outp, "pc2: %016llX\n", p->pc2); if (DO_BIC(BIC_Pkgpc3)) outp += sprintf(outp, "pc3: %016llX\n", p->pc3); if (DO_BIC(BIC_Pkgpc6)) outp += sprintf(outp, "pc6: %016llX\n", p->pc6); if (DO_BIC(BIC_Pkgpc7)) outp += sprintf(outp, "pc7: %016llX\n", p->pc7); outp += sprintf(outp, "pc8: %016llX\n", p->pc8); outp += sprintf(outp, "pc9: %016llX\n", p->pc9); outp += sprintf(outp, "pc10: %016llX\n", p->pc10); outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); outp += sprintf(outp, "Joules PKG: %0llX\n", p->energy_pkg); outp += sprintf(outp, "Joules COR: %0llX\n", p->energy_cores); outp += sprintf(outp, "Joules GFX: %0llX\n", p->energy_gfx); outp += sprintf(outp, "Joules RAM: %0llX\n", p->energy_dram); outp += sprintf(outp, "Throttle PKG: %0llX\n", p->rapl_pkg_perf_status); outp += sprintf(outp, "Throttle RAM: %0llX\n", p->rapl_dram_perf_status); outp += sprintf(outp, "PTM: %dC\n", p->pkg_temp_c); for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { outp += sprintf(outp, "pADDED [%d] msr0x%x: %08llX\n", i, mp->msr_num, p->counter[i]); } } outp += sprintf(outp, "\n"); return 0; } /* * column formatting convention & formats */ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { double interval_float, tsc; char *fmt8; int i; struct msr_counter *mp; char *delim = "\t"; int printed = 0; /* if showing only 1st thread in core and this isn't one, bail out */ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; /* if showing only 1st thread in pkg and this isn't one, bail out */ if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; /*if not summary line and --cpu is used */ if ((t != &average.threads) && (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) return 
0; if (DO_BIC(BIC_USEC)) { /* on each row, print how many usec each timestamp took to gather */ struct timeval tv; timersub(&t->tv_end, &t->tv_begin, &tv); outp += sprintf(outp, "%5ld\t", tv.tv_sec * 1000000 + tv.tv_usec); } /* Time_Of_Day_Seconds: on each row, print sec.usec last timestamp taken */ if (DO_BIC(BIC_TOD)) outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec / 1000000.0; tsc = t->tsc * tsc_tweak; /* topo columns, print blanks on 1st (average) line */ if (t == &average.threads) { if (DO_BIC(BIC_Package)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_Die)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_Node)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_Core)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_APIC)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_X2APIC)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } else { if (DO_BIC(BIC_Package)) { if (p) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->package_id); else outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } if (DO_BIC(BIC_Die)) { if (c) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id); else outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } if (DO_BIC(BIC_Node)) { if (t) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].physical_node_id); else outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } if (DO_BIC(BIC_Core)) { if (c) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id); else outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } if (DO_BIC(BIC_CPU)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id); if (DO_BIC(BIC_APIC)) outp += sprintf(outp, "%s%d", (printed++ ? 
delim : ""), t->apic_id); if (DO_BIC(BIC_X2APIC)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id); } if (DO_BIC(BIC_Avg_MHz)) outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 / units * t->aperf / interval_float); if (DO_BIC(BIC_Busy)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->mperf / tsc); if (DO_BIC(BIC_Bzy_MHz)) { if (has_base_hz) outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), base_hz / units * t->aperf / t->mperf); else outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), tsc / units * t->aperf / t->mperf / interval_float); } if (DO_BIC(BIC_TSC_MHz)) outp += sprintf(outp, "%s%.0f", (printed++ ? delim : ""), 1.0 * t->tsc / units / interval_float); if (DO_BIC(BIC_IPC)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 1.0 * t->instr_count / t->aperf); /* IRQ */ if (DO_BIC(BIC_IRQ)) { if (sums_need_wide_columns) outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->irq_count); else outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->irq_count); } /* SMI */ if (DO_BIC(BIC_SMI)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->smi_count); /* Added counters */ for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)t->counter[i]); else outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_DELTA) { if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), t->counter[i]); else outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), t->counter[i]); } else if (mp->format == FORMAT_PERCENT) { if (mp->type == COUNTER_USEC) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), t->counter[i] / interval_float / 10000); else outp += sprintf(outp, "%s%.2f", (printed++ ? 
delim : ""), 100.0 * t->counter[i] / tsc); } } /* C1 */ if (DO_BIC(BIC_CPU_c1)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * t->c1 / tsc); /* print per-core data only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; if (DO_BIC(BIC_CPU_c3)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3 / tsc); if (DO_BIC(BIC_CPU_c6)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6 / tsc); if (DO_BIC(BIC_CPU_c7)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c7 / tsc); /* Mod%c6 */ if (DO_BIC(BIC_Mod_c6)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->mc6_us / tsc); if (DO_BIC(BIC_CoreTmp)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c); /* Core throttle count */ if (DO_BIC(BIC_CORE_THROT_CNT)) outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->core_throt_cnt); for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) outp += sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)c->counter[i]); else outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_DELTA) { if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), c->counter[i]); else outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->counter[i]); } else if (mp->format == FORMAT_PERCENT) { outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->counter[i] / tsc); } } fmt8 = "%s%.2f"; if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float); if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, fmt8, (printed++ ? 
delim : ""), c->core_energy * rapl_energy_units); /* print per-package data only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) goto done; /* PkgTmp */ if (DO_BIC(BIC_PkgTmp)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->pkg_temp_c); /* GFXrc6 */ if (DO_BIC(BIC_GFX_rc6)) { if (p->gfx_rc6_ms == -1) { /* detect GFX counter reset */ outp += sprintf(outp, "%s**.**", (printed++ ? delim : "")); } else { outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), p->gfx_rc6_ms / 10.0 / interval_float); } } /* GFXMHz */ if (DO_BIC(BIC_GFXMHz)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_mhz); /* GFXACTMHz */ if (DO_BIC(BIC_GFXACTMHz)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->gfx_act_mhz); /* Totl%C0, Any%C0 GFX%C0 CPUGFX% */ if (DO_BIC(BIC_Totl_c0)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_wtd_core_c0 / tsc); if (DO_BIC(BIC_Any_c0)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_core_c0 / tsc); if (DO_BIC(BIC_GFX_c0)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_any_gfxe_c0 / tsc); if (DO_BIC(BIC_CPUGFX)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pkg_both_core_gfxe_c0 / tsc); if (DO_BIC(BIC_Pkgpc2)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc2 / tsc); if (DO_BIC(BIC_Pkgpc3)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc3 / tsc); if (DO_BIC(BIC_Pkgpc6)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc6 / tsc); if (DO_BIC(BIC_Pkgpc7)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc7 / tsc); if (DO_BIC(BIC_Pkgpc8)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc8 / tsc); if (DO_BIC(BIC_Pkgpc9)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc9 / tsc); if (DO_BIC(BIC_Pkgpc10)) outp += sprintf(outp, "%s%.2f", (printed++ ? 
delim : ""), 100.0 * p->pc10 / tsc); if (DO_BIC(BIC_CPU_LPI)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float); if (DO_BIC(BIC_SYS_LPI)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); if (DO_BIC(BIC_PkgWatt)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float); if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float); if (DO_BIC(BIC_GFXWatt)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float); if (DO_BIC(BIC_RAMWatt)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float); if (DO_BIC(BIC_Pkg_J)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units); if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units); if (DO_BIC(BIC_GFX_J)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units); if (DO_BIC(BIC_RAM_J)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units); if (DO_BIC(BIC_PKG__)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float); if (DO_BIC(BIC_RAM__)) outp += sprintf(outp, fmt8, (printed++ ? delim : ""), 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float); /* UncMHz */ if (DO_BIC(BIC_UNCORE_MHZ)) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->uncore_mhz); for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) { if (mp->width == 32) outp += sprintf(outp, "%s0x%08x", (printed++ ? 
delim : ""), (unsigned int)p->counter[i]); else outp += sprintf(outp, "%s0x%016llx", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_DELTA) { if ((mp->type == COUNTER_ITEMS) && sums_need_wide_columns) outp += sprintf(outp, "%s%8lld", (printed++ ? delim : ""), p->counter[i]); else outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), p->counter[i]); } else if (mp->format == FORMAT_PERCENT) { outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->counter[i] / tsc); } } done: if (*(outp - 1) != '\n') outp += sprintf(outp, "\n"); return 0; } void flush_output_stdout(void) { FILE *filep; if (outf == stderr) filep = stdout; else filep = outf; fputs(output_buffer, filep); fflush(filep); outp = output_buffer; } void flush_output_stderr(void) { fputs(output_buffer, outf); fflush(outf); outp = output_buffer; } void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { static int count; if ((!count || (header_iterations && !(count % header_iterations))) || !summary_only) print_header("\t"); format_counters(&average.threads, &average.cores, &average.packages); count++; if (summary_only) return; for_all_cpus(format_counters, t, c, p); } #define DELTA_WRAP32(new, old) \ old = ((((unsigned long long)new << 32) - ((unsigned long long)old << 32)) >> 32); int delta_package(struct pkg_data *new, struct pkg_data *old) { int i; struct msr_counter *mp; if (DO_BIC(BIC_Totl_c0)) old->pkg_wtd_core_c0 = new->pkg_wtd_core_c0 - old->pkg_wtd_core_c0; if (DO_BIC(BIC_Any_c0)) old->pkg_any_core_c0 = new->pkg_any_core_c0 - old->pkg_any_core_c0; if (DO_BIC(BIC_GFX_c0)) old->pkg_any_gfxe_c0 = new->pkg_any_gfxe_c0 - old->pkg_any_gfxe_c0; if (DO_BIC(BIC_CPUGFX)) old->pkg_both_core_gfxe_c0 = new->pkg_both_core_gfxe_c0 - old->pkg_both_core_gfxe_c0; old->pc2 = new->pc2 - old->pc2; if (DO_BIC(BIC_Pkgpc3)) old->pc3 = new->pc3 - old->pc3; if (DO_BIC(BIC_Pkgpc6)) old->pc6 = new->pc6 - old->pc6; if (DO_BIC(BIC_Pkgpc7)) old->pc7 = 
new->pc7 - old->pc7;
	old->pc8 = new->pc8 - old->pc8;
	old->pc9 = new->pc9 - old->pc9;
	old->pc10 = new->pc10 - old->pc10;
	old->cpu_lpi = new->cpu_lpi - old->cpu_lpi;
	old->sys_lpi = new->sys_lpi - old->sys_lpi;
	old->pkg_temp_c = new->pkg_temp_c;

	/* flag an error when rc6 counter resets/wraps */
	if (old->gfx_rc6_ms > new->gfx_rc6_ms)
		old->gfx_rc6_ms = -1;
	else
		old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;

	old->uncore_mhz = new->uncore_mhz;
	old->gfx_mhz = new->gfx_mhz;
	old->gfx_act_mhz = new->gfx_act_mhz;

	old->energy_pkg = new->energy_pkg - old->energy_pkg;
	old->energy_cores = new->energy_cores - old->energy_cores;
	old->energy_gfx = new->energy_gfx - old->energy_gfx;
	old->energy_dram = new->energy_dram - old->energy_dram;
	old->rapl_pkg_perf_status = new->rapl_pkg_perf_status - old->rapl_pkg_perf_status;
	old->rapl_dram_perf_status = new->rapl_dram_perf_status - old->rapl_dram_perf_status;

	for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
		if (mp->format == FORMAT_RAW)
			old->counter[i] = new->counter[i];
		else
			old->counter[i] = new->counter[i] - old->counter[i];
	}

	return 0;
}

/* old = new - old for core-scope counters; temp/throttle-count copied as-is. */
void delta_core(struct core_data *new, struct core_data *old)
{
	int i;
	struct msr_counter *mp;

	old->c3 = new->c3 - old->c3;
	old->c6 = new->c6 - old->c6;
	old->c7 = new->c7 - old->c7;
	old->core_temp_c = new->core_temp_c;
	old->core_throt_cnt = new->core_throt_cnt;
	old->mc6_us = new->mc6_us - old->mc6_us;

	/* core_energy is a 32-bit register: subtract with wraparound handling */
	DELTA_WRAP32(new->core_energy, old->core_energy);

	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
		if (mp->format == FORMAT_RAW)
			old->counter[i] = new->counter[i];
		else
			old->counter[i] = new->counter[i] - old->counter[i];
	}
}

/*
 * True when "bic" must be read so that C1 residency can be derived,
 * i.e. CPU%c1 is enabled and there is no dedicated C1 residency MSR.
 */
int soft_c1_residency_display(int bic)
{
	if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
		return 0;

	return DO_BIC_READ(bic);
}

/*
 * old = new - old
 */
int delta_thread(struct thread_data *new, struct thread_data *old, struct core_data *core_delta)
{
	int i;
	struct msr_counter *mp;

	/* we run cpuid just the 1st time, copy the results */
	if (DO_BIC(BIC_APIC))
		new->apic_id = old->apic_id;

	if (DO_BIC(BIC_X2APIC))
		new->x2apic_id = old->x2apic_id;

	/*
	 * the timestamps from start of measurement interval are in "old"
	 * the timestamp from end of measurement interval are in "new"
	 * over-write old w/ new so we can print end of interval values
	 */

	timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
	old->tv_begin = new->tv_begin;
	old->tv_end = new->tv_end;

	old->tsc = new->tsc - old->tsc;

	/* check for TSC < 1 Mcycles over interval */
	if (old->tsc < (1000 * 1000))
		errx(-3, "Insanely slow TSC rate, TSC stops in idle?\n"
		     "You can disable all c-states by booting with \"idle=poll\"\n"
		     "or just the deep ones with \"processor.max_cstate=1\"");

	old->c1 = new->c1 - old->c1;

	if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || soft_c1_residency_display(BIC_Avg_MHz)) {
		if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
			old->aperf = new->aperf - old->aperf;
			old->mperf = new->mperf - old->mperf;
		} else {
			return -1;
		}
	}

	if (use_c1_residency_msr) {
		/*
		 * Some models have a dedicated C1 residency MSR,
		 * which should be more accurate than the derivation below.
		 */
	} else {
		/*
		 * As counter collection is not atomic,
		 * it is possible for mperf's non-halted cycles + idle states
		 * to exceed TSC's all cycles: show c1 = 0% in that case.
		 */
		if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
			old->c1 = 0;
		else {
			/* normal case, derive c1 */
			old->c1 = (old->tsc * tsc_tweak) - old->mperf - core_delta->c3 - core_delta->c6 - core_delta->c7;
		}
	}

	if (old->mperf == 0) {
		if (debug > 1)
			fprintf(outf, "cpu%d MPERF 0!\n", old->cpu_id);
		old->mperf = 1;	/* divide by 0 protection */
	}

	if (DO_BIC(BIC_IPC))
		old->instr_count = new->instr_count - old->instr_count;

	if (DO_BIC(BIC_IRQ))
		old->irq_count = new->irq_count - old->irq_count;

	if (DO_BIC(BIC_SMI))
		old->smi_count = new->smi_count - old->smi_count;

	for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) {
		if (mp->format == FORMAT_RAW)
			old->counter[i] = new->counter[i];
		else
			old->counter[i] = new->counter[i] - old->counter[i];
	}
	return 0;
}

/*
 * Compute all deltas for one CPU: core deltas only on the 1st thread in a
 * core, package deltas only on the 1st core in a package.
 */
int delta_cpu(struct thread_data *t, struct core_data *c,
	      struct pkg_data *p, struct thread_data *t2, struct core_data *c2, struct pkg_data *p2)
{
	int retval = 0;

	/* calculate core delta only for 1st thread in core */
	if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
		delta_core(c, c2);

	/* always calculate thread delta */
	retval = delta_thread(t, t2, c2);	/* c2 is core delta */
	if (retval)
		return retval;

	/* calculate package delta only for 1st core in package */
	if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
		retval = delta_package(p, p2);

	return retval;
}

/* Zero one thread/core/package counter set (e.g. before accumulating averages). */
void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	int i;
	struct msr_counter *mp;

	t->tv_begin.tv_sec = 0;
	t->tv_begin.tv_usec = 0;
	t->tv_end.tv_sec = 0;
	t->tv_end.tv_usec = 0;
	t->tv_delta.tv_sec = 0;
	t->tv_delta.tv_usec = 0;

	t->tsc = 0;
	t->aperf = 0;
	t->mperf = 0;
	t->c1 = 0;

	t->instr_count = 0;

	t->irq_count = 0;
	t->smi_count = 0;

	/* tells format_counters to dump all fields from this set */
	t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;

	c->c3 = 0;
	c->c6 = 0;
	c->c7 = 0;
	c->mc6_us = 0;
	c->core_temp_c = 0;
	c->core_energy = 0;
	c->core_throt_cnt = 0;

	p->pkg_wtd_core_c0 = 0;
	p->pkg_any_core_c0 = 0;
p->pkg_any_gfxe_c0 = 0; p->pkg_both_core_gfxe_c0 = 0; p->pc2 = 0; if (DO_BIC(BIC_Pkgpc3)) p->pc3 = 0; if (DO_BIC(BIC_Pkgpc6)) p->pc6 = 0; if (DO_BIC(BIC_Pkgpc7)) p->pc7 = 0; p->pc8 = 0; p->pc9 = 0; p->pc10 = 0; p->cpu_lpi = 0; p->sys_lpi = 0; p->energy_pkg = 0; p->energy_dram = 0; p->energy_cores = 0; p->energy_gfx = 0; p->rapl_pkg_perf_status = 0; p->rapl_dram_perf_status = 0; p->pkg_temp_c = 0; p->gfx_rc6_ms = 0; p->uncore_mhz = 0; p->gfx_mhz = 0; p->gfx_act_mhz = 0; for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) t->counter[i] = 0; for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) c->counter[i] = 0; for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) p->counter[i] = 0; } int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; struct msr_counter *mp; /* copy un-changing apic_id's */ if (DO_BIC(BIC_APIC)) average.threads.apic_id = t->apic_id; if (DO_BIC(BIC_X2APIC)) average.threads.x2apic_id = t->x2apic_id; /* remember first tv_begin */ if (average.threads.tv_begin.tv_sec == 0) average.threads.tv_begin = t->tv_begin; /* remember last tv_end */ average.threads.tv_end = t->tv_end; average.threads.tsc += t->tsc; average.threads.aperf += t->aperf; average.threads.mperf += t->mperf; average.threads.c1 += t->c1; average.threads.instr_count += t->instr_count; average.threads.irq_count += t->irq_count; average.threads.smi_count += t->smi_count; for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; average.threads.counter[i] += t->counter[i]; } /* sum per-core values only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; average.cores.c3 += c->c3; average.cores.c6 += c->c6; average.cores.c7 += c->c7; average.cores.mc6_us += c->mc6_us; average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c); average.cores.core_throt_cnt = MAX(average.cores.core_throt_cnt, c->core_throt_cnt); average.cores.core_energy += c->core_energy; for (i = 0, mp = sys.cp; mp; i++, mp 
= mp->next) { if (mp->format == FORMAT_RAW) continue; average.cores.counter[i] += c->counter[i]; } /* sum per-pkg values only for 1st core in pkg */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (DO_BIC(BIC_Totl_c0)) average.packages.pkg_wtd_core_c0 += p->pkg_wtd_core_c0; if (DO_BIC(BIC_Any_c0)) average.packages.pkg_any_core_c0 += p->pkg_any_core_c0; if (DO_BIC(BIC_GFX_c0)) average.packages.pkg_any_gfxe_c0 += p->pkg_any_gfxe_c0; if (DO_BIC(BIC_CPUGFX)) average.packages.pkg_both_core_gfxe_c0 += p->pkg_both_core_gfxe_c0; average.packages.pc2 += p->pc2; if (DO_BIC(BIC_Pkgpc3)) average.packages.pc3 += p->pc3; if (DO_BIC(BIC_Pkgpc6)) average.packages.pc6 += p->pc6; if (DO_BIC(BIC_Pkgpc7)) average.packages.pc7 += p->pc7; average.packages.pc8 += p->pc8; average.packages.pc9 += p->pc9; average.packages.pc10 += p->pc10; average.packages.cpu_lpi = p->cpu_lpi; average.packages.sys_lpi = p->sys_lpi; average.packages.energy_pkg += p->energy_pkg; average.packages.energy_dram += p->energy_dram; average.packages.energy_cores += p->energy_cores; average.packages.energy_gfx += p->energy_gfx; average.packages.gfx_rc6_ms = p->gfx_rc6_ms; average.packages.uncore_mhz = p->uncore_mhz; average.packages.gfx_mhz = p->gfx_mhz; average.packages.gfx_act_mhz = p->gfx_act_mhz; average.packages.pkg_temp_c = MAX(average.packages.pkg_temp_c, p->pkg_temp_c); average.packages.rapl_pkg_perf_status += p->rapl_pkg_perf_status; average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; average.packages.counter[i] += p->counter[i]; } return 0; } /* * sum the counters for all cpus in the system * compute the weighted average */ void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i; struct msr_counter *mp; clear_counters(&average.threads, &average.cores, &average.packages); for_all_cpus(sum_counters, t, c, p); /* Use the global time delta for the average. 
*/ average.threads.tv_delta = tv_delta; average.threads.tsc /= topo.num_cpus; average.threads.aperf /= topo.num_cpus; average.threads.mperf /= topo.num_cpus; average.threads.instr_count /= topo.num_cpus; average.threads.c1 /= topo.num_cpus; if (average.threads.irq_count > 9999999) sums_need_wide_columns = 1; average.cores.c3 /= topo.num_cores; average.cores.c6 /= topo.num_cores; average.cores.c7 /= topo.num_cores; average.cores.mc6_us /= topo.num_cores; if (DO_BIC(BIC_Totl_c0)) average.packages.pkg_wtd_core_c0 /= topo.num_packages; if (DO_BIC(BIC_Any_c0)) average.packages.pkg_any_core_c0 /= topo.num_packages; if (DO_BIC(BIC_GFX_c0)) average.packages.pkg_any_gfxe_c0 /= topo.num_packages; if (DO_BIC(BIC_CPUGFX)) average.packages.pkg_both_core_gfxe_c0 /= topo.num_packages; average.packages.pc2 /= topo.num_packages; if (DO_BIC(BIC_Pkgpc3)) average.packages.pc3 /= topo.num_packages; if (DO_BIC(BIC_Pkgpc6)) average.packages.pc6 /= topo.num_packages; if (DO_BIC(BIC_Pkgpc7)) average.packages.pc7 /= topo.num_packages; average.packages.pc8 /= topo.num_packages; average.packages.pc9 /= topo.num_packages; average.packages.pc10 /= topo.num_packages; for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; if (mp->type == COUNTER_ITEMS) { if (average.threads.counter[i] > 9999999) sums_need_wide_columns = 1; continue; } average.threads.counter[i] /= topo.num_cpus; } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; if (mp->type == COUNTER_ITEMS) { if (average.cores.counter[i] > 9999999) sums_need_wide_columns = 1; } average.cores.counter[i] /= topo.num_cores; } for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (mp->format == FORMAT_RAW) continue; if (mp->type == COUNTER_ITEMS) { if (average.packages.counter[i] > 9999999) sums_need_wide_columns = 1; } average.packages.counter[i] /= topo.num_packages; } } static unsigned long long rdtsc(void) { unsigned int low, high; asm volatile ("rdtsc":"=a" (low), 
"=d"(high)); return low | ((unsigned long long)high) << 32; } /* * Open a file, and exit on failure */ FILE *fopen_or_die(const char *path, const char *mode) { FILE *filep = fopen(path, mode); if (!filep) err(1, "%s: open failed", path); return filep; } /* * snapshot_sysfs_counter() * * return snapshot of given counter */ unsigned long long snapshot_sysfs_counter(char *path) { FILE *fp; int retval; unsigned long long counter; fp = fopen_or_die(path, "r"); retval = fscanf(fp, "%lld", &counter); if (retval != 1) err(1, "snapshot_sysfs_counter(%s)", path); fclose(fp); return counter; } int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) { if (mp->msr_num != 0) { if (get_msr(cpu, mp->msr_num, counterp)) return -1; } else { char path[128 + PATH_BYTES]; if (mp->flags & SYSFS_PERCPU) { sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", cpu, mp->path); *counterp = snapshot_sysfs_counter(path); } else { *counterp = snapshot_sysfs_counter(mp->path); } } return 0; } unsigned long long get_uncore_mhz(int package, int die) { char path[128]; sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/current_freq_khz", package, die); return (snapshot_sysfs_counter(path) / 1000); } int get_epb(int cpu) { char path[128 + PATH_BYTES]; unsigned long long msr; int ret, epb = -1; FILE *fp; sprintf(path, "/sys/devices/system/cpu/cpu%d/power/energy_perf_bias", cpu); fp = fopen(path, "r"); if (!fp) goto msr_fallback; ret = fscanf(fp, "%d", &epb); if (ret != 1) err(1, "%s(%s)", __func__, path); fclose(fp); return epb; msr_fallback: get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr); return msr & 0xf; } void get_apic_id(struct thread_data *t) { unsigned int eax, ebx, ecx, edx; if (DO_BIC(BIC_APIC)) { eax = ebx = ecx = edx = 0; __cpuid(1, eax, ebx, ecx, edx); t->apic_id = (ebx >> 24) & 0xff; } if (!DO_BIC(BIC_X2APIC)) return; if (authentic_amd || hygon_genuine) { unsigned int topology_extensions; if (max_extended_level < 0x8000001e) return; eax = ebx = 
ecx = edx = 0; __cpuid(0x80000001, eax, ebx, ecx, edx); topology_extensions = ecx & (1 << 22); if (topology_extensions == 0) return; eax = ebx = ecx = edx = 0; __cpuid(0x8000001e, eax, ebx, ecx, edx); t->x2apic_id = eax; return; } if (!genuine_intel) return; if (max_level < 0xb) return; ecx = 0; __cpuid(0xb, eax, ebx, ecx, edx); t->x2apic_id = edx; if (debug && (t->apic_id != (t->x2apic_id & 0xff))) fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); } int get_core_throt_cnt(int cpu, unsigned long long *cnt) { char path[128 + PATH_BYTES]; unsigned long long tmp; FILE *fp; int ret; sprintf(path, "/sys/devices/system/cpu/cpu%d/thermal_throttle/core_throttle_count", cpu); fp = fopen(path, "r"); if (!fp) return -1; ret = fscanf(fp, "%lld", &tmp); fclose(fp); if (ret != 1) return -1; *cnt = tmp; return 0; } /* * get_counters(...) * migrate to cpu * acquire and record local counters for that cpu */ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int cpu = t->cpu_id; unsigned long long msr; int aperf_mperf_retry_count = 0; struct msr_counter *mp; int i; if (cpu_migrate(cpu)) { fprintf(outf, "get_counters: Could not migrate to CPU %d\n", cpu); return -1; } gettimeofday(&t->tv_begin, (struct timezone *)NULL); if (first_counter_read) get_apic_id(t); retry: t->tsc = rdtsc(); /* we are running on local CPU of interest */ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || soft_c1_residency_display(BIC_Avg_MHz)) { unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; /* * The TSC, APERF and MPERF must be read together for * APERF/MPERF and MPERF/TSC to give accurate results. * * Unfortunately, APERF and MPERF are read by * individual system call, so delays may occur * between them. If the time to read them * varies by a large amount, we re-read them. */ /* * This initial dummy APERF read has been seen to * reduce jitter in the subsequent reads. 
*/ if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) return -3; t->tsc = rdtsc(); /* re-read close to APERF */ tsc_before = t->tsc; if (get_msr(cpu, MSR_IA32_APERF, &t->aperf)) return -3; tsc_between = rdtsc(); if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) return -4; tsc_after = rdtsc(); aperf_time = tsc_between - tsc_before; mperf_time = tsc_after - tsc_between; /* * If the system call latency to read APERF and MPERF * differ by more than 2x, then try again. */ if ((aperf_time > (2 * mperf_time)) || (mperf_time > (2 * aperf_time))) { aperf_mperf_retry_count++; if (aperf_mperf_retry_count < 5) goto retry; else warnx("cpu%d jitter %lld %lld", cpu, aperf_time, mperf_time); } aperf_mperf_retry_count = 0; t->aperf = t->aperf * aperf_mperf_multiplier; t->mperf = t->mperf * aperf_mperf_multiplier; } if (DO_BIC(BIC_IPC)) if (read(get_instr_count_fd(cpu), &t->instr_count, sizeof(long long)) != sizeof(long long)) return -4; if (DO_BIC(BIC_IRQ)) t->irq_count = irqs_per_cpu[cpu]; if (DO_BIC(BIC_SMI)) { if (get_msr(cpu, MSR_SMI_COUNT, &msr)) return -5; t->smi_count = msr & 0xFFFFFFFF; } if (DO_BIC(BIC_CPU_c1) && use_c1_residency_msr) { if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1)) return -6; } for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { if (get_mp(cpu, mp, &t->counter[i])) return -10; } /* collect core counters only for 1st thread in core */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) { if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) return -7; } if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7)) { if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) return -8; else if (t->is_atom) { /* * For Atom CPUs that has 
core cstate deeper than c6, * MSR_CORE_C6_RESIDENCY returns residency of cc6 and deeper. * Minus CC7 (and deeper cstates) residency to get * accturate cc6 residency. */ c->c6 -= c->c7; } } if (DO_BIC(BIC_Mod_c6)) if (get_msr(cpu, MSR_MODULE_C6_RES_MS, &c->mc6_us)) return -8; if (DO_BIC(BIC_CoreTmp)) { if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return -9; c->core_temp_c = tj_max - ((msr >> 16) & 0x7F); } if (DO_BIC(BIC_CORE_THROT_CNT)) get_core_throt_cnt(cpu, &c->core_throt_cnt); if (do_rapl & RAPL_AMD_F17H) { if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr)) return -14; c->core_energy = msr & 0xFFFFFFFF; } for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { if (get_mp(cpu, mp, &c->counter[i])) return -10; } /* collect package counters only for 1st core in package */ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) goto done; if (DO_BIC(BIC_Totl_c0)) { if (get_msr(cpu, MSR_PKG_WEIGHTED_CORE_C0_RES, &p->pkg_wtd_core_c0)) return -10; } if (DO_BIC(BIC_Any_c0)) { if (get_msr(cpu, MSR_PKG_ANY_CORE_C0_RES, &p->pkg_any_core_c0)) return -11; } if (DO_BIC(BIC_GFX_c0)) { if (get_msr(cpu, MSR_PKG_ANY_GFXE_C0_RES, &p->pkg_any_gfxe_c0)) return -12; } if (DO_BIC(BIC_CPUGFX)) { if (get_msr(cpu, MSR_PKG_BOTH_CORE_GFXE_C0_RES, &p->pkg_both_core_gfxe_c0)) return -13; } if (DO_BIC(BIC_Pkgpc3)) if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3)) return -9; if (DO_BIC(BIC_Pkgpc6)) { if (do_slm_cstates) { if (get_msr(cpu, MSR_ATOM_PKG_C6_RESIDENCY, &p->pc6)) return -10; } else { if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6)) return -10; } } if (DO_BIC(BIC_Pkgpc2)) if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2)) return -11; if (DO_BIC(BIC_Pkgpc7)) if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7)) return -12; if (DO_BIC(BIC_Pkgpc8)) if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8)) return -13; if (DO_BIC(BIC_Pkgpc9)) if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9)) return -13; if (DO_BIC(BIC_Pkgpc10)) if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) return -13; if (DO_BIC(BIC_CPU_LPI)) 
p->cpu_lpi = cpuidle_cur_cpu_lpi_us; if (DO_BIC(BIC_SYS_LPI)) p->sys_lpi = cpuidle_cur_sys_lpi_us; if (do_rapl & RAPL_PKG) { if (get_msr_sum(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; p->energy_pkg = msr; } if (do_rapl & RAPL_CORES_ENERGY_STATUS) { if (get_msr_sum(cpu, MSR_PP0_ENERGY_STATUS, &msr)) return -14; p->energy_cores = msr; } if (do_rapl & RAPL_DRAM) { if (get_msr_sum(cpu, MSR_DRAM_ENERGY_STATUS, &msr)) return -15; p->energy_dram = msr; } if (do_rapl & RAPL_GFX) { if (get_msr_sum(cpu, MSR_PP1_ENERGY_STATUS, &msr)) return -16; p->energy_gfx = msr; } if (do_rapl & RAPL_PKG_PERF_STATUS) { if (get_msr_sum(cpu, MSR_PKG_PERF_STATUS, &msr)) return -16; p->rapl_pkg_perf_status = msr; } if (do_rapl & RAPL_DRAM_PERF_STATUS) { if (get_msr_sum(cpu, MSR_DRAM_PERF_STATUS, &msr)) return -16; p->rapl_dram_perf_status = msr; } if (do_rapl & RAPL_AMD_F17H) { if (get_msr_sum(cpu, MSR_PKG_ENERGY_STAT, &msr)) return -13; p->energy_pkg = msr; } if (DO_BIC(BIC_PkgTmp)) { if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr)) return -17; p->pkg_temp_c = tj_max - ((msr >> 16) & 0x7F); } if (DO_BIC(BIC_GFX_rc6)) p->gfx_rc6_ms = gfx_cur_rc6_ms; /* n.b. assume die0 uncore frequency applies to whole package */ if (DO_BIC(BIC_UNCORE_MHZ)) p->uncore_mhz = get_uncore_mhz(p->package_id, 0); if (DO_BIC(BIC_GFXMHz)) p->gfx_mhz = gfx_cur_mhz; if (DO_BIC(BIC_GFXACTMHz)) p->gfx_act_mhz = gfx_act_mhz; for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { if (get_mp(cpu, mp, &p->counter[i])) return -10; } done: gettimeofday(&t->tv_end, (struct timezone *)NULL); return 0; } /* * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit: * If you change the values, note they are used both in comparisons * (>= PCL__7) and to index pkg_cstate_limit_strings[]. 
*/ #define PCLUKN 0 /* Unknown */ #define PCLRSV 1 /* Reserved */ #define PCL__0 2 /* PC0 */ #define PCL__1 3 /* PC1 */ #define PCL__2 4 /* PC2 */ #define PCL__3 5 /* PC3 */ #define PCL__4 6 /* PC4 */ #define PCL__6 7 /* PC6 */ #define PCL_6N 8 /* PC6 No Retention */ #define PCL_6R 9 /* PC6 Retention */ #define PCL__7 10 /* PC7 */ #define PCL_7S 11 /* PC7 Shrink */ #define PCL__8 12 /* PC8 */ #define PCL__9 13 /* PC9 */ #define PCL_10 14 /* PC10 */ #define PCLUNL 15 /* Unlimited */ int pkg_cstate_limit = PCLUKN; char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2", "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "pc10", "unlimited" }; int nhm_pkg_cstate_limits[16] = { PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; int snb_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; int hsw_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; int slv_pkg_cstate_limits[16] = { PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7 }; int amt_pkg_cstate_limits[16] = { PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; int phi_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; int glm_pkg_cstate_limits[16] = { PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; int skx_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV }; 
int icx_pkg_cstate_limits[16] = { PCL__0, PCL__2, PCL__6, PCL__6, PCLRSV, PCLRSV, PCLRSV, PCLUNL,
	PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV
};

/* derive the MPERF-vs-TSC correction factor used by delta_thread() */
static void calculate_tsc_tweak()
{
	tsc_tweak = base_hz / tsc_hz;
}

void prewake_cstate_probe(unsigned int family, unsigned int model);

/*
 * dump_nhm_platform_info - print MSR_PLATFORM_INFO (max efficiency and
 * base ratios) and MSR_IA32_POWER_CTL (C1E auto-promotion, and the
 * C-state pre-wake bit when supported).
 */
static void dump_nhm_platform_info(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);

	fprintf(outf, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);

	ratio = (msr >> 40) & 0xFF;
	fprintf(outf, "%d * %.1f = %.1f MHz max efficiency frequency\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk);

	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
	fprintf(outf, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
		base_cpu, msr, msr & 0x2 ? "EN" : "DIS");

	/* C-state Pre-wake Disable (CSTATE_PREWAKE_DISABLE) */
	if (dis_cstate_prewake)
		fprintf(outf, "C-state Pre-wake: %sabled\n", msr & 0x40000000 ? "DIS" : "EN");

	return;
}

/*
 * dump_hsw_turbo_ratio_limits - print MSR_TURBO_RATIO_LIMIT2
 * (ratios for 17 and 18 active cores).
 */
static void dump_hsw_turbo_ratio_limits(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);

	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 18 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 17 active cores\n", ratio, bclk, ratio * bclk);
	return;
}

/*
 * dump_ivt_turbo_ratio_limits - print MSR_TURBO_RATIO_LIMIT1
 * (ratios for 9..16 active cores, one byte per group).
 */
static void dump_ivt_turbo_ratio_limits(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);

	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);

	ratio = (msr >> 56) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 16 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 48) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 15 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 40) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 14 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 32) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 13 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 24) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 12 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 11 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 10 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0xFF;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 9 active cores\n", ratio, bclk, ratio * bclk);
	return;
}

/*
 * has_turbo_ratio_group_limits - non-zero for Intel family-6 models where
 * MSR_TURBO_RATIO_LIMIT1 encodes per-group core counts rather than ratios.
 */
int has_turbo_ratio_group_limits(int family, int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
	case INTEL_FAM6_ATOM_TREMONT_D:
		return 1;
	default:
		return 0;
	}
}

/*
 * dump_turbo_ratio_limits - print the (possibly secondary) turbo-ratio
 * MSR; per-group core counts come from MSR_TURBO_RATIO_LIMIT1 on models
 * with group limits, otherwise the implicit 1..8 active-core encoding.
 */
static void dump_turbo_ratio_limits(int trl_msr_offset, int family, int model)
{
	unsigned long long msr, core_counts;
	int shift;

	get_msr(base_cpu, trl_msr_offset, &msr);
	fprintf(outf, "cpu%d: MSR_%sTURBO_RATIO_LIMIT: 0x%08llx\n",
		base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY_" : "", msr);

	if (has_turbo_ratio_group_limits(family, model)) {
		get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
		fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, core_counts);
	} else {
		core_counts = 0x0807060504030201;
	}

	for (shift = 56; shift >= 0; shift -= 8) {
		unsigned int ratio, group_size;

		ratio = (msr >> shift) & 0xFF;
		group_size = (core_counts >> shift) & 0xFF;
		if (ratio)
			fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
				ratio, bclk, ratio * bclk, group_size);
	}

	return;
}

/*
 * dump_atom_turbo_ratio_limits - print MSR_ATOM_CORE_RATIOS (min/LFM/base
 * ratios) and MSR_ATOM_CORE_TURBO_RATIOS (turbo ratios for 1..4 cores).
 */
static void dump_atom_turbo_ratio_limits(void)
{
	unsigned long long msr;
	unsigned int ratio;

	get_msr(base_cpu, MSR_ATOM_CORE_RATIOS, &msr);
	fprintf(outf, "cpu%d: MSR_ATOM_CORE_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);

	ratio = (msr >> 0) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz minimum operating frequency\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz low frequency mode (LFM)\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz base frequency\n", ratio, bclk, ratio * bclk);

	get_msr(base_cpu, MSR_ATOM_CORE_TURBO_RATIOS, &msr);
	fprintf(outf, "cpu%d: MSR_ATOM_CORE_TURBO_RATIOS: 0x%08llx\n", base_cpu, msr & 0xFFFFFFFF);

	ratio = (msr >> 24) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 4 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 16) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 3 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 8) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 2 active cores\n", ratio, bclk, ratio * bclk);

	ratio = (msr >> 0) & 0x3F;
	if (ratio)
		fprintf(outf, "%d * %.1f = %.1f MHz max turbo 1 active core\n", ratio, bclk, ratio * bclk);
}

/*
 * dump_knl_turbo_ratio_limits - decode and print the KNL bucketed
 * turbo-ratio encoding of MSR_TURBO_RATIO_LIMIT (see field map below).
 */
static void dump_knl_turbo_ratio_limits(void)
{
	const unsigned int buckets_no = 7;

	unsigned long long msr;
	int delta_cores, delta_ratio;
	int i, b_nr;
	unsigned int cores[buckets_no];
	unsigned int ratio[buckets_no];

	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);

	fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);

	/*
	 * Turbo encoding in KNL is as follows:
	 * [0] -- Reserved
	 * [7:1] -- Base value of number of active cores of bucket 1.
	 * [15:8] -- Base value of freq ratio of bucket 1.
	 * [20:16] -- +ve delta of number of active cores of bucket 2.
	 * i.e. active cores of bucket 2 =
	 * active cores of bucket 1 + delta
	 * [23:21] -- Negative delta of freq ratio of bucket 2.
	 * i.e. freq ratio of bucket 2 =
	 * freq ratio of bucket 1 - delta
	 * [28:24]-- +ve delta of number of active cores of bucket 3.
	 * [31:29]-- -ve delta of freq ratio of bucket 3.
	 * [36:32]-- +ve delta of number of active cores of bucket 4.
	 * [39:37]-- -ve delta of freq ratio of bucket 4.
	 * [44:40]-- +ve delta of number of active cores of bucket 5.
	 * [47:45]-- -ve delta of freq ratio of bucket 5.
	 * [52:48]-- +ve delta of number of active cores of bucket 6.
	 * [55:53]-- -ve delta of freq ratio of bucket 6.
	 * [60:56]-- +ve delta of number of active cores of bucket 7.
	 * [63:61]-- -ve delta of freq ratio of bucket 7.
	 */

	b_nr = 0;
	cores[b_nr] = (msr & 0xFF) >> 1;
	ratio[b_nr] = (msr >> 8) & 0xFF;

	for (i = 16; i < 64; i += 8) {
		delta_cores = (msr >> i) & 0x1F;
		delta_ratio = (msr >> (i + 5)) & 0x7;

		cores[b_nr + 1] = cores[b_nr] + delta_cores;
		ratio[b_nr + 1] = ratio[b_nr] - delta_ratio;
		b_nr++;
	}

	/* print highest bucket first, skipping buckets with repeated ratios */
	for (i = buckets_no - 1; i >= 0; i--)
		if (i > 0 ? ratio[i] != ratio[i - 1] : 1)
			fprintf(outf,
				"%d * %.1f = %.1f MHz max turbo %d active cores\n",
				ratio[i], bclk, ratio[i] * bclk, cores[i]);
}

/*
 * dump_nhm_cst_cfg - decode and print MSR_PKG_CST_CONFIG_CONTROL:
 * demote/undemote bits, lock bit, and the package C-state limit field.
 */
static void dump_nhm_cst_cfg(void)
{
	unsigned long long msr;

	get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr);

	fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr);

	fprintf(outf, " (%s%s%s%s%slocked, pkg-cstate-limit=%d (%s)",
		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
		(msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "",
		(msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "",
		(msr & NHM_C1_AUTO_DEMOTE) ? "demote-C1, " : "",
		(msr & (1 << 15)) ? "" : "UN", (unsigned int)msr & 0xF, pkg_cstate_limit_strings[pkg_cstate_limit]);

#define AUTOMATIC_CSTATE_CONVERSION		(1UL << 16)
	if (has_automatic_cstate_conversion) {
		fprintf(outf, ", automatic c-state conversion=%s", (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off");
	}

	fprintf(outf, ")\n");

	return;
}

/*
 * dump_config_tdp - print the configurable-TDP MSRs: nominal, level 1,
 * level 2, control (with lock bit), and the turbo activation ratio.
 */
static void dump_config_tdp(void)
{
	unsigned long long msr;

	get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
	fprintf(outf, " (base_ratio=%d)\n", (unsigned int)msr & 0xFF);

	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
	if (msr) {
		fprintf(outf, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
		fprintf(outf, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
		fprintf(outf, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
		fprintf(outf, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0x7FFF);
	}
	fprintf(outf, ")\n");

	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
	if (msr) {
		fprintf(outf, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0x7FFF);
		fprintf(outf, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0x7FFF);
		fprintf(outf, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xFF);
		fprintf(outf, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0x7FFF);
	}
	fprintf(outf, ")\n");

	get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
	fprintf(outf, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
	if ((msr) & 0x3)
		fprintf(outf, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
	fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
	fprintf(outf, ")\n");

	get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
	fprintf(outf, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xFF);
	fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1);
	fprintf(outf, ")\n");
}

/* ns multipliers for the 3-bit time-unit field of the IRTL MSRs */
unsigned int irtl_time_units[] = { 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

/*
 * print_irtl - print the package C-state Interrupt Response Time Limit
 * MSRs (PC3/PC6/PC7, plus PC8/PC9/PC10 when do_irtl_hsw is set).
 */
void print_irtl(void)
{
	unsigned long long msr;

	get_msr(base_cpu, MSR_PKGC3_IRTL, &msr);
	fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);

	get_msr(base_cpu, MSR_PKGC6_IRTL, &msr);
	fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);

	get_msr(base_cpu, MSR_PKGC7_IRTL, &msr);
	fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);

	if (!do_irtl_hsw)
		return;

	get_msr(base_cpu, MSR_PKGC8_IRTL, &msr);
	fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);

	get_msr(base_cpu, MSR_PKGC9_IRTL, &msr);
	fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);

	get_msr(base_cpu, MSR_PKGC10_IRTL, &msr);
	fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr);
	fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}

/* close every open per-cpu MSR fd, then release the fd array itself */
void free_fd_percpu(void)
{
	int i;

	for (i = 0; i < topo.max_cpu_num + 1; ++i) {
		if (fd_percpu[i] != 0)
			close(fd_percpu[i]);
	}

	free(fd_percpu);
}

/*
 * free_all_buffers - release every dynamically allocated per-cpu
 * structure (cpu sets, even/odd counter sets, output buffer, fds,
 * irq arrays, topology data) ahead of re_initialize().
 */
void free_all_buffers(void)
{
	int i;

	CPU_FREE(cpu_present_set);
	cpu_present_set = NULL;
	cpu_present_setsize = 0;

	CPU_FREE(cpu_affinity_set);
	cpu_affinity_set = NULL;
	cpu_affinity_setsize = 0;

	free(thread_even);
	free(core_even);
	free(package_even);

	thread_even = NULL;
	core_even = NULL;
	package_even = NULL;

	free(thread_odd);
	free(core_odd);
	free(package_odd);

	thread_odd = NULL;
	core_odd = NULL;
	package_odd = NULL;

	free(output_buffer);
	output_buffer = NULL;
	outp = NULL;

	free_fd_percpu();

	free(irq_column_2_cpu);
	free(irqs_per_cpu);

	for (i = 0; i <= topo.max_cpu_num; ++i) {
		if (cpus[i].put_ids)
			CPU_FREE(cpus[i].put_ids);
	}
	free(cpus);
}

/*
 * Parse a file containing a single int.
 * Return 0 if file can not be opened
 * Exit if file can be opened, but can not be parsed
 */
int parse_int_file(const char *fmt, ...)
{
	va_list args;
	char path[PATH_MAX];
	FILE *filep;
	int value;

	va_start(args, fmt);
	vsnprintf(path, sizeof(path), fmt, args);
	va_end(args);
	filep = fopen(path, "r");
	if (!filep)
		return 0;
	if (fscanf(filep, "%d", &value) != 1)
		err(1, "%s: failed to parse number from file", path);
	fclose(filep);
	return value;
}

/*
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
 */
int cpu_is_first_core_in_package(int cpu)
{
	return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
}

/* sysfs lookup: package id for the given cpu */
int get_physical_package_id(int cpu)
{
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
}

/* sysfs lookup: die id for the given cpu */
int get_die_id(int cpu)
{
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
}

/* sysfs lookup: core id for the given cpu */
int get_core_id(int cpu)
{
	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
}

/*
 * set_node_data - assign a per-package logical node id to every cpu,
 * grouping cpus that share a physical node, and record the maximum
 * nodes-per-package in topo.nodes_per_pkg.
 */
void set_node_data(void)
{
	int pkg, node, lnode, cpu, cpux;
	int cpu_count;

	/* initialize logical_node_id */
	for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
		cpus[cpu].logical_node_id = -1;

	cpu_count = 0;
	for (pkg = 0; pkg < topo.num_packages; pkg++) {
		lnode = 0;
		for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
			if (cpus[cpu].physical_package_id != pkg)
				continue;
			/* find a cpu with an unset logical_node_id */
			if (cpus[cpu].logical_node_id != -1)
				continue;
			cpus[cpu].logical_node_id = lnode;
			node = cpus[cpu].physical_node_id;
			cpu_count++;
			/*
			 * find all matching cpus on this pkg and set
			 * the logical_node_id
			 */
			for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
				if ((cpus[cpux].physical_package_id == pkg) && (cpus[cpux].physical_node_id == node)) {
					cpus[cpux].logical_node_id = lnode;
					cpu_count++;
				}
			}
			lnode++;
			if (lnode > topo.nodes_per_pkg)
				topo.nodes_per_pkg = lnode;
		}
		if (cpu_count >= topo.max_cpu_num)
			break;
	}
}

/*
 * get_physical_node_id - probe sysfs nodeN directories under this cpu;
 * the first N whose cpulist opens is the cpu's NUMA node.  -1 if none.
 */
int get_physical_node_id(struct cpu_topology *thiscpu)
{
	char path[80];
	FILE *filep;
	int i;
	int cpu = thiscpu->logical_cpu_id;

	for (i = 0; i <= topo.max_cpu_num; i++) {
		sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist", cpu, i);
		filep = fopen(path, "r");
		if (!filep)
			continue;
		fclose(filep);
		return i;
	}
	return -1;
}

/*
 * get_thread_siblings - parse this cpu's thread_siblings bitmask from
 * sysfs, populate thiscpu->put_ids with the sibling set (same core id),
 * and assign thread_ids to newly seen siblings.  Returns the sibling
 * count, or -1 on allocation/open failure.
 */
int get_thread_siblings(struct cpu_topology *thiscpu)
{
	char path[80], character;
	FILE *filep;
	unsigned long map;
	int so, shift, sib_core;
	int cpu = thiscpu->logical_cpu_id;
	int offset = topo.max_cpu_num + 1;
	size_t size;
	int thread_id = 0;

	thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1));
	if (thiscpu->thread_id < 0)
		thiscpu->thread_id = thread_id++;
	if (!thiscpu->put_ids)
		return -1;

	size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(size, thiscpu->put_ids);

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu);
	filep = fopen(path, "r");

	if (!filep) {
		warnx("%s: open failed", path);
		return -1;
	}
	do {
		/* the mask is printed highest word first, comma separated */
		offset -= BITMASK_SIZE;
		if (fscanf(filep, "%lx%c", &map, &character) != 2)
			err(1, "%s: failed to parse file", path);
		for (shift = 0; shift < BITMASK_SIZE; shift++) {
			if ((map >> shift) & 0x1) {
				so = shift + offset;
				sib_core = get_core_id(so);
				if (sib_core == thiscpu->physical_core_id) {
					CPU_SET_S(so, size, thiscpu->put_ids);
					if ((so != cpu) && (cpus[so].thread_id < 0))
						cpus[so].thread_id = thread_id++;
				}
			}
		}
	} while (character == ',');
	fclose(filep);

	return CPU_COUNT_S(size, thiscpu->put_ids);
}

/*
 * run func(thread, core, package) in topology order
 * skip non-present cpus
 */
int for_all_cpus_2(int (func) (struct thread_data *, struct core_data *,
			       struct pkg_data *, struct thread_data *, struct core_data *,
			       struct pkg_data *), struct thread_data *thread_base,
		   struct core_data *core_base, struct pkg_data *pkg_base,
		   struct thread_data *thread_base2, struct core_data *core_base2, struct pkg_data *pkg_base2)
{
	int retval, pkg_no, node_no, core_no, thread_no;

	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
		for (node_no = 0; node_no < topo.nodes_per_pkg; ++node_no) {
			for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
				for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) {
					struct thread_data *t, *t2;
					struct core_data *c, *c2;
					struct pkg_data *p, *p2;

					t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no);

					if (cpu_is_not_present(t->cpu_id))
						continue;

					t2 = GET_THREAD(thread_base2, thread_no, core_no, node_no, pkg_no);

					c = GET_CORE(core_base, core_no, node_no, pkg_no);
					c2 = GET_CORE(core_base2, core_no, node_no, pkg_no);

					p = GET_PKG(pkg_base, pkg_no);
					p2 = GET_PKG(pkg_base2, pkg_no);

					retval = func(t, c, p, t2, c2, p2);
					if (retval)
						return retval;
				}
			}
		}
	}
	return 0;
}

/*
 * run func(cpu) on every cpu in /proc/stat
 * return max_cpu number
 */
int for_all_proc_cpus(int (func) (int))
{
	FILE *fp;
	int cpu_num;
	int retval;

	fp = fopen_or_die(proc_stat, "r");

	/* skip the aggregate "cpu" line (all conversions suppressed) */
	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
	if (retval != 0)
		err(1, "%s: failed to parse format", proc_stat);

	while (1) {
		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
		if (retval != 1)
			break;

		retval = func(cpu_num);
		if (retval) {
			fclose(fp);
			return (retval);
		}
	}
	fclose(fp);
	return 0;
}

/* tear down and rebuild all per-cpu state after a topology change */
void re_initialize(void)
{
	free_all_buffers();
	setup_all_buffers();
	fprintf(outf, "turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
}

/*
 * set_max_cpu_num - derive topo.max_cpu_num from the width of the
 * calling cpu's thread_siblings bitmask in sysfs.
 */
void set_max_cpu_num(void)
{
	FILE *filep;
	int base_cpu;
	unsigned long dummy;
	char pathname[64];

	base_cpu = sched_getcpu();
	if (base_cpu < 0)
		err(1, "cannot find calling cpu ID");
	sprintf(pathname, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", base_cpu);

	filep = fopen_or_die(pathname, "r");
	topo.max_cpu_num = 0;
	while (fscanf(filep, "%lx,", &dummy) == 1)
		topo.max_cpu_num += BITMASK_SIZE;
	fclose(filep);
	topo.max_cpu_num--;	/* 0 based */
}

/*
 * count_cpus()
 * remember the last one seen, it will be the max
 */
int count_cpus(int cpu)
{
	UNUSED(cpu);

	topo.num_cpus++;
	return 0;
}

/* for_all_proc_cpus() callback: record cpu in the present set */
int mark_cpu_present(int cpu)
{
	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
	return 0;
}

/* for_all_proc_cpus() callback: mark thread_id unassigned */
int init_thread_id(int cpu)
{
	cpus[cpu].thread_id = -1;
	return 0;
}

/*
 * snapshot_proc_interrupts()
 *
 *
read and record summary of /proc/interrupts * * return 1 if config change requires a restart, else return 0 */ int snapshot_proc_interrupts(void) { static FILE *fp; int column, retval; if (fp == NULL) fp = fopen_or_die("/proc/interrupts", "r"); else rewind(fp); /* read 1st line of /proc/interrupts to get cpu* name for each column */ for (column = 0; column < topo.num_cpus; ++column) { int cpu_number; retval = fscanf(fp, " CPU%d", &cpu_number); if (retval != 1) break; if (cpu_number > topo.max_cpu_num) { warn("/proc/interrupts: cpu%d: > %d", cpu_number, topo.max_cpu_num); return 1; } irq_column_2_cpu[column] = cpu_number; irqs_per_cpu[cpu_number] = 0; } /* read /proc/interrupt count lines and sum up irqs per cpu */ while (1) { int column; char buf[64]; retval = fscanf(fp, " %s:", buf); /* flush irq# "N:" */ if (retval != 1) break; /* read the count per cpu */ for (column = 0; column < topo.num_cpus; ++column) { int cpu_number, irq_count; retval = fscanf(fp, " %d", &irq_count); if (retval != 1) break; cpu_number = irq_column_2_cpu[column]; irqs_per_cpu[cpu_number] += irq_count; } while (getc(fp) != '\n') ; /* flush interrupt description */ } return 0; } /* * snapshot_gfx_rc6_ms() * * record snapshot of * /sys/class/drm/card0/power/rc6_residency_ms * * return 1 if config change requires a restart, else return 0 */ int snapshot_gfx_rc6_ms(void) { FILE *fp; int retval; fp = fopen_or_die("/sys/class/drm/card0/power/rc6_residency_ms", "r"); retval = fscanf(fp, "%lld", &gfx_cur_rc6_ms); if (retval != 1) err(1, "GFX rc6"); fclose(fp); return 0; } /* * snapshot_gfx_mhz() * * record snapshot of * /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz * * return 1 if config change requires a restart, else return 0 */ int snapshot_gfx_mhz(void) { static FILE *fp; int retval; if (fp == NULL) fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); else { rewind(fp); fflush(fp); } retval = fscanf(fp, "%d", &gfx_cur_mhz); if (retval != 1) err(1, "GFX 
MHz"); return 0; } /* * snapshot_gfx_cur_mhz() * * record snapshot of * /sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz * * return 1 if config change requires a restart, else return 0 */ int snapshot_gfx_act_mhz(void) { static FILE *fp; int retval; if (fp == NULL) fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", "r"); else { rewind(fp); fflush(fp); } retval = fscanf(fp, "%d", &gfx_act_mhz); if (retval != 1) err(1, "GFX ACT MHz"); return 0; } /* * snapshot_cpu_lpi() * * record snapshot of * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us */ int snapshot_cpu_lpi_us(void) { FILE *fp; int retval; fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us); if (retval != 1) { fprintf(stderr, "Disabling Low Power Idle CPU output\n"); BIC_NOT_PRESENT(BIC_CPU_LPI); fclose(fp); return -1; } fclose(fp); return 0; } /* * snapshot_sys_lpi() * * record snapshot of sys_lpi_file */ int snapshot_sys_lpi_us(void) { FILE *fp; int retval; fp = fopen_or_die(sys_lpi_file, "r"); retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); if (retval != 1) { fprintf(stderr, "Disabling Low Power Idle System output\n"); BIC_NOT_PRESENT(BIC_SYS_LPI); fclose(fp); return -1; } fclose(fp); return 0; } /* * snapshot /proc and /sys files * * return 1 if configuration restart needed, else return 0 */ int snapshot_proc_sysfs_files(void) { if (DO_BIC(BIC_IRQ)) if (snapshot_proc_interrupts()) return 1; if (DO_BIC(BIC_GFX_rc6)) snapshot_gfx_rc6_ms(); if (DO_BIC(BIC_GFXMHz)) snapshot_gfx_mhz(); if (DO_BIC(BIC_GFXACTMHz)) snapshot_gfx_act_mhz(); if (DO_BIC(BIC_CPU_LPI)) snapshot_cpu_lpi_us(); if (DO_BIC(BIC_SYS_LPI)) snapshot_sys_lpi_us(); return 0; } int exit_requested; static void signal_handler(int signal) { switch (signal) { case SIGINT: exit_requested = 1; if (debug) fprintf(stderr, " SIGINT\n"); break; case SIGUSR1: if (debug > 1) fprintf(stderr, "SIGUSR1\n"); break; 
	}
}

/* Install signal_handler() for SIGINT and SIGUSR1; fatal on failure */
void setup_signal_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = &signal_handler;

	if (sigaction(SIGINT, &sa, NULL) < 0)
		err(1, "sigaction SIGINT");
	if (sigaction(SIGUSR1, &sa, NULL) < 0)
		err(1, "sigaction SIGUSR1");
}

/*
 * Sleep for one measurement interval, but wake early if the user types
 * 'q' on stdin.  If stdin turns out to be a closed pipe, sleep out the
 * remainder and ignore stdin from then on.
 */
void do_sleep(void)
{
	struct timeval tout;
	struct timespec rest;
	fd_set readfds;
	int retval;

	FD_ZERO(&readfds);
	FD_SET(0, &readfds);

	if (ignore_stdin) {
		nanosleep(&interval_ts, NULL);
		return;
	}

	tout = interval_tv;
	retval = select(1, &readfds, NULL, NULL, &tout);

	if (retval == 1) {
		switch (getc(stdin)) {
		case 'q':
			exit_requested = 1;
			break;
		case EOF:
			/*
			 * 'stdin' is a pipe closed on the other end. There
			 * won't be any further input.
			 */
			ignore_stdin = 1;
			/* Sleep the rest of the time */
			/* select() already decremented tout by the elapsed time */
			rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
			rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
			nanosleep(&rest, NULL);
		}
	}
}

/*
 * Return in *msr the accumulated 64-bit value of the 32-bit MSR at
 * 'offset' on 'cpu': running sum + (current raw value - last snapshot),
 * with wraparound handled by DELTA_WRAP32.
 * Returns 0 on success, non-zero on failure.
 */
int get_msr_sum(int cpu, off_t offset, unsigned long long *msr)
{
	int ret, idx;
	unsigned long long msr_cur, msr_last;

	if (!per_cpu_msr_sum)
		return 1;

	idx = offset_to_idx(offset);
	if (idx < 0)
		return idx;
	/* get_msr_sum() = sum + (get_msr() - last) */
	ret = get_msr(cpu, offset, &msr_cur);
	if (ret)
		return ret;
	msr_last = per_cpu_msr_sum[cpu].entries[idx].last;
	DELTA_WRAP32(msr_cur, msr_last);
	*msr = msr_last + per_cpu_msr_sum[cpu].entries[idx].sum;

	return 0;
}

timer_t timerid;

/* Timer callback, update the sum of MSRs periodically.
*/ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg_data *p) { int i, ret; int cpu = t->cpu_id; UNUSED(c); UNUSED(p); for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) { unsigned long long msr_cur, msr_last; off_t offset; if (!idx_valid(i)) continue; offset = idx_to_offset(i); if (offset < 0) continue; ret = get_msr(cpu, offset, &msr_cur); if (ret) { fprintf(outf, "Can not update msr(0x%llx)\n", (unsigned long long)offset); continue; } msr_last = per_cpu_msr_sum[cpu].entries[i].last; per_cpu_msr_sum[cpu].entries[i].last = msr_cur & 0xffffffff; DELTA_WRAP32(msr_cur, msr_last); per_cpu_msr_sum[cpu].entries[i].sum += msr_last; } return 0; } static void msr_record_handler(union sigval v) { UNUSED(v); for_all_cpus(update_msr_sum, EVEN_COUNTERS); } void msr_sum_record(void) { struct itimerspec its; struct sigevent sev; per_cpu_msr_sum = calloc(topo.max_cpu_num + 1, sizeof(struct msr_sum_array)); if (!per_cpu_msr_sum) { fprintf(outf, "Can not allocate memory for long time MSR.\n"); return; } /* * Signal handler might be restricted, so use thread notifier instead. */ memset(&sev, 0, sizeof(struct sigevent)); sev.sigev_notify = SIGEV_THREAD; sev.sigev_notify_function = msr_record_handler; sev.sigev_value.sival_ptr = &timerid; if (timer_create(CLOCK_REALTIME, &sev, &timerid) == -1) { fprintf(outf, "Can not create timer.\n"); goto release_msr; } its.it_value.tv_sec = 0; its.it_value.tv_nsec = 1; /* * A wraparound time has been calculated early. * Some sources state that the peak power for a * microprocessor is usually 1.5 times the TDP rating, * use 2 * TDP for safety. 
*/ its.it_interval.tv_sec = rapl_joule_counter_range / 2; its.it_interval.tv_nsec = 0; if (timer_settime(timerid, 0, &its, NULL) == -1) { fprintf(outf, "Can not set timer.\n"); goto release_timer; } return; release_timer: timer_delete(timerid); release_msr: free(per_cpu_msr_sum); } /* * set_my_sched_priority(pri) * return previous */ int set_my_sched_priority(int priority) { int retval; int original_priority; errno = 0; original_priority = getpriority(PRIO_PROCESS, 0); if (errno && (original_priority == -1)) err(errno, "getpriority"); retval = setpriority(PRIO_PROCESS, 0, priority); if (retval) errx(retval, "capget(CAP_SYS_NICE) failed,try \"# setcap cap_sys_nice=ep %s\"", progname); errno = 0; retval = getpriority(PRIO_PROCESS, 0); if (retval != priority) err(retval, "getpriority(%d) != setpriority(%d)", retval, priority); return original_priority; } void turbostat_loop() { int retval; int restarted = 0; unsigned int done_iters = 0; setup_signal_handler(); /* * elevate own priority for interval mode */ set_my_sched_priority(-20); restart: restarted++; snapshot_proc_sysfs_files(); retval = for_all_cpus(get_counters, EVEN_COUNTERS); first_counter_read = 0; if (retval < -1) { exit(retval); } else if (retval == -1) { if (restarted > 10) { exit(retval); } re_initialize(); goto restart; } restarted = 0; done_iters = 0; gettimeofday(&tv_even, (struct timezone *)NULL); while (1) { if (for_all_proc_cpus(cpu_is_not_present)) { re_initialize(); goto restart; } do_sleep(); if (snapshot_proc_sysfs_files()) goto restart; retval = for_all_cpus(get_counters, ODD_COUNTERS); if (retval < -1) { exit(retval); } else if (retval == -1) { re_initialize(); goto restart; } gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) { re_initialize(); goto restart; } compute_average(EVEN_COUNTERS); format_all_counters(EVEN_COUNTERS); flush_output_stdout(); if (exit_requested) break; if (num_iterations 
&& ++done_iters >= num_iterations) break; do_sleep(); if (snapshot_proc_sysfs_files()) goto restart; retval = for_all_cpus(get_counters, EVEN_COUNTERS); if (retval < -1) { exit(retval); } else if (retval == -1) { re_initialize(); goto restart; } gettimeofday(&tv_even, (struct timezone *)NULL); timersub(&tv_even, &tv_odd, &tv_delta); if (for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS)) { re_initialize(); goto restart; } compute_average(ODD_COUNTERS); format_all_counters(ODD_COUNTERS); flush_output_stdout(); if (exit_requested) break; if (num_iterations && ++done_iters >= num_iterations) break; } } void check_dev_msr() { struct stat sb; char pathname[32]; sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); if (stat(pathname, &sb)) if (system("/sbin/modprobe msr > /dev/null 2>&1")) err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } /* * check for CAP_SYS_RAWIO * return 0 on success * return 1 on fail */ int check_for_cap_sys_rawio(void) { cap_t caps; cap_flag_value_t cap_flag_value; caps = cap_get_proc(); if (caps == NULL) err(-6, "cap_get_proc\n"); if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) err(-6, "cap_get\n"); if (cap_flag_value != CAP_SET) { warnx("capget(CAP_SYS_RAWIO) failed," " try \"# setcap cap_sys_rawio=ep %s\"", progname); return 1; } if (cap_free(caps) == -1) err(-6, "cap_free\n"); return 0; } void check_permissions(void) { int do_exit = 0; char pathname[32]; /* check for CAP_SYS_RAWIO */ do_exit += check_for_cap_sys_rawio(); /* test file permissions */ sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); if (euidaccess(pathname, R_OK)) { do_exit++; warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); } /* if all else fails, thell them to be root */ if (do_exit) if (getuid() != 0) warnx("... 
or simply run as root"); if (do_exit) exit(-6); } /* * NHM adds support for additional MSRs: * * MSR_SMI_COUNT 0x00000034 * * MSR_PLATFORM_INFO 0x000000ce * MSR_PKG_CST_CONFIG_CONTROL 0x000000e2 * * MSR_MISC_PWR_MGMT 0x000001aa * * MSR_PKG_C3_RESIDENCY 0x000003f8 * MSR_PKG_C6_RESIDENCY 0x000003f9 * MSR_CORE_C3_RESIDENCY 0x000003fc * MSR_CORE_C6_RESIDENCY 0x000003fd * * Side effect: * sets global pkg_cstate_limit to decode MSR_PKG_CST_CONFIG_CONTROL * sets has_misc_feature_control */ int probe_nhm_msrs(unsigned int family, unsigned int model) { unsigned long long msr; unsigned int base_ratio; int *pkg_cstate_limits; if (!genuine_intel) return 0; if (family != 6) return 0; bclk = discover_bclk(family, model); switch (model) { case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ pkg_cstate_limits = nhm_pkg_cstate_limits; break; case INTEL_FAM6_SANDYBRIDGE: /* SNB */ case INTEL_FAM6_SANDYBRIDGE_X: /* SNB Xeon */ case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ pkg_cstate_limits = snb_pkg_cstate_limits; has_misc_feature_control = 1; break; case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_HASWELL_L: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_G: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ pkg_cstate_limits = hsw_pkg_cstate_limits; has_misc_feature_control = 1; break; case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ pkg_cstate_limits = skx_pkg_cstate_limits; has_misc_feature_control = 1; break; case INTEL_FAM6_ICELAKE_X: /* ICX */ pkg_cstate_limits = icx_pkg_cstate_limits; has_misc_feature_control = 1; break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ no_MSR_MISC_PWR_MGMT = 1; /* FALLTHRU */ case 
INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ pkg_cstate_limits = slv_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ pkg_cstate_limits = amt_pkg_cstate_limits; no_MSR_MISC_PWR_MGMT = 1; break; case INTEL_FAM6_XEON_PHI_KNL: /* PHI */ pkg_cstate_limits = phi_pkg_cstate_limits; break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ case INTEL_FAM6_ATOM_TREMONT: /* EHL */ case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ pkg_cstate_limits = glm_pkg_cstate_limits; break; default: return 0; } get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); base_ratio = (msr >> 8) & 0xFF; base_hz = base_ratio * bclk * 1000000; has_base_hz = 1; return 1; } /* * SLV client has support for unique MSRs: * * MSR_CC6_DEMOTION_POLICY_CONFIG * MSR_MC6_DEMOTION_POLICY_CONFIG */ int has_slv_msrs(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_ATOM_SILVERMONT: case INTEL_FAM6_ATOM_SILVERMONT_MID: case INTEL_FAM6_ATOM_AIRMONT_MID: return 1; } return 0; } int is_dnv(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_ATOM_GOLDMONT_D: return 1; } return 0; } int is_bdx(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_BROADWELL_X: return 1; } return 0; } int is_skx(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_SKYLAKE_X: return 1; } return 0; } int is_icx(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_ICELAKE_X: return 1; } return 0; } int is_spr(unsigned int family, unsigned int model) { if (!genuine_intel) return 
0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
		return 1;
	}
	return 0;
}

/* return 1 on Elkhart Lake (Tremont Atom) */
int is_ehl(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_ATOM_TREMONT:
		return 1;
	}
	return 0;
}

/* return 1 on Jacobsville (Tremont-D / JVL) */
int is_jvl(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_ATOM_TREMONT_D:
		return 1;
	}
	return 0;
}

/*
 * return 1 unless this is a Silvermont-class part (own MSRs) or one of
 * the listed Nehalem-compatible exceptions.
 */
int has_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (has_slv_msrs(family, model))
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
		/* Nehalem compatible, but do not include turbo-ratio limit support */
	case INTEL_FAM6_NEHALEM_EX:	/* Nehalem-EX Xeon - Beckton */
	case INTEL_FAM6_XEON_PHI_KNL:	/* PHI - Knights Landing (different MSR definition) */
		return 0;
	default:
		return 1;
	}
}

/* Silvermont-class Atoms carry their own turbo-ratio MSRs */
int has_atom_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (has_slv_msrs(family, model))
		return 1;

	return 0;
}

/* return 1 only on IVB Xeon and HSW Xeon */
int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_IVYBRIDGE_X:	/* IVB Xeon */
	case INTEL_FAM6_HASWELL_X:	/* HSW Xeon */
		return 1;
	default:
		return 0;
	}
}

/* return 1 only on HSW Xeon */
int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_HASWELL_X:	/* HSW Xeon */
		return 1;
	default:
		return 0;
	}
}

/* return 1 only on Knights Landing */
int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_XEON_PHI_KNL:	/* Knights Landing */
		return 1;
	default:
		return 0;
	}
}

/* return 1 on GLM, SKX, ICX and SPR */
int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
		return
1;
	default:
		return 0;
	}
}

/* return 1 on models that support config-TDP */
int has_config_tdp(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return 0;

	if (family != 6)
		return 0;

	switch (model) {
	case INTEL_FAM6_IVYBRIDGE:	/* IVB */
	case INTEL_FAM6_HASWELL:	/* HSW */
	case INTEL_FAM6_HASWELL_X:	/* HSX */
	case INTEL_FAM6_HASWELL_L:	/* HSW */
	case INTEL_FAM6_HASWELL_G:	/* HSW */
	case INTEL_FAM6_BROADWELL:	/* BDW */
	case INTEL_FAM6_BROADWELL_G:	/* BDW */
	case INTEL_FAM6_BROADWELL_X:	/* BDX */
	case INTEL_FAM6_SKYLAKE_L:	/* SKL */
	case INTEL_FAM6_CANNONLAKE_L:	/* CNL */
	case INTEL_FAM6_SKYLAKE_X:	/* SKX */
	case INTEL_FAM6_ICELAKE_X:	/* ICX */
	case INTEL_FAM6_SAPPHIRERAPIDS_X:	/* SPR */
	case INTEL_FAM6_XEON_PHI_KNL:	/* Knights Landing */
		return 1;
	default:
		return 0;
	}
}

/*
 * tcc_offset_bits:
 * 0: Tcc Offset not supported (Default)
 * 6: Bit 29:24 of MSR_PLATFORM_INFO
 * 4: Bit 27:24 of MSR_PLATFORM_INFO
 */
void check_tcc_offset(int model)
{
	unsigned long long msr;

	if (!genuine_intel)
		return;

	switch (model) {
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_COMETLAKE:
		if (!get_msr(base_cpu, MSR_PLATFORM_INFO, &msr)) {
			/* bit 30 set => use the 6-bit offset field -- TODO confirm against SDM */
			msr = (msr >> 30) & 1;
			if (msr)
				tcc_offset_bits = 6;
		}
		/*
		 * NOTE(review): no path ever sets tcc_offset_bits = 4 despite
		 * the header comment above -- confirm whether 4-bit models are
		 * intentionally unsupported.
		 */
		return;
	default:
		return;
	}
}

/* strip '_' characters from s, in place */
static void remove_underbar(char *s)
{
	char *to = s;

	while (*s) {
		if (*s != '_')
			*to++ = *s;
		s++;
	}

	*to = 0;
}

/* dump whichever turbo-ratio-limit MSR variants this model supports */
static void dump_turbo_ratio_info(unsigned int family, unsigned int model)
{
	if (!has_turbo)
		return;

	if (has_hsw_turbo_ratio_limit(family, model))
		dump_hsw_turbo_ratio_limits();

	if (has_ivt_turbo_ratio_limit(family, model))
		dump_ivt_turbo_ratio_limits();

	if (has_turbo_ratio_limit(family, model)) {
		dump_turbo_ratio_limits(MSR_TURBO_RATIO_LIMIT, family, model);

		/* hybrid parts carry a second limit MSR for the other core type */
		if (is_hybrid)
			dump_turbo_ratio_limits(MSR_SECONDARY_TURBO_RATIO_LIMIT, family, model);
	}

	if (has_atom_turbo_ratio_limit(family, model))
		dump_atom_turbo_ratio_limits();
if (has_knl_turbo_ratio_limit(family, model)) dump_knl_turbo_ratio_limits(); if (has_config_tdp(family, model)) dump_config_tdp(); } static void dump_cstate_pstate_config_info(unsigned int family, unsigned int model) { if (!do_nhm_platform_info) return; dump_nhm_platform_info(); dump_turbo_ratio_info(family, model); dump_nhm_cst_cfg(); } static int read_sysfs_int(char *path) { FILE *input; int retval = -1; input = fopen(path, "r"); if (input == NULL) { if (debug) fprintf(outf, "NSFOD %s\n", path); return (-1); } if (fscanf(input, "%d", &retval) != 1) err(1, "%s: failed to read int from file", path); fclose(input); return (retval); } static void dump_sysfs_file(char *path) { FILE *input; char cpuidle_buf[64]; input = fopen(path, "r"); if (input == NULL) { if (debug) fprintf(outf, "NSFOD %s\n", path); return; } if (!fgets(cpuidle_buf, sizeof(cpuidle_buf), input)) err(1, "%s: failed to read file", path); fclose(input); fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf); } static void intel_uncore_frequency_probe(void) { int i, j; char path[128]; if (!genuine_intel) return; if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00", R_OK)) return; if (!access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK)) BIC_PRESENT(BIC_UNCORE_MHZ); if (quiet) return; for (i = 0; i < topo.num_packages; ++i) { for (j = 0; j < topo.num_die; ++j) { int k, l; sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/min_freq_khz", i, j); k = read_sysfs_int(path); sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/max_freq_khz", i, j); l = read_sysfs_int(path); fprintf(outf, "Uncore Frequency pkg%d die%d: %d - %d MHz ", i, j, k / 1000, l / 1000); sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_min_freq_khz", i, j); k = read_sysfs_int(path); sprintf(path, 
"/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_max_freq_khz", i, j); l = read_sysfs_int(path); fprintf(outf, "(%d - %d MHz)\n", k / 1000, l / 1000); } } } static void dump_sysfs_cstate_config(void) { char path[64]; char name_buf[16]; char desc[64]; FILE *input; int state; char *sp; if (access("/sys/devices/system/cpu/cpuidle", R_OK)) { fprintf(outf, "cpuidle not loaded\n"); return; } dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_driver"); dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor"); dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor_ro"); for (state = 0; state < 10; ++state) { sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; if (!fgets(name_buf, sizeof(name_buf), input)) err(1, "%s: failed to read file", path); /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ sp = strchr(name_buf, '-'); if (!sp) sp = strchrnul(name_buf, '\n'); *sp = '\0'; fclose(input); remove_underbar(name_buf); sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; if (!fgets(desc, sizeof(desc), input)) err(1, "%s: failed to read file", path); fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc); fclose(input); } } static void dump_sysfs_pstate_config(void) { char path[64]; char driver_buf[64]; char governor_buf[64]; FILE *input; int turbo; sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_driver", base_cpu); input = fopen(path, "r"); if (input == NULL) { fprintf(outf, "NSFOD %s\n", path); return; } if (!fgets(driver_buf, sizeof(driver_buf), input)) err(1, "%s: failed to read file", path); fclose(input); sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor", base_cpu); input = fopen(path, "r"); if (input == NULL) { fprintf(outf, "NSFOD %s\n", path); return; } if (!fgets(governor_buf, 
sizeof(governor_buf), input)) err(1, "%s: failed to read file", path); fclose(input); fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf); fprintf(outf, "cpu%d: cpufreq governor: %s", base_cpu, governor_buf); sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); input = fopen(path, "r"); if (input != NULL) { if (fscanf(input, "%d", &turbo) != 1) err(1, "%s: failed to parse number from file", path); fprintf(outf, "cpufreq boost: %d\n", turbo); fclose(input); } sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); input = fopen(path, "r"); if (input != NULL) { if (fscanf(input, "%d", &turbo) != 1) err(1, "%s: failed to parse number from file", path); fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); fclose(input); } } /* * print_epb() * Decode the ENERGY_PERF_BIAS MSR */ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) { char *epb_string; int cpu, epb; UNUSED(c); UNUSED(p); if (!has_epb) return 0; cpu = t->cpu_id; /* EPB is per-package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (cpu_migrate(cpu)) { fprintf(outf, "print_epb: Could not migrate to CPU %d\n", cpu); return -1; } epb = get_epb(cpu); if (epb < 0) return 0; switch (epb) { case ENERGY_PERF_BIAS_PERFORMANCE: epb_string = "performance"; break; case ENERGY_PERF_BIAS_NORMAL: epb_string = "balanced"; break; case ENERGY_PERF_BIAS_POWERSAVE: epb_string = "powersave"; break; default: epb_string = "custom"; break; } fprintf(outf, "cpu%d: EPB: %d (%s)\n", cpu, epb, epb_string); return 0; } /* * print_hwp() * Decode the MSR_HWP_CAPABILITIES */ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; int cpu; UNUSED(c); UNUSED(p); if (!has_hwp) return 0; cpu = t->cpu_id; /* MSR_HWP_CAPABILITIES is per-package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (cpu_migrate(cpu)) { 
fprintf(outf, "print_hwp: Could not migrate to CPU %d\n", cpu); return -1; } if (get_msr(cpu, MSR_PM_ENABLE, &msr)) return 0; fprintf(outf, "cpu%d: MSR_PM_ENABLE: 0x%08llx (%sHWP)\n", cpu, msr, (msr & (1 << 0)) ? "" : "No-"); /* MSR_PM_ENABLE[1] == 1 if HWP is enabled and MSRs visible */ if ((msr & (1 << 0)) == 0) return 0; if (get_msr(cpu, MSR_HWP_CAPABILITIES, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " "(high %d guar %d eff %d low %d)\n", cpu, msr, (unsigned int)HWP_HIGHEST_PERF(msr), (unsigned int)HWP_GUARANTEED_PERF(msr), (unsigned int)HWP_MOSTEFFICIENT_PERF(msr), (unsigned int)HWP_LOWEST_PERF(msr)); if (get_msr(cpu, MSR_HWP_REQUEST, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n", cpu, msr, (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), (unsigned int)(((msr) >> 16) & 0xff), (unsigned int)(((msr) >> 24) & 0xff), (unsigned int)(((msr) >> 32) & 0xff3), (unsigned int)(((msr) >> 42) & 0x1)); if (has_hwp_pkg) { if (get_msr(cpu, MSR_HWP_REQUEST_PKG, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " "(min %d max %d des %d epp 0x%x window 0x%x)\n", cpu, msr, (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), (unsigned int)(((msr) >> 16) & 0xff), (unsigned int)(((msr) >> 24) & 0xff), (unsigned int)(((msr) >> 32) & 0xff3)); } if (has_hwp_notify) { if (get_msr(cpu, MSR_HWP_INTERRUPT, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_INTERRUPT: 0x%08llx " "(%s_Guaranteed_Perf_Change, %s_Excursion_Min)\n", cpu, msr, ((msr) & 0x1) ? "EN" : "Dis", ((msr) & 0x2) ? "EN" : "Dis"); } if (get_msr(cpu, MSR_HWP_STATUS, &msr)) return 0; fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx " "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? 
"" : "No-"); return 0; } /* * print_perf_limit() */ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; int cpu; UNUSED(c); UNUSED(p); cpu = t->cpu_id; /* per-package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; if (cpu_migrate(cpu)) { fprintf(outf, "print_perf_limit: Could not migrate to CPU %d\n", cpu); return -1; } if (do_core_perf_limit_reasons) { get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr); fprintf(outf, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(outf, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)", (msr & 1 << 15) ? "bit15, " : "", (msr & 1 << 14) ? "bit14, " : "", (msr & 1 << 13) ? "Transitions, " : "", (msr & 1 << 12) ? "MultiCoreTurbo, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 9) ? "CorePwr, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 5) ? "Auto-HWP, " : "", (msr & 1 << 4) ? "Graphics, " : "", (msr & 1 << 2) ? "bit2, " : "", (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 0) ? "PROCHOT, " : ""); fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", (msr & 1 << 31) ? "bit31, " : "", (msr & 1 << 30) ? "bit30, " : "", (msr & 1 << 29) ? "Transitions, " : "", (msr & 1 << 28) ? "MultiCoreTurbo, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 25) ? "CorePwr, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 21) ? "Auto-HWP, " : "", (msr & 1 << 20) ? "Graphics, " : "", (msr & 1 << 18) ? "bit18, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 16) ? "PROCHOT, " : ""); } if (do_gfx_perf_limit_reasons) { get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr); fprintf(outf, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(outf, " (Active: %s%s%s%s%s%s%s%s)", (msr & 1 << 0) ? "PROCHOT, " : "", (msr & 1 << 1) ? 
"ThermStatus, " : "", (msr & 1 << 4) ? "Graphics, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 9) ? "GFXPwr, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : ""); fprintf(outf, " (Logged: %s%s%s%s%s%s%s%s)\n", (msr & 1 << 16) ? "PROCHOT, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 20) ? "Graphics, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 25) ? "GFXPwr, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : ""); } if (do_ring_perf_limit_reasons) { get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr); fprintf(outf, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr); fprintf(outf, " (Active: %s%s%s%s%s%s)", (msr & 1 << 0) ? "PROCHOT, " : "", (msr & 1 << 1) ? "ThermStatus, " : "", (msr & 1 << 6) ? "VR-Therm, " : "", (msr & 1 << 8) ? "Amps, " : "", (msr & 1 << 10) ? "PkgPwrL1, " : "", (msr & 1 << 11) ? "PkgPwrL2, " : ""); fprintf(outf, " (Logged: %s%s%s%s%s%s)\n", (msr & 1 << 16) ? "PROCHOT, " : "", (msr & 1 << 17) ? "ThermStatus, " : "", (msr & 1 << 22) ? "VR-Therm, " : "", (msr & 1 << 24) ? "Amps, " : "", (msr & 1 << 26) ? "PkgPwrL1, " : "", (msr & 1 << 27) ? "PkgPwrL2, " : ""); } return 0; } #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ double get_tdp_intel(unsigned int model) { unsigned long long msr; if (do_rapl & RAPL_PKG_POWER_INFO) if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; switch (model) { case INTEL_FAM6_ATOM_SILVERMONT: case INTEL_FAM6_ATOM_SILVERMONT_D: return 30.0; default: return 135.0; } } double get_tdp_amd(unsigned int family) { UNUSED(family); /* This is the max stock TDP of HEDT/Server Fam17h+ chips */ return 280.0; } /* * rapl_dram_energy_units_probe() * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 
*/ static double rapl_dram_energy_units_probe(int model, double rapl_energy_units) { /* only called for genuine_intel, family 6 */ switch (model) { case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ case INTEL_FAM6_ICELAKE_X: /* ICX */ return (rapl_dram_energy_units = 15.3 / 1000000); default: return (rapl_energy_units); } } void rapl_probe_intel(unsigned int family, unsigned int model) { unsigned long long msr; unsigned int time_unit; double tdp; if (family != 6) return; switch (model) { case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_IVYBRIDGE: case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_L: /* HSW */ case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_G: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_GFX_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_GFXWatt); } break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; if (rapl_joules) BIC_PRESENT(BIC_Pkg_J); else BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_ATOM_TREMONT: /* EHL */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_RAM_J); BIC_PRESENT(BIC_GFX_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_RAMWatt); BIC_PRESENT(BIC_GFXWatt); } break; case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ do_rapl = RAPL_PKG | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); if (rapl_joules) BIC_PRESENT(BIC_Pkg_J); else BIC_PRESENT(BIC_PkgWatt); break; case INTEL_FAM6_SKYLAKE_L: /* SKL */ case 
INTEL_FAM6_CANNONLAKE_L: /* CNL */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_RAM_J); BIC_PRESENT(BIC_GFX_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_RAMWatt); BIC_PRESENT(BIC_GFXWatt); } break; case INTEL_FAM6_HASWELL_X: /* HSX */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ICELAKE_X: /* ICX */ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_RAM_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_RAMWatt); } break; case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE_X: do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_RAM_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_RAMWatt); } break; case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ do_rapl = RAPL_PKG | RAPL_CORES; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); } break; case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { 
BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_RAM_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_RAMWatt); } break; default: return; } /* units on package 0, verify later other packages match */ if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) return; rapl_power_units = 1.0 / (1 << (msr & 0xF)); if (model == INTEL_FAM6_ATOM_SILVERMONT) rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; else rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); rapl_dram_energy_units = rapl_dram_energy_units_probe(model, rapl_energy_units); time_unit = msr >> 16 & 0xF; if (time_unit == 0) time_unit = 0xA; rapl_time_units = 1.0 / (1 << (time_unit)); tdp = get_tdp_intel(model); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (!quiet) fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); } void rapl_probe_amd(unsigned int family, unsigned int model) { unsigned long long msr; unsigned int eax, ebx, ecx, edx; unsigned int has_rapl = 0; double tdp; UNUSED(model); if (max_extended_level >= 0x80000007) { __cpuid(0x80000007, eax, ebx, ecx, edx); /* RAPL (Fam 17h+) */ has_rapl = edx & (1 << 14); } if (!has_rapl || family < 0x17) return; do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); } if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr)) return; rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf)); rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); rapl_power_units = ldexp(1.0, -(msr & 0xf)); tdp = get_tdp_amd(family); rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; if (!quiet) fprintf(outf, "RAPL: %.0f sec. 
Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
}

/*
 * rapl_probe()
 *
 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
 */
void rapl_probe(unsigned int family, unsigned int model)
{
	/* Dispatch to the vendor-specific probe; the callee sets the globals. */
	if (genuine_intel)
		rapl_probe_intel(family, model);
	if (authentic_amd || hygon_genuine)
		rapl_probe_amd(family, model);
}

/*
 * perf_limit_reasons_probe()
 *
 * Enable decoding of the PERF_LIMIT_REASONS MSRs on Haswell parts:
 * core and ring domains on all listed models, and additionally the
 * graphics domain on the client models, which fall through.
 */
void perf_limit_reasons_probe(unsigned int family, unsigned int model)
{
	if (!genuine_intel)
		return;

	if (family != 6)
		return;

	switch (model) {
	case INTEL_FAM6_HASWELL:	/* HSW */
	case INTEL_FAM6_HASWELL_L:	/* HSW */
	case INTEL_FAM6_HASWELL_G:	/* HSW */
		do_gfx_perf_limit_reasons = 1;
		/* FALLTHRU */
	case INTEL_FAM6_HASWELL_X:	/* HSX */
		do_core_perf_limit_reasons = 1;
		do_ring_perf_limit_reasons = 1;
	default:
		return;
	}
}

/* Flag Broadwell-X / Skylake-X support for automatic C-state conversion. */
void automatic_cstate_conversion_probe(unsigned int family, unsigned int model)
{
	if (family != 6)
		return;

	switch (model) {
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SKYLAKE_X:
		has_automatic_cstate_conversion = 1;
	}
}

/* On ICX/SPR, record dis_cstate_prewake — consumed elsewhere in the tool. */
void prewake_cstate_probe(unsigned int family, unsigned int model)
{
	if (is_icx(family, model) || is_spr(family, model))
		dis_cstate_prewake = 1;
}

/*
 * print_thermal()
 *
 * Dump the package (PTM) and per-core (DTS) thermal status/interrupt
 * MSRs for this thread's CPU.  Temperature fields are encoded as
 * degrees-C below tj_max, hence the "tj_max - dts" prints.
 * Returns 0, or -1 if the CPU could not be migrated to.
 */
int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int dts, dts2;
	int cpu;

	UNUSED(c);
	UNUSED(p);

	if (!(do_dts || do_ptm))
		return 0;

	cpu = t->cpu_id;

	/* DTS is per-core, no need to print for each thread */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
		return 0;

	if (cpu_migrate(cpu)) {
		fprintf(outf, "print_thermal: Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	/* package-scope sensor: print only once per package */
	if (do_ptm && (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) {
		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
			return 0;

		dts = (msr >> 16) & 0x7F;
		fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tj_max - dts);

		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
			return 0;

		/* two threshold fields, both printed as degrees below tj_max */
		dts = (msr >> 16) & 0x7F;
		dts2 = (msr >> 8) & 0x7F;
		fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx 
(%d C, %d C)\n", cpu, msr, tj_max - dts, tj_max - dts2); } if (do_dts && debug) { unsigned int resolution; if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) return 0; dts = (msr >> 16) & 0x7F; resolution = (msr >> 27) & 0xF; fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", cpu, msr, tj_max - dts, resolution); if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) return 0; dts = (msr >> 16) & 0x7F; dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tj_max - dts, tj_max - dts2); } return 0; } void print_power_limit_msr(int cpu, unsigned long long msr, char *label) { fprintf(outf, "cpu%d: %s: %sabled (%0.3f Watts, %f sec, clamp %sabled)\n", cpu, label, ((msr >> 15) & 1) ? "EN" : "DIS", ((msr >> 0) & 0x7FFF) * rapl_power_units, (1.0 + (((msr >> 22) & 0x3) / 4.0)) * (1 << ((msr >> 17) & 0x1F)) * rapl_time_units, (((msr >> 16) & 1) ? "EN" : "DIS")); return; } int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; const char *msr_name; int cpu; UNUSED(c); UNUSED(p); if (!do_rapl) return 0; /* RAPL counters are per package, so print only for 1st thread/package */ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)) return 0; cpu = t->cpu_id; if (cpu_migrate(cpu)) { fprintf(outf, "print_rapl: Could not migrate to CPU %d\n", cpu); return -1; } if (do_rapl & RAPL_AMD_F17H) { msr_name = "MSR_RAPL_PWR_UNIT"; if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr)) return -1; } else { msr_name = "MSR_RAPL_POWER_UNIT"; if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) return -1; } fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr, rapl_power_units, rapl_energy_units, rapl_time_units); if (do_rapl & RAPL_PKG_POWER_INFO) { if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr)) return -5; fprintf(outf, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", cpu, msr, ((msr >> 0) & 
RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); } if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 63) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "PKG Limit #1"); fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%0.3f Watts, %f* sec, clamp %sabled)\n", cpu, ((msr >> 47) & 1) ? "EN" : "DIS", ((msr >> 32) & 0x7FFF) * rapl_power_units, (1.0 + (((msr >> 54) & 0x3) / 4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units, ((msr >> 48) & 1) ? "EN" : "DIS"); if (get_msr(cpu, MSR_VR_CURRENT_CONFIG, &msr)) return -9; fprintf(outf, "cpu%d: MSR_VR_CURRENT_CONFIG: 0x%08llx\n", cpu, msr); fprintf(outf, "cpu%d: PKG Limit #4: %f Watts (%slocked)\n", cpu, ((msr >> 0) & 0x1FFF) * rapl_power_units, (msr >> 31) & 1 ? "" : "UN"); } if (do_rapl & RAPL_DRAM_POWER_INFO) { if (get_msr(cpu, MSR_DRAM_POWER_INFO, &msr)) return -6; fprintf(outf, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n", cpu, msr, ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 16) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units, ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units); } if (do_rapl & RAPL_DRAM) { if (get_msr(cpu, MSR_DRAM_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? 
"" : "UN"); print_power_limit_msr(cpu, msr, "DRAM Limit"); } if (do_rapl & RAPL_CORE_POLICY) { if (get_msr(cpu, MSR_PP0_POLICY, &msr)) return -7; fprintf(outf, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF); } if (do_rapl & RAPL_CORES_POWER_LIMIT) { if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "Cores Limit"); } if (do_rapl & RAPL_GFX) { if (get_msr(cpu, MSR_PP1_POLICY, &msr)) return -8; fprintf(outf, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu, msr & 0xF); if (get_msr(cpu, MSR_PP1_POWER_LIMIT, &msr)) return -9; fprintf(outf, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n", cpu, msr, (msr >> 31) & 1 ? "" : "UN"); print_power_limit_msr(cpu, msr, "GFX Limit"); } return 0; } /* * SNB adds support for additional MSRs: * * MSR_PKG_C7_RESIDENCY 0x000003fa * MSR_CORE_C7_RESIDENCY 0x000003fe * MSR_PKG_C2_RESIDENCY 0x0000060d */ int has_snb_msrs(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_SANDYBRIDGE: case INTEL_FAM6_SANDYBRIDGE_X: case INTEL_FAM6_IVYBRIDGE: /* IVB */ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ case INTEL_FAM6_HASWELL: /* HSW */ case INTEL_FAM6_HASWELL_X: /* HSW */ case INTEL_FAM6_HASWELL_L: /* HSW */ case INTEL_FAM6_HASWELL_G: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_BROADWELL_G: /* BDW */ case INTEL_FAM6_BROADWELL_X: /* BDX */ case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ICELAKE_X: /* ICX */ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ case INTEL_FAM6_ATOM_TREMONT: /* EHL */ case INTEL_FAM6_ATOM_TREMONT_D: /* JVL */ return 1; } return 0; } /* * HSW ULT added support for C8/C9/C10 MSRs: * * 
MSR_PKG_C8_RESIDENCY 0x00000630 * MSR_PKG_C9_RESIDENCY 0x00000631 * MSR_PKG_C10_RESIDENCY 0x00000632 * * MSR_PKGC8_IRTL 0x00000633 * MSR_PKGC9_IRTL 0x00000634 * MSR_PKGC10_IRTL 0x00000635 * */ int has_c8910_msrs(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_HASWELL_L: /* HSW */ case INTEL_FAM6_BROADWELL: /* BDW */ case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: case INTEL_FAM6_ATOM_TREMONT: /* EHL */ return 1; } return 0; } /* * SKL adds support for additional MSRS: * * MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 * MSR_PKG_ANY_CORE_C0_RES 0x00000659 * MSR_PKG_ANY_GFXE_C0_RES 0x0000065A * MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B */ int has_skl_msrs(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_SKYLAKE_L: /* SKL */ case INTEL_FAM6_CANNONLAKE_L: /* CNL */ return 1; } return 0; } int is_slm(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */ return 1; } return 0; } int is_knl(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_XEON_PHI_KNL: /* KNL */ return 1; } return 0; } int is_cnl(unsigned int family, unsigned int model) { if (!genuine_intel) return 0; if (family != 6) return 0; switch (model) { case INTEL_FAM6_CANNONLAKE_L: /* CNL */ return 1; } return 0; } unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) { if (is_knl(family, model)) return 1024; return 1; } #define SLM_BCLK_FREQS 5 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0 }; double slm_bclk(void) { unsigned long long msr = 3; unsigned int i; double 
freq;

	/* msr pre-set to 3 so a failed read falls back to table entry 3 */
	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
		fprintf(outf, "SLM BCLK: unknown\n");

	i = msr & 0xf;
	if (i >= SLM_BCLK_FREQS) {
		fprintf(outf, "SLM BCLK[%d] invalid\n", i);
		i = 3;
	}
	freq = slm_freq_table[i];

	if (!quiet)
		fprintf(outf, "SLM BCLK: %.1f Mhz\n", freq);

	return freq;
}

/*
 * discover_bclk()
 *
 * Return the base-clock frequency in MHz: 100 MHz on SNB and later
 * (and KNL), the MSR_FSB_FREQ table value on Silvermont, and the
 * legacy 133.33 MHz otherwise.
 */
double discover_bclk(unsigned int family, unsigned int model)
{
	if (has_snb_msrs(family, model) || is_knl(family, model))
		return 100.00;
	else if (is_slm(family, model))
		return slm_bclk();
	else
		return 133.33;
}

/*
 * get_cpu_type()
 *
 * On hybrid-capable Intel parts, query CPUID leaf 0x1a on this
 * thread's CPU and set t->is_atom when the core-type byte is 0x20.
 * Returns 0, or -1 if the CPU could not be migrated to.
 */
int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned int eax, ebx, ecx, edx;

	UNUSED(c);
	UNUSED(p);

	if (!genuine_intel)
		return 0;

	if (cpu_migrate(t->cpu_id)) {
		fprintf(outf, "Could not migrate to CPU %d\n", t->cpu_id);
		return -1;
	}

	if (max_level < 0x1a)
		return 0;

	__cpuid(0x1a, eax, ebx, ecx, edx);
	eax = (eax >> 24) & 0xFF;	/* core-type byte */
	if (eax == 0x20)
		t->is_atom = true;
	return 0;
}

/*
 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
 * the Thermal Control Circuit (TCC) activates.
 * This is usually equal to tjMax.
 *
 * Older processors do not have this MSR, so there we guess,
 * but also allow cmdline over-ride with -T.
 *
 * Several MSR temperature values are in units of degrees-C
 * below this value, including the Digital Thermal Sensor (DTS),
 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
*/
/*
 * set_temperature_target()
 *
 * Establish the global tj_max for this package: cmdline override (-T)
 * wins, then MSR_IA32_TEMPERATURE_TARGET, else the TJMAX_DEFAULT guess.
 * Runs once per package (first thread of first core only).
 * Returns 0, or -1 if the CPU could not be migrated to.
 */
int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
	unsigned long long msr;
	unsigned int tcc_default, tcc_offset;
	int cpu;

	UNUSED(c);
	UNUSED(p);

	/* tj_max is used only for dts or ptm */
	if (!(do_dts || do_ptm))
		return 0;

	/* this is a per-package concept */
	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
		return 0;

	cpu = t->cpu_id;
	if (cpu_migrate(cpu)) {
		fprintf(outf, "Could not migrate to CPU %d\n", cpu);
		return -1;
	}

	if (tj_max_override != 0) {
		tj_max = tj_max_override;
		fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n", cpu, tj_max);
		return 0;
	}

	/* Temperature Target MSR is Nehalem and newer only */
	if (!do_nhm_platform_info)
		goto guess;

	if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
		goto guess;

	tcc_default = (msr >> 16) & 0xFF;

	if (!quiet) {
		/* the offset field width is model-specific: 4 or 6 bits */
		switch (tcc_offset_bits) {
		case 4:
			tcc_offset = (msr >> 24) & 0xF;
			fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
				cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset);
			break;
		case 6:
			tcc_offset = (msr >> 24) & 0x3F;
			fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
				cpu, msr, tcc_default - tcc_offset, tcc_default, tcc_offset);
			break;
		default:
			fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n", cpu, msr, tcc_default);
			break;
		}
	}

	/* a zero target in the MSR is not credible; fall back to the guess */
	if (!tcc_default)
		goto guess;

	tj_max = tcc_default;

	return 0;

guess:
	tj_max = TJMAX_DEFAULT;
	fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n", cpu, tj_max);

	return 0;
}

/*
 * decode_feature_control_msr()
 *
 * Print MSR_IA32_FEAT_CTL: the lock bit and (per the label) bit 18.
 */
void decode_feature_control_msr(void)
{
	unsigned long long msr;

	if (!get_msr(base_cpu, MSR_IA32_FEAT_CTL, &msr))
		fprintf(outf, "cpu%d: MSR_IA32_FEATURE_CONTROL: 0x%08llx (%sLocked %s)\n",
			base_cpu, msr, msr & FEAT_CTL_LOCKED ? "" : "UN-", msr & (1 << 18) ?
"SGX" : ""); } void decode_misc_enable_msr(void) { unsigned long long msr; if (!genuine_intel) return; if (!get_msr(base_cpu, MSR_IA32_MISC_ENABLE, &msr)) fprintf(outf, "cpu%d: MSR_IA32_MISC_ENABLE: 0x%08llx (%sTCC %sEIST %sMWAIT %sPREFETCH %sTURBO)\n", base_cpu, msr, msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-", msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-", msr & MSR_IA32_MISC_ENABLE_MWAIT ? "" : "No-", msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "", msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? "No-" : ""); } void decode_misc_feature_control(void) { unsigned long long msr; if (!has_misc_feature_control) return; if (!get_msr(base_cpu, MSR_MISC_FEATURE_CONTROL, &msr)) fprintf(outf, "cpu%d: MSR_MISC_FEATURE_CONTROL: 0x%08llx (%sL2-Prefetch %sL2-Prefetch-pair %sL1-Prefetch %sL1-IP-Prefetch)\n", base_cpu, msr, msr & (0 << 0) ? "No-" : "", msr & (1 << 0) ? "No-" : "", msr & (2 << 0) ? "No-" : "", msr & (3 << 0) ? "No-" : ""); } /* * Decode MSR_MISC_PWR_MGMT * * Decode the bits according to the Nehalem documentation * bit[0] seems to continue to have same meaning going forward * bit[1] less so... */ void decode_misc_pwr_mgmt_msr(void) { unsigned long long msr; if (!do_nhm_platform_info) return; if (no_MSR_MISC_PWR_MGMT) return; if (!get_msr(base_cpu, MSR_MISC_PWR_MGMT, &msr)) fprintf(outf, "cpu%d: MSR_MISC_PWR_MGMT: 0x%08llx (%sable-EIST_Coordination %sable-EPB %sable-OOB)\n", base_cpu, msr, msr & (1 << 0) ? "DIS" : "EN", msr & (1 << 1) ? "EN" : "DIS", msr & (1 << 8) ? "EN" : "DIS"); } /* * Decode MSR_CC6_DEMOTION_POLICY_CONFIG, MSR_MC6_DEMOTION_POLICY_CONFIG * * This MSRs are present on Silvermont processors, * Intel Atom processor E3000 series (Baytrail), and friends. */ void decode_c6_demotion_policy_msr(void) { unsigned long long msr; if (!get_msr(base_cpu, MSR_CC6_DEMOTION_POLICY_CONFIG, &msr)) fprintf(outf, "cpu%d: MSR_CC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-CC6-Demotion)\n", base_cpu, msr, msr & (1 << 0) ? 
"EN" : "DIS"); if (!get_msr(base_cpu, MSR_MC6_DEMOTION_POLICY_CONFIG, &msr)) fprintf(outf, "cpu%d: MSR_MC6_DEMOTION_POLICY_CONFIG: 0x%08llx (%sable-MC6-Demotion)\n", base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS"); } /* * When models are the same, for the purpose of turbostat, reuse */ unsigned int intel_model_duplicates(unsigned int model) { switch (model) { case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */ case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */ case 0x1F: /* Core i7 and i5 Processor - Nehalem */ case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */ case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */ return INTEL_FAM6_NEHALEM; case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */ case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */ return INTEL_FAM6_NEHALEM_EX; case INTEL_FAM6_XEON_PHI_KNM: return INTEL_FAM6_XEON_PHI_KNL; case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_BROADWELL_D: /* BDX-DE */ return INTEL_FAM6_BROADWELL_X; case INTEL_FAM6_SKYLAKE_L: case INTEL_FAM6_SKYLAKE: case INTEL_FAM6_KABYLAKE_L: case INTEL_FAM6_KABYLAKE: case INTEL_FAM6_COMETLAKE_L: case INTEL_FAM6_COMETLAKE: return INTEL_FAM6_SKYLAKE_L; case INTEL_FAM6_ICELAKE_L: case INTEL_FAM6_ICELAKE_NNPI: case INTEL_FAM6_TIGERLAKE_L: case INTEL_FAM6_TIGERLAKE: case INTEL_FAM6_ROCKETLAKE: case INTEL_FAM6_LAKEFIELD: case INTEL_FAM6_ALDERLAKE: case INTEL_FAM6_ALDERLAKE_L: case INTEL_FAM6_ATOM_GRACEMONT: case INTEL_FAM6_RAPTORLAKE: case INTEL_FAM6_RAPTORLAKE_P: case INTEL_FAM6_RAPTORLAKE_S: case INTEL_FAM6_METEORLAKE: case INTEL_FAM6_METEORLAKE_L: return INTEL_FAM6_CANNONLAKE_L; case INTEL_FAM6_ATOM_TREMONT_L: return INTEL_FAM6_ATOM_TREMONT; case INTEL_FAM6_ICELAKE_D: return INTEL_FAM6_ICELAKE_X; case INTEL_FAM6_EMERALDRAPIDS_X: return INTEL_FAM6_SAPPHIRERAPIDS_X; } return model; } void print_dev_latency(void) { char *path = "/dev/cpu_dma_latency"; int fd; int value; int 
retval; fd = open(path, O_RDONLY); if (fd < 0) { warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname); return; } retval = read(fd, (void *)&value, sizeof(int)); if (retval != sizeof(int)) { warn("read failed %s", path); close(fd); return; } fprintf(outf, "/dev/cpu_dma_latency: %d usec (%s)\n", value, value == 2000000000 ? "default" : "constrained"); close(fd); } /* * Linux-perf manages the HW instructions-retired counter * by enabling when requested, and hiding rollover */ void linux_perf_init(void) { if (!BIC_IS_ENABLED(BIC_IPC)) return; if (access("/proc/sys/kernel/perf_event_paranoid", F_OK)) return; fd_instr_count_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); if (fd_instr_count_percpu == NULL) err(-1, "calloc fd_instr_count_percpu"); BIC_PRESENT(BIC_IPC); } void process_cpuid() { unsigned int eax, ebx, ecx, edx; unsigned int fms, family, model, stepping, ecx_flags, edx_flags; unsigned long long ucode_patch = 0; eax = ebx = ecx = edx = 0; __cpuid(0, max_level, ebx, ecx, edx); if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69) genuine_intel = 1; else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) authentic_amd = 1; else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e) hygon_genuine = 1; if (!quiet) fprintf(outf, "CPUID(0): %.4s%.4s%.4s 0x%x CPUID levels\n", (char *)&ebx, (char *)&edx, (char *)&ecx, max_level); __cpuid(1, fms, ebx, ecx, edx); family = (fms >> 8) & 0xf; model = (fms >> 4) & 0xf; stepping = fms & 0xf; if (family == 0xf) family += (fms >> 20) & 0xff; if (family >= 6) model += ((fms >> 16) & 0xf) << 4; ecx_flags = ecx; edx_flags = edx; if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch)) warnx("get_msr(UCODE)"); /* * check max extended function levels of CPUID. * This is needed to check for invariant TSC. * This check is valid for both Intel and AMD. 
*/ ebx = ecx = edx = 0; __cpuid(0x80000000, max_extended_level, ebx, ecx, edx); if (!quiet) { fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d) microcode 0x%x\n", family, model, stepping, family, model, stepping, (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF)); fprintf(outf, "CPUID(0x80000000): max_extended_levels: 0x%x\n", max_extended_level); fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n", ecx_flags & (1 << 0) ? "SSE3" : "-", ecx_flags & (1 << 3) ? "MONITOR" : "-", ecx_flags & (1 << 6) ? "SMX" : "-", ecx_flags & (1 << 7) ? "EIST" : "-", ecx_flags & (1 << 8) ? "TM2" : "-", edx_flags & (1 << 4) ? "TSC" : "-", edx_flags & (1 << 5) ? "MSR" : "-", edx_flags & (1 << 22) ? "ACPI-TM" : "-", edx_flags & (1 << 28) ? "HT" : "-", edx_flags & (1 << 29) ? "TM" : "-"); } if (genuine_intel) { model_orig = model; model = intel_model_duplicates(model); } if (!(edx_flags & (1 << 5))) errx(1, "CPUID: no MSR"); if (max_extended_level >= 0x80000007) { /* * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8 * this check is valid for both Intel and AMD */ __cpuid(0x80000007, eax, ebx, ecx, edx); has_invariant_tsc = edx & (1 << 8); } /* * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0 * this check is valid for both Intel and AMD */ __cpuid(0x6, eax, ebx, ecx, edx); has_aperf = ecx & (1 << 0); if (has_aperf) { BIC_PRESENT(BIC_Avg_MHz); BIC_PRESENT(BIC_Busy); BIC_PRESENT(BIC_Bzy_MHz); } do_dts = eax & (1 << 0); if (do_dts) BIC_PRESENT(BIC_CoreTmp); has_turbo = eax & (1 << 1); do_ptm = eax & (1 << 6); if (do_ptm) BIC_PRESENT(BIC_PkgTmp); has_hwp = eax & (1 << 7); has_hwp_notify = eax & (1 << 8); has_hwp_activity_window = eax & (1 << 9); has_hwp_epp = eax & (1 << 10); has_hwp_pkg = eax & (1 << 11); has_epb = ecx & (1 << 3); if (!quiet) fprintf(outf, "CPUID(6): %sAPERF, %sTURBO, %sDTS, %sPTM, %sHWP, " "%sHWPnotify, %sHWPwindow, %sHWPepp, %sHWPpkg, %sEPB\n", has_aperf ? "" : "No-", has_turbo ? "" : "No-", do_dts ? "" : "No-", do_ptm ? 
"" : "No-", has_hwp ? "" : "No-", has_hwp_notify ? "" : "No-", has_hwp_activity_window ? "" : "No-", has_hwp_epp ? "" : "No-", has_hwp_pkg ? "" : "No-", has_epb ? "" : "No-"); if (!quiet) decode_misc_enable_msr(); if (max_level >= 0x7 && !quiet) { int has_sgx; ecx = 0; __cpuid_count(0x7, 0, eax, ebx, ecx, edx); has_sgx = ebx & (1 << 2); is_hybrid = edx & (1 << 15); fprintf(outf, "CPUID(7): %sSGX %sHybrid\n", has_sgx ? "" : "No-", is_hybrid ? "" : "No-"); if (has_sgx) decode_feature_control_msr(); } if (max_level >= 0x15) { unsigned int eax_crystal; unsigned int ebx_tsc; /* * CPUID 15H TSC/Crystal ratio, possibly Crystal Hz */ eax_crystal = ebx_tsc = crystal_hz = edx = 0; __cpuid(0x15, eax_crystal, ebx_tsc, crystal_hz, edx); if (ebx_tsc != 0) { if (!quiet && (ebx != 0)) fprintf(outf, "CPUID(0x15): eax_crystal: %d ebx_tsc: %d ecx_crystal_hz: %d\n", eax_crystal, ebx_tsc, crystal_hz); if (crystal_hz == 0) switch (model) { case INTEL_FAM6_SKYLAKE_L: /* SKL */ crystal_hz = 24000000; /* 24.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */ crystal_hz = 25000000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: crystal_hz = 19200000; /* 19.2 MHz */ break; default: crystal_hz = 0; } if (crystal_hz) { tsc_hz = (unsigned long long)crystal_hz *ebx_tsc / eax_crystal; if (!quiet) fprintf(outf, "TSC: %lld MHz (%d Hz * %d / %d / 1000000)\n", tsc_hz / 1000000, crystal_hz, ebx_tsc, eax_crystal); } } } if (max_level >= 0x16) { unsigned int base_mhz, max_mhz, bus_mhz, edx; /* * CPUID 16H Base MHz, Max MHz, Bus MHz */ base_mhz = max_mhz = bus_mhz = edx = 0; __cpuid(0x16, base_mhz, max_mhz, bus_mhz, edx); if (!quiet) fprintf(outf, "CPUID(0x16): base_mhz: %d max_mhz: %d bus_mhz: %d\n", base_mhz, max_mhz, bus_mhz); } if (has_aperf) aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); BIC_PRESENT(BIC_IRQ); BIC_PRESENT(BIC_TSC_MHz); if (probe_nhm_msrs(family, model)) { do_nhm_platform_info = 1; BIC_PRESENT(BIC_CPU_c1); 
BIC_PRESENT(BIC_CPU_c3); BIC_PRESENT(BIC_CPU_c6); BIC_PRESENT(BIC_SMI); } do_snb_cstates = has_snb_msrs(family, model); if (do_snb_cstates) BIC_PRESENT(BIC_CPU_c7); do_irtl_snb = has_snb_msrs(family, model); if (do_snb_cstates && (pkg_cstate_limit >= PCL__2)) BIC_PRESENT(BIC_Pkgpc2); if (pkg_cstate_limit >= PCL__3) BIC_PRESENT(BIC_Pkgpc3); if (pkg_cstate_limit >= PCL__6) BIC_PRESENT(BIC_Pkgpc6); if (do_snb_cstates && (pkg_cstate_limit >= PCL__7)) BIC_PRESENT(BIC_Pkgpc7); if (has_slv_msrs(family, model)) { BIC_NOT_PRESENT(BIC_Pkgpc2); BIC_NOT_PRESENT(BIC_Pkgpc3); BIC_PRESENT(BIC_Pkgpc6); BIC_NOT_PRESENT(BIC_Pkgpc7); BIC_PRESENT(BIC_Mod_c6); use_c1_residency_msr = 1; } if (is_jvl(family, model)) { BIC_NOT_PRESENT(BIC_CPU_c3); BIC_NOT_PRESENT(BIC_CPU_c7); BIC_NOT_PRESENT(BIC_Pkgpc2); BIC_NOT_PRESENT(BIC_Pkgpc3); BIC_NOT_PRESENT(BIC_Pkgpc6); BIC_NOT_PRESENT(BIC_Pkgpc7); } if (is_dnv(family, model)) { BIC_PRESENT(BIC_CPU_c1); BIC_NOT_PRESENT(BIC_CPU_c3); BIC_NOT_PRESENT(BIC_Pkgpc3); BIC_NOT_PRESENT(BIC_CPU_c7); BIC_NOT_PRESENT(BIC_Pkgpc7); use_c1_residency_msr = 1; } if (is_skx(family, model) || is_icx(family, model) || is_spr(family, model)) { BIC_NOT_PRESENT(BIC_CPU_c3); BIC_NOT_PRESENT(BIC_Pkgpc3); BIC_NOT_PRESENT(BIC_CPU_c7); BIC_NOT_PRESENT(BIC_Pkgpc7); } if (is_bdx(family, model)) { BIC_NOT_PRESENT(BIC_CPU_c7); BIC_NOT_PRESENT(BIC_Pkgpc7); } if (has_c8910_msrs(family, model)) { if (pkg_cstate_limit >= PCL__8) BIC_PRESENT(BIC_Pkgpc8); if (pkg_cstate_limit >= PCL__9) BIC_PRESENT(BIC_Pkgpc9); if (pkg_cstate_limit >= PCL_10) BIC_PRESENT(BIC_Pkgpc10); } do_irtl_hsw = has_c8910_msrs(family, model); if (has_skl_msrs(family, model)) { BIC_PRESENT(BIC_Totl_c0); BIC_PRESENT(BIC_Any_c0); BIC_PRESENT(BIC_GFX_c0); BIC_PRESENT(BIC_CPUGFX); } do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) || is_ehl(family, model)) BIC_NOT_PRESENT(BIC_CPU_c3); if (!quiet) decode_misc_pwr_mgmt_msr(); if 
(!quiet && has_slv_msrs(family, model)) decode_c6_demotion_policy_msr(); rapl_probe(family, model); perf_limit_reasons_probe(family, model); automatic_cstate_conversion_probe(family, model); check_tcc_offset(model_orig); if (!quiet) dump_cstate_pstate_config_info(family, model); intel_uncore_frequency_probe(); if (!quiet) print_dev_latency(); if (!quiet) dump_sysfs_cstate_config(); if (!quiet) dump_sysfs_pstate_config(); if (has_skl_msrs(family, model) || is_ehl(family, model)) calculate_tsc_tweak(); if (!access("/sys/class/drm/card0/power/rc6_residency_ms", R_OK)) BIC_PRESENT(BIC_GFX_rc6); if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) BIC_PRESENT(BIC_GFXMHz); if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_act_freq_mhz", R_OK)) BIC_PRESENT(BIC_GFXACTMHz); if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) BIC_PRESENT(BIC_CPU_LPI); else BIC_NOT_PRESENT(BIC_CPU_LPI); if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK)) BIC_PRESENT(BIC_CORE_THROT_CNT); else BIC_NOT_PRESENT(BIC_CORE_THROT_CNT); if (!access(sys_lpi_file_sysfs, R_OK)) { sys_lpi_file = sys_lpi_file_sysfs; BIC_PRESENT(BIC_SYS_LPI); } else if (!access(sys_lpi_file_debugfs, R_OK)) { sys_lpi_file = sys_lpi_file_debugfs; BIC_PRESENT(BIC_SYS_LPI); } else { sys_lpi_file_sysfs = NULL; BIC_NOT_PRESENT(BIC_SYS_LPI); } if (!quiet) decode_misc_feature_control(); return; } /* * in /dev/cpu/ return success for names that are numbers * ie. filter out ".", "..", "microcode". 
*/
int dir_filter(const struct dirent *dirp)
{
	/* keep only entries whose names start with a digit (cpu numbers) */
	if (isdigit(dirp->d_name[0]))
		return 1;
	else
		return 0;
}

/*
 * topology_probe() - discover CPU topology from /proc and sysfs helpers.
 *
 * Populates topo (num_cpus and, further below, core/die/package counts),
 * the global cpus[] array, and the cpu_present/cpu_affinity sets, and
 * enables the CPU column when more than one CPU is present.
 */
void topology_probe()
{
	int i;
	int max_core_id = 0;
	int max_package_id = 0;
	int max_die_id = 0;
	int max_siblings = 0;

	/* Initialize num_cpus, max_cpu_num */
	set_max_cpu_num();
	topo.num_cpus = 0;
	for_all_proc_cpus(count_cpus);
	if (!summary_only && topo.num_cpus > 1)
		BIC_PRESENT(BIC_CPU);

	if (debug > 1)
		fprintf(outf, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);

	cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
	if (cpus == NULL)
		err(1, "calloc cpus");

	/*
	 * Allocate and initialize cpu_present_set
	 */
	cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_present_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
	for_all_proc_cpus(mark_cpu_present);

	/*
	 * Validate that all cpus in cpu_subset are also in cpu_present_set
	 */
	/* NOTE(review): when --cpu was never given, cpu_subset is NULL and
	 * cpu_subset_size is 0; CPU_ISSET_S then appears to evaluate false
	 * without dereferencing the set -- TODO confirm against glibc. */
	for (i = 0; i < CPU_SUBSET_MAXCPUS; ++i) {
		if (CPU_ISSET_S(i, cpu_subset_size, cpu_subset))
			if (!CPU_ISSET_S(i, cpu_present_setsize, cpu_present_set))
				err(1, "cpu%d not present", i);
	}

	/*
	 * Allocate and initialize cpu_affinity_set
	 */
	cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
	if (cpu_affinity_set == NULL)
		err(3, "CPU_ALLOC");
	cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
	CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);

	for_all_proc_cpus(init_thread_id);

	/*
	 * For online cpus
	 * find max_core_id, max_package_id
	 */
	for (i = 0; i <= topo.max_cpu_num; ++i) {
		int siblings;

		if (cpu_is_not_present(i)) {
			if (debug > 1)
				fprintf(outf, "cpu%d NOT PRESENT\n", i);
			continue;
		}

		cpus[i].logical_cpu_id = i;

		/* get package information */
		cpus[i].physical_package_id = get_physical_package_id(i);
		if (cpus[i].physical_package_id > max_package_id)
			max_package_id = cpus[i].physical_package_id;

		/* get die information */
		cpus[i].die_id = get_die_id(i);
		if (cpus[i].die_id > max_die_id)
			max_die_id =
cpus[i].die_id; /* get numa node information */ cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); if (cpus[i].physical_node_id > topo.max_node_num) topo.max_node_num = cpus[i].physical_node_id; /* get core information */ cpus[i].physical_core_id = get_core_id(i); if (cpus[i].physical_core_id > max_core_id) max_core_id = cpus[i].physical_core_id; /* get thread information */ siblings = get_thread_siblings(&cpus[i]); if (siblings > max_siblings) max_siblings = siblings; if (cpus[i].thread_id == 0) topo.num_cores++; } topo.cores_per_node = max_core_id + 1; if (debug > 1) fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", max_core_id, topo.cores_per_node); if (!summary_only && topo.cores_per_node > 1) BIC_PRESENT(BIC_Core); topo.num_die = max_die_id + 1; if (debug > 1) fprintf(outf, "max_die_id %d, sizing for %d die\n", max_die_id, topo.num_die); if (!summary_only && topo.num_die > 1) BIC_PRESENT(BIC_Die); topo.num_packages = max_package_id + 1; if (debug > 1) fprintf(outf, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); if (!summary_only && topo.num_packages > 1) BIC_PRESENT(BIC_Package); set_node_data(); if (debug > 1) fprintf(outf, "nodes_per_pkg %d\n", topo.nodes_per_pkg); if (!summary_only && topo.nodes_per_pkg > 1) BIC_PRESENT(BIC_Node); topo.threads_per_core = max_siblings; if (debug > 1) fprintf(outf, "max_siblings %d\n", max_siblings); if (debug < 1) return; for (i = 0; i <= topo.max_cpu_num; ++i) { if (cpu_is_not_present(i)) continue; fprintf(outf, "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n", i, cpus[i].physical_package_id, cpus[i].die_id, cpus[i].physical_node_id, cpus[i].logical_node_id, cpus[i].physical_core_id, cpus[i].thread_id); } } void allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) { int i; int num_cores = topo.cores_per_node * topo.nodes_per_pkg * topo.num_packages; int num_threads = topo.threads_per_core * num_cores; *t = 
calloc(num_threads, sizeof(struct thread_data)); if (*t == NULL) goto error; for (i = 0; i < num_threads; i++) (*t)[i].cpu_id = -1; *c = calloc(num_cores, sizeof(struct core_data)); if (*c == NULL) goto error; for (i = 0; i < num_cores; i++) (*c)[i].core_id = -1; *p = calloc(topo.num_packages, sizeof(struct pkg_data)); if (*p == NULL) goto error; for (i = 0; i < topo.num_packages; i++) (*p)[i].package_id = i; return; error: err(1, "calloc counters"); } /* * init_counter() * * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE */ void init_counter(struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base, int cpu_id) { int pkg_id = cpus[cpu_id].physical_package_id; int node_id = cpus[cpu_id].logical_node_id; int core_id = cpus[cpu_id].physical_core_id; int thread_id = cpus[cpu_id].thread_id; struct thread_data *t; struct core_data *c; struct pkg_data *p; /* Workaround for systems where physical_node_id==-1 * and logical_node_id==(-1 - topo.num_cpus) */ if (node_id < 0) node_id = 0; t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); c = GET_CORE(core_base, core_id, node_id, pkg_id); p = GET_PKG(pkg_base, pkg_id); t->cpu_id = cpu_id; if (thread_id == 0) { t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; if (cpu_is_first_core_in_package(cpu_id)) t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; } c->core_id = core_id; p->package_id = pkg_id; } int initialize_counters(int cpu_id) { init_counter(EVEN_COUNTERS, cpu_id); init_counter(ODD_COUNTERS, cpu_id); return 0; } void allocate_output_buffer() { output_buffer = calloc(1, (1 + topo.num_cpus) * 2048); outp = output_buffer; if (outp == NULL) err(-1, "calloc output buffer"); } void allocate_fd_percpu(void) { fd_percpu = calloc(topo.max_cpu_num + 1, sizeof(int)); if (fd_percpu == NULL) err(-1, "calloc fd_percpu"); } void allocate_irq_buffers(void) { irq_column_2_cpu = calloc(topo.num_cpus, sizeof(int)); if (irq_column_2_cpu == NULL) err(-1, "calloc %d", topo.num_cpus); irqs_per_cpu = 
calloc(topo.max_cpu_num + 1, sizeof(int)); if (irqs_per_cpu == NULL) err(-1, "calloc %d", topo.max_cpu_num + 1); } void setup_all_buffers(void) { topology_probe(); allocate_irq_buffers(); allocate_fd_percpu(); allocate_counters(&thread_even, &core_even, &package_even); allocate_counters(&thread_odd, &core_odd, &package_odd); allocate_output_buffer(); for_all_proc_cpus(initialize_counters); } void set_base_cpu(void) { base_cpu = sched_getcpu(); if (base_cpu < 0) err(-ENODEV, "No valid cpus found"); if (debug > 1) fprintf(outf, "base_cpu = %d\n", base_cpu); } void turbostat_init() { setup_all_buffers(); set_base_cpu(); check_dev_msr(); check_permissions(); process_cpuid(); linux_perf_init(); if (!quiet) for_all_cpus(print_hwp, ODD_COUNTERS); if (!quiet) for_all_cpus(print_epb, ODD_COUNTERS); if (!quiet) for_all_cpus(print_perf_limit, ODD_COUNTERS); if (!quiet) for_all_cpus(print_rapl, ODD_COUNTERS); for_all_cpus(set_temperature_target, ODD_COUNTERS); for_all_cpus(get_cpu_type, ODD_COUNTERS); for_all_cpus(get_cpu_type, EVEN_COUNTERS); if (!quiet) for_all_cpus(print_thermal, ODD_COUNTERS); if (!quiet && do_irtl_snb) print_irtl(); if (DO_BIC(BIC_IPC)) (void)get_instr_count_fd(base_cpu); } int fork_it(char **argv) { pid_t child_pid; int status; snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, EVEN_COUNTERS); first_counter_read = 0; if (status) exit(status); /* clear affinity side-effect of get_counters() */ sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); child_pid = fork(); if (!child_pid) { /* child */ execvp(argv[0], argv); err(errno, "exec %s", argv[0]); } else { /* parent */ if (child_pid == -1) err(1, "fork"); signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); if (waitpid(child_pid, &status, 0) == -1) err(status, "waitpid"); if (WIFEXITED(status)) status = WEXITSTATUS(status); } /* * n.b. 
fork_it() does not check for errors from for_all_cpus() * because re-starting is problematic when forking */ snapshot_proc_sysfs_files(); for_all_cpus(get_counters, ODD_COUNTERS); gettimeofday(&tv_odd, (struct timezone *)NULL); timersub(&tv_odd, &tv_even, &tv_delta); if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) fprintf(outf, "%s: Counter reset detected\n", progname); else { compute_average(EVEN_COUNTERS); format_all_counters(EVEN_COUNTERS); } fprintf(outf, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec / 1000000.0); flush_output_stderr(); return status; } int get_and_dump_counters(void) { int status; snapshot_proc_sysfs_files(); status = for_all_cpus(get_counters, ODD_COUNTERS); if (status) return status; status = for_all_cpus(dump_counters, ODD_COUNTERS); if (status) return status; flush_output_stdout(); return status; } void print_version() { fprintf(outf, "turbostat version 2023.03.17 - Len Brown <[email protected]>\n"); } #define COMMAND_LINE_SIZE 2048 void print_bootcmd(void) { char bootcmd[COMMAND_LINE_SIZE]; FILE *fp; int ret; memset(bootcmd, 0, COMMAND_LINE_SIZE); fp = fopen("/proc/cmdline", "r"); if (!fp) return; ret = fread(bootcmd, sizeof(char), COMMAND_LINE_SIZE - 1, fp); if (ret) { bootcmd[ret] = '\0'; /* the last character is already '\n' */ fprintf(outf, "Kernel command line: %s", bootcmd); } fclose(fp); } int add_counter(unsigned int msr_num, char *path, char *name, unsigned int width, enum counter_scope scope, enum counter_type type, enum counter_format format, int flags) { struct msr_counter *msrp; msrp = calloc(1, sizeof(struct msr_counter)); if (msrp == NULL) { perror("calloc"); exit(1); } msrp->msr_num = msr_num; strncpy(msrp->name, name, NAME_BYTES - 1); if (path) strncpy(msrp->path, path, PATH_BYTES - 1); msrp->width = width; msrp->type = type; msrp->format = format; msrp->flags = flags; switch (scope) { case SCOPE_CPU: msrp->next = sys.tp; sys.tp = msrp; sys.added_thread_counters++; if (sys.added_thread_counters > 
MAX_ADDED_THREAD_COUNTERS) { fprintf(stderr, "exceeded max %d added thread counters\n", MAX_ADDED_COUNTERS); exit(-1); } break; case SCOPE_CORE: msrp->next = sys.cp; sys.cp = msrp; sys.added_core_counters++; if (sys.added_core_counters > MAX_ADDED_COUNTERS) { fprintf(stderr, "exceeded max %d added core counters\n", MAX_ADDED_COUNTERS); exit(-1); } break; case SCOPE_PACKAGE: msrp->next = sys.pp; sys.pp = msrp; sys.added_package_counters++; if (sys.added_package_counters > MAX_ADDED_COUNTERS) { fprintf(stderr, "exceeded max %d added package counters\n", MAX_ADDED_COUNTERS); exit(-1); } break; } return 0; } void parse_add_command(char *add_command) { int msr_num = 0; char *path = NULL; char name_buffer[NAME_BYTES] = ""; int width = 64; int fail = 0; enum counter_scope scope = SCOPE_CPU; enum counter_type type = COUNTER_CYCLES; enum counter_format format = FORMAT_DELTA; while (add_command) { if (sscanf(add_command, "msr0x%x", &msr_num) == 1) goto next; if (sscanf(add_command, "msr%d", &msr_num) == 1) goto next; if (*add_command == '/') { path = add_command; goto next; } if (sscanf(add_command, "u%d", &width) == 1) { if ((width == 32) || (width == 64)) goto next; width = 64; } if (!strncmp(add_command, "cpu", strlen("cpu"))) { scope = SCOPE_CPU; goto next; } if (!strncmp(add_command, "core", strlen("core"))) { scope = SCOPE_CORE; goto next; } if (!strncmp(add_command, "package", strlen("package"))) { scope = SCOPE_PACKAGE; goto next; } if (!strncmp(add_command, "cycles", strlen("cycles"))) { type = COUNTER_CYCLES; goto next; } if (!strncmp(add_command, "seconds", strlen("seconds"))) { type = COUNTER_SECONDS; goto next; } if (!strncmp(add_command, "usec", strlen("usec"))) { type = COUNTER_USEC; goto next; } if (!strncmp(add_command, "raw", strlen("raw"))) { format = FORMAT_RAW; goto next; } if (!strncmp(add_command, "delta", strlen("delta"))) { format = FORMAT_DELTA; goto next; } if (!strncmp(add_command, "percent", strlen("percent"))) { format = FORMAT_PERCENT; goto 
next; } if (sscanf(add_command, "%18s,%*s", name_buffer) == 1) { /* 18 < NAME_BYTES */ char *eos; eos = strchr(name_buffer, ','); if (eos) *eos = '\0'; goto next; } next: add_command = strchr(add_command, ','); if (add_command) { *add_command = '\0'; add_command++; } } if ((msr_num == 0) && (path == NULL)) { fprintf(stderr, "--add: (msrDDD | msr0xXXX | /path_to_counter ) required\n"); fail++; } /* generate default column header */ if (*name_buffer == '\0') { if (width == 32) sprintf(name_buffer, "M0x%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); else sprintf(name_buffer, "M0X%x%s", msr_num, format == FORMAT_PERCENT ? "%" : ""); } if (add_counter(msr_num, path, name_buffer, width, scope, type, format, 0)) fail++; if (fail) { help(); exit(1); } } int is_deferred_add(char *name) { int i; for (i = 0; i < deferred_add_index; ++i) if (!strcmp(name, deferred_add_names[i])) return 1; return 0; } int is_deferred_skip(char *name) { int i; for (i = 0; i < deferred_skip_index; ++i) if (!strcmp(name, deferred_skip_names[i])) return 1; return 0; } void probe_sysfs(void) { char path[64]; char name_buf[16]; FILE *input; int state; char *sp; for (state = 10; state >= 0; --state) { sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); input = fopen(path, "r"); if (input == NULL) continue; if (!fgets(name_buf, sizeof(name_buf), input)) err(1, "%s: failed to read file", path); /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ sp = strchr(name_buf, '-'); if (!sp) sp = strchrnul(name_buf, '\n'); *sp = '%'; *(sp + 1) = '\0'; remove_underbar(name_buf); fclose(input); sprintf(path, "cpuidle/state%d/time", state); if (!DO_BIC(BIC_sysfs) && !is_deferred_add(name_buf)) continue; if (is_deferred_skip(name_buf)) continue; add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_USEC, FORMAT_PERCENT, SYSFS_PERCPU); } for (state = 10; state >= 0; --state) { sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); input = 
fopen(path, "r"); if (input == NULL) continue; if (!fgets(name_buf, sizeof(name_buf), input)) err(1, "%s: failed to read file", path); /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ sp = strchr(name_buf, '-'); if (!sp) sp = strchrnul(name_buf, '\n'); *sp = '\0'; fclose(input); remove_underbar(name_buf); sprintf(path, "cpuidle/state%d/usage", state); if (!DO_BIC(BIC_sysfs) && !is_deferred_add(name_buf)) continue; if (is_deferred_skip(name_buf)) continue; add_counter(0, path, name_buf, 64, SCOPE_CPU, COUNTER_ITEMS, FORMAT_DELTA, SYSFS_PERCPU); } } /* * parse cpuset with following syntax * 1,2,4..6,8-10 and set bits in cpu_subset */ void parse_cpu_command(char *optarg) { unsigned int start, end; char *next; if (!strcmp(optarg, "core")) { if (cpu_subset) goto error; show_core_only++; return; } if (!strcmp(optarg, "package")) { if (cpu_subset) goto error; show_pkg_only++; return; } if (show_core_only || show_pkg_only) goto error; cpu_subset = CPU_ALLOC(CPU_SUBSET_MAXCPUS); if (cpu_subset == NULL) err(3, "CPU_ALLOC"); cpu_subset_size = CPU_ALLOC_SIZE(CPU_SUBSET_MAXCPUS); CPU_ZERO_S(cpu_subset_size, cpu_subset); next = optarg; while (next && *next) { if (*next == '-') /* no negative cpu numbers */ goto error; start = strtoul(next, &next, 10); if (start >= CPU_SUBSET_MAXCPUS) goto error; CPU_SET_S(start, cpu_subset_size, cpu_subset); if (*next == '\0') break; if (*next == ',') { next += 1; continue; } if (*next == '-') { next += 1; /* start range */ } else if (*next == '.') { next += 1; if (*next == '.') next += 1; /* start range */ else goto error; } end = strtoul(next, &next, 10); if (end <= start) goto error; while (++start <= end) { if (start >= CPU_SUBSET_MAXCPUS) goto error; CPU_SET_S(start, cpu_subset_size, cpu_subset); } if (*next == ',') next += 1; else if (*next != '\0') goto error; } return; error: fprintf(stderr, "\"--cpu %s\" malformed\n", optarg); help(); exit(-1); } void cmdline(int argc, char **argv) { int opt; int option_index = 0; static 
struct option long_options[] = { { "add", required_argument, 0, 'a' }, { "cpu", required_argument, 0, 'c' }, { "Dump", no_argument, 0, 'D' }, { "debug", no_argument, 0, 'd' }, /* internal, not documented */ { "enable", required_argument, 0, 'e' }, { "interval", required_argument, 0, 'i' }, { "IPC", no_argument, 0, 'I' }, { "num_iterations", required_argument, 0, 'n' }, { "header_iterations", required_argument, 0, 'N' }, { "help", no_argument, 0, 'h' }, { "hide", required_argument, 0, 'H' }, // meh, -h taken by --help { "Joules", no_argument, 0, 'J' }, { "list", no_argument, 0, 'l' }, { "out", required_argument, 0, 'o' }, { "quiet", no_argument, 0, 'q' }, { "show", required_argument, 0, 's' }, { "Summary", no_argument, 0, 'S' }, { "TCC", required_argument, 0, 'T' }, { "version", no_argument, 0, 'v' }, { 0, 0, 0, 0 } }; progname = argv[0]; while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v", long_options, &option_index)) != -1) { switch (opt) { case 'a': parse_add_command(optarg); break; case 'c': parse_cpu_command(optarg); break; case 'D': dump_only++; break; case 'e': /* --enable specified counter */ bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST); break; case 'd': debug++; ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); break; case 'H': /* * --hide: do not show those specified * multiple invocations simply clear more bits in enabled mask */ bic_enabled &= ~bic_lookup(optarg, HIDE_LIST); break; case 'h': default: help(); exit(1); case 'i': { double interval = strtod(optarg, NULL); if (interval < 0.001) { fprintf(outf, "interval %f seconds is too small\n", interval); exit(2); } interval_tv.tv_sec = interval_ts.tv_sec = interval; interval_tv.tv_usec = (interval - interval_tv.tv_sec) * 1000000; interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000; } break; case 'J': rapl_joules++; break; case 'l': ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); list_header_only++; quiet++; break; case 'o': outf = fopen_or_die(optarg, "w"); break; case 'q': quiet = 1; 
break; case 'n': num_iterations = strtod(optarg, NULL); if (num_iterations <= 0) { fprintf(outf, "iterations %d should be positive number\n", num_iterations); exit(2); } break; case 'N': header_iterations = strtod(optarg, NULL); if (header_iterations <= 0) { fprintf(outf, "iterations %d should be positive number\n", header_iterations); exit(2); } break; case 's': /* * --show: show only those specified * The 1st invocation will clear and replace the enabled mask * subsequent invocations can add to it. */ if (shown == 0) bic_enabled = bic_lookup(optarg, SHOW_LIST); else bic_enabled |= bic_lookup(optarg, SHOW_LIST); shown = 1; break; case 'S': summary_only++; break; case 'T': tj_max_override = atoi(optarg); break; case 'v': print_version(); exit(0); break; } } } int main(int argc, char **argv) { outf = stderr; cmdline(argc, argv); if (!quiet) { print_version(); print_bootcmd(); } probe_sysfs(); turbostat_init(); msr_sum_record(); /* dump counters and exit */ if (dump_only) return get_and_dump_counters(); /* list header and exit */ if (list_header_only) { print_header(","); flush_output_stdout(); return 0; } /* * if any params left, it must be a command to fork */ if (argc - optind) return fork_it(argv + optind); else turbostat_loop(); return 0; }
linux-master
tools/power/x86/turbostat/turbostat.c
// SPDX-License-Identifier: GPL-2.0 /* * Intel Speed Select -- Enumerate and control features for TPMI Interface * Copyright (c) 2022 Intel Corporation. */ #include <linux/isst_if.h> #include "isst.h" int tpmi_process_ioctl(int ioctl_no, void *info) { const char *pathname = "/dev/isst_interface"; int fd; if (is_debug_enabled()) { debug_printf("Issue IOCTL: "); switch (ioctl_no) { case ISST_IF_CORE_POWER_STATE: debug_printf("ISST_IF_CORE_POWER_STATE\n"); break; case ISST_IF_CLOS_PARAM: debug_printf("ISST_IF_CLOS_PARAM\n"); break; case ISST_IF_CLOS_ASSOC: debug_printf("ISST_IF_CLOS_ASSOC\n"); break; case ISST_IF_PERF_LEVELS: debug_printf("ISST_IF_PERF_LEVELS\n"); break; case ISST_IF_PERF_SET_LEVEL: debug_printf("ISST_IF_PERF_SET_LEVEL\n"); break; case ISST_IF_PERF_SET_FEATURE: debug_printf("ISST_IF_PERF_SET_FEATURE\n"); break; case ISST_IF_GET_PERF_LEVEL_INFO: debug_printf("ISST_IF_GET_PERF_LEVEL_INFO\n"); break; case ISST_IF_GET_PERF_LEVEL_CPU_MASK: debug_printf("ISST_IF_GET_PERF_LEVEL_CPU_MASK\n"); break; case ISST_IF_GET_BASE_FREQ_INFO: debug_printf("ISST_IF_GET_BASE_FREQ_INFO\n"); break; case ISST_IF_GET_BASE_FREQ_CPU_MASK: debug_printf("ISST_IF_GET_BASE_FREQ_CPU_MASK\n"); break; case ISST_IF_GET_TURBO_FREQ_INFO: debug_printf("ISST_IF_GET_TURBO_FREQ_INFO\n"); break; case ISST_IF_COUNT_TPMI_INSTANCES: debug_printf("ISST_IF_COUNT_TPMI_INSTANCES\n"); break; default: debug_printf("%d\n", ioctl_no); break; } } fd = open(pathname, O_RDWR); if (fd < 0) return -1; if (ioctl(fd, ioctl_no, info) == -1) { debug_printf("IOCTL %d Failed\n", ioctl_no); close(fd); return -1; } close(fd); return 0; } static int tpmi_get_disp_freq_multiplier(void) { return 1; } static int tpmi_get_trl_max_levels(void) { return TRL_MAX_LEVELS; } static char *tpmi_get_trl_level_name(int level) { switch (level) { case 0: return "level-0"; case 1: return "level-1"; case 2: return "level-2"; case 3: return "level-3"; case 4: return "level-4"; case 5: return "level-5"; case 6: return "level-6"; case 7: 
return "level-7"; default: return NULL; } } static void tpmi_update_platform_param(enum isst_platform_param param, int value) { /* No params need to be updated for now */ } static int tpmi_is_punit_valid(struct isst_id *id) { struct isst_tpmi_instance_count info; int ret; if (id->punit < 0) return 0; info.socket_id = id->pkg; ret = tpmi_process_ioctl(ISST_IF_COUNT_TPMI_INSTANCES, &info); if (ret == -1) return 0; if (info.valid_mask & BIT(id->punit)) return 1; return 0; } static int tpmi_read_pm_config(struct isst_id *id, int *cp_state, int *cp_cap) { struct isst_core_power info; int ret; info.get_set = 0; info.socket_id = id->pkg; info.power_domain_id = id->punit; ret = tpmi_process_ioctl(ISST_IF_CORE_POWER_STATE, &info); if (ret == -1) return ret; *cp_state = info.enable; *cp_cap = info.supported; return 0; } int tpmi_get_config_levels(struct isst_id *id, struct isst_pkg_ctdp *pkg_dev) { struct isst_perf_level_info info; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; ret = tpmi_process_ioctl(ISST_IF_PERF_LEVELS, &info); if (ret == -1) return ret; pkg_dev->version = info.feature_rev; pkg_dev->levels = info.max_level; pkg_dev->locked = info.locked; pkg_dev->current_level = info.current_level; pkg_dev->locked = info.locked; pkg_dev->enabled = info.enabled; return 0; } static int tpmi_get_ctdp_control(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { struct isst_core_power core_power_info; struct isst_perf_level_info info; int level_mask; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; ret = tpmi_process_ioctl(ISST_IF_PERF_LEVELS, &info); if (ret == -1) return -1; if (config_index != 0xff) level_mask = 1 << config_index; else level_mask = config_index; if (!(info.level_mask & level_mask)) return -1; ctdp_level->fact_support = info.sst_tf_support; ctdp_level->pbf_support = info.sst_bf_support; ctdp_level->fact_enabled = !!(info.feature_state & BIT(1)); ctdp_level->pbf_enabled = 
!!(info.feature_state & BIT(0)); core_power_info.get_set = 0; core_power_info.socket_id = id->pkg; core_power_info.power_domain_id = id->punit; ret = tpmi_process_ioctl(ISST_IF_CORE_POWER_STATE, &core_power_info); if (ret == -1) return ret; ctdp_level->sst_cp_support = core_power_info.supported; ctdp_level->sst_cp_enabled = core_power_info.enable; debug_printf ("cpu:%d CONFIG_TDP_GET_TDP_CONTROL fact_support:%d pbf_support: %d fact_enabled:%d pbf_enabled:%d\n", id->cpu, ctdp_level->fact_support, ctdp_level->pbf_support, ctdp_level->fact_enabled, ctdp_level->pbf_enabled); return 0; } static int tpmi_get_tdp_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { struct isst_perf_level_data_info info; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = config_index; ret = tpmi_process_ioctl(ISST_IF_GET_PERF_LEVEL_INFO, &info); if (ret == -1) return ret; ctdp_level->pkg_tdp = info.thermal_design_power_w; ctdp_level->tdp_ratio = info.tdp_ratio; ctdp_level->sse_p1 = info.base_freq_mhz; ctdp_level->avx2_p1 = info.base_freq_avx2_mhz; ctdp_level->avx512_p1 = info.base_freq_avx512_mhz; ctdp_level->amx_p1 = info.base_freq_amx_mhz; ctdp_level->t_proc_hot = info.tjunction_max_c; ctdp_level->mem_freq = info.max_memory_freq_mhz; ctdp_level->cooling_type = info.cooling_type; ctdp_level->uncore_p0 = info.p0_fabric_freq_mhz; ctdp_level->uncore_p1 = info.p1_fabric_freq_mhz; ctdp_level->uncore_pm = info.pm_fabric_freq_mhz; debug_printf ("cpu:%d ctdp:%d CONFIG_TDP_GET_TDP_INFO tdp_ratio:%d pkg_tdp:%d ctdp_level->t_proc_hot:%d\n", id->cpu, config_index, ctdp_level->tdp_ratio, ctdp_level->pkg_tdp, ctdp_level->t_proc_hot); return 0; } static int tpmi_get_pwr_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { /* TBD */ ctdp_level->pkg_max_power = 0; ctdp_level->pkg_min_power = 0; debug_printf ("cpu:%d ctdp:%d CONFIG_TDP_GET_PWR_INFO pkg_max_power:%d pkg_min_power:%d\n", id->cpu, 
config_index, ctdp_level->pkg_max_power, ctdp_level->pkg_min_power); return 0; } int tpmi_get_coremask_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { struct isst_perf_level_cpu_mask info; int ret, cpu_count; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = config_index; info.punit_cpu_map = 1; ret = tpmi_process_ioctl(ISST_IF_GET_PERF_LEVEL_CPU_MASK, &info); if (ret == -1) return ret; set_cpu_mask_from_punit_coremask(id, info.mask, ctdp_level->core_cpumask_size, ctdp_level->core_cpumask, &cpu_count); ctdp_level->cpu_count = cpu_count; debug_printf("cpu:%d ctdp:%d core_mask ino cpu count:%d\n", id->cpu, config_index, ctdp_level->cpu_count); return 0; } static int tpmi_get_get_trls(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { struct isst_perf_level_data_info info; int ret, i, j; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = config_index; ret = tpmi_process_ioctl(ISST_IF_GET_PERF_LEVEL_INFO, &info); if (ret == -1) return ret; if (info.max_buckets > TRL_MAX_BUCKETS) info.max_buckets = TRL_MAX_BUCKETS; if (info.max_trl_levels > TRL_MAX_LEVELS) info.max_trl_levels = TRL_MAX_LEVELS; for (i = 0; i < info.max_trl_levels; ++i) for (j = 0; j < info.max_buckets; ++j) ctdp_level->trl_ratios[i][j] = info.trl_freq_mhz[i][j]; return 0; } static int tpmi_get_get_trl(struct isst_id *id, int level, int config_index, int *trl) { struct isst_pkg_ctdp_level_info ctdp_level; int ret, i; ret = tpmi_get_get_trls(id, config_index, &ctdp_level); if (ret) return ret; /* FIX ME: Just return for level 0 */ for (i = 0; i < 8; ++i) trl[i] = ctdp_level.trl_ratios[0][i]; return 0; } static int tpmi_get_trl_bucket_info(struct isst_id *id, int config_index, unsigned long long *buckets_info) { struct isst_perf_level_data_info info; unsigned char *mask = (unsigned char *)buckets_info; int ret, i; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = 
config_index; ret = tpmi_process_ioctl(ISST_IF_GET_PERF_LEVEL_INFO, &info); if (ret == -1) return ret; if (info.max_buckets > TRL_MAX_BUCKETS) info.max_buckets = TRL_MAX_BUCKETS; for (i = 0; i < info.max_buckets; ++i) mask[i] = info.bucket_core_counts[i]; debug_printf("cpu:%d TRL bucket info: 0x%llx\n", id->cpu, *buckets_info); return 0; } static int tpmi_set_tdp_level(struct isst_id *id, int tdp_level) { struct isst_perf_level_control info; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = tdp_level; ret = tpmi_process_ioctl(ISST_IF_PERF_SET_LEVEL, &info); if (ret == -1) return ret; return 0; } static int _pbf_get_coremask_info(struct isst_id *id, int config_index, struct isst_pbf_info *pbf_info) { struct isst_perf_level_cpu_mask info; int ret, cpu_count; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = config_index; info.punit_cpu_map = 1; ret = tpmi_process_ioctl(ISST_IF_GET_BASE_FREQ_CPU_MASK, &info); if (ret == -1) return ret; set_cpu_mask_from_punit_coremask(id, info.mask, pbf_info->core_cpumask_size, pbf_info->core_cpumask, &cpu_count); debug_printf("cpu:%d ctdp:%d pbf core_mask info cpu count:%d\n", id->cpu, config_index, cpu_count); return 0; } static int tpmi_get_pbf_info(struct isst_id *id, int level, struct isst_pbf_info *pbf_info) { struct isst_base_freq_info info; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = level; ret = tpmi_process_ioctl(ISST_IF_GET_BASE_FREQ_INFO, &info); if (ret == -1) return ret; pbf_info->p1_low = info.low_base_freq_mhz; pbf_info->p1_high = info.high_base_freq_mhz; pbf_info->tdp = info.thermal_design_power_w; pbf_info->t_prochot = info.tjunction_max_c; debug_printf("cpu:%d ctdp:%d pbf info:%d:%d:%d:%d\n", id->cpu, level, pbf_info->p1_low, pbf_info->p1_high, pbf_info->tdp, pbf_info->t_prochot); return _pbf_get_coremask_info(id, level, pbf_info); } static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable) { struct 
isst_pkg_ctdp pkg_dev; struct isst_pkg_ctdp_level_info ctdp_level; int current_level; struct isst_perf_feature_control info; int ret; ret = isst_get_ctdp_levels(id, &pkg_dev); if (ret) debug_printf("cpu:%d No support for dynamic ISST\n", id->cpu); current_level = pkg_dev.current_level; ret = isst_get_ctdp_control(id, current_level, &ctdp_level); if (ret) return ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.feature = 0; if (pbf) { if (ctdp_level.fact_enabled) info.feature |= BIT(1); if (enable) info.feature |= BIT(0); else info.feature &= ~BIT(0); } else { if (enable && !ctdp_level.sst_cp_enabled) isst_display_error_info_message(0, "Make sure to execute before: core-power enable", 0, 0); if (ctdp_level.pbf_enabled) info.feature |= BIT(0); if (enable) info.feature |= BIT(1); else info.feature &= ~BIT(1); } ret = tpmi_process_ioctl(ISST_IF_PERF_SET_FEATURE, &info); if (ret == -1) return ret; return 0; } static int tpmi_get_fact_info(struct isst_id *id, int level, int fact_bucket, struct isst_fact_info *fact_info) { struct isst_turbo_freq_info info; int i, j; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = level; ret = tpmi_process_ioctl(ISST_IF_GET_TURBO_FREQ_INFO, &info); if (ret == -1) return ret; for (i = 0; i < info.max_clip_freqs; ++i) fact_info->lp_ratios[i] = info.lp_clip_freq_mhz[i]; if (info.max_buckets > TRL_MAX_BUCKETS) info.max_buckets = TRL_MAX_BUCKETS; if (info.max_trl_levels > TRL_MAX_LEVELS) info.max_trl_levels = TRL_MAX_LEVELS; for (i = 0; i < info.max_trl_levels; ++i) { for (j = 0; j < info.max_buckets; ++j) fact_info->bucket_info[j].hp_ratios[i] = info.trl_freq_mhz[i][j]; } for (i = 0; i < info.max_buckets; ++i) fact_info->bucket_info[i].hp_cores = info.bucket_core_counts[i]; return 0; } static void _set_uncore_min_max(struct isst_id *id, int max, int freq) { DIR *dir; FILE *filep; struct dirent *entry; char buffer[512]; unsigned int tmp_id; int ret; dir = 
opendir("/sys/devices/system/cpu/intel_uncore_frequency/"); if (!dir) return; while ((entry = readdir(dir)) != NULL ) { /* Check domain_id */ snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/intel_uncore_frequency/%s/domain_id", entry->d_name); filep = fopen(buffer, "r"); if (!filep) goto end; ret = fscanf(filep, "%u", &tmp_id); fclose(filep); if (ret != 1) goto end; if (tmp_id != id->punit) continue; /* Check package_id */ snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/intel_uncore_frequency/%s/package_id", entry->d_name); filep = fopen(buffer, "r"); if (!filep) goto end; ret = fscanf(filep, "%u", &tmp_id); fclose(filep); if (ret != 1) goto end; if (tmp_id != id->pkg) continue; /* Found the right sysfs path, adjust and quit */ if (max) snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/intel_uncore_frequency/%s/max_freq_khz", entry->d_name); else snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/intel_uncore_frequency/%s/min_freq_khz", entry->d_name); filep = fopen(buffer, "w"); if (!filep) goto end; fprintf(filep, "%d\n", freq); fclose(filep); break; } end: closedir(dir); } static void tpmi_adjust_uncore_freq(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { struct isst_perf_level_data_info info; int ret; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = config_index; ret = tpmi_process_ioctl(ISST_IF_GET_PERF_LEVEL_INFO, &info); if (ret == -1) return; ctdp_level->uncore_p0 = info.p0_fabric_freq_mhz; ctdp_level->uncore_p1 = info.p1_fabric_freq_mhz; ctdp_level->uncore_pm = info.pm_fabric_freq_mhz; if (ctdp_level->uncore_pm) _set_uncore_min_max(id, 0, ctdp_level->uncore_pm * 100000); if (ctdp_level->uncore_p0) _set_uncore_min_max(id, 1, ctdp_level->uncore_p0 * 100000); return; } static int tpmi_get_clos_information(struct isst_id *id, int *enable, int *type) { struct isst_core_power info; int ret; info.get_set = 0; info.socket_id = id->pkg; info.power_domain_id = id->punit; 
ret = tpmi_process_ioctl(ISST_IF_CORE_POWER_STATE, &info); if (ret == -1) return ret; *enable = info.enable; *type = info.priority_type; return 0; } static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos, int priority_type) { struct isst_core_power info; int i, ret, saved_punit; info.get_set = 1; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.enable = enable_clos; info.priority_type = priority_type; saved_punit = id->punit; /* Set for all other dies also. This is per package setting */ for (i = 0; i < MAX_PUNIT_PER_DIE; i++) { id->punit = i; if (isst_is_punit_valid(id)) { info.power_domain_id = i; ret = tpmi_process_ioctl(ISST_IF_CORE_POWER_STATE, &info); if (ret == -1) { id->punit = saved_punit; return ret; } } } id->punit = saved_punit; return 0; } int tpmi_pm_get_clos(struct isst_id *id, int clos, struct isst_clos_config *clos_config) { struct isst_clos_param info; int ret; info.get_set = 0; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.clos = clos; ret = tpmi_process_ioctl(ISST_IF_CLOS_PARAM, &info); if (ret == -1) return ret; clos_config->epp = 0; clos_config->clos_prop_prio = info.prop_prio; clos_config->clos_min = info.min_freq_mhz; clos_config->clos_max = info.max_freq_mhz; clos_config->clos_desired = 0; debug_printf("cpu:%d clos:%d min:%d max:%d\n", id->cpu, clos, clos_config->clos_min, clos_config->clos_max); return 0; } int tpmi_set_clos(struct isst_id *id, int clos, struct isst_clos_config *clos_config) { struct isst_clos_param info; int i, ret, saved_punit; info.get_set = 1; info.socket_id = id->pkg; info.power_domain_id = id->punit; info.clos = clos; info.prop_prio = clos_config->clos_prop_prio; info.min_freq_mhz = clos_config->clos_min; info.max_freq_mhz = clos_config->clos_max; if (info.min_freq_mhz <= 0xff) info.min_freq_mhz *= 100; if (info.max_freq_mhz <= 0xff) info.max_freq_mhz *= 100; saved_punit = id->punit; /* Set for all other dies also. 
This is per package setting */ for (i = 0; i < MAX_PUNIT_PER_DIE; i++) { id->punit = i; if (isst_is_punit_valid(id)) { info.power_domain_id = i; ret = tpmi_process_ioctl(ISST_IF_CLOS_PARAM, &info); if (ret == -1) { id->punit = saved_punit; return ret; } } } id->punit = saved_punit; debug_printf("set cpu:%d clos:%d min:%d max:%d\n", id->cpu, clos, clos_config->clos_min, clos_config->clos_max); return 0; } static int tpmi_clos_get_assoc_status(struct isst_id *id, int *clos_id) { struct isst_if_clos_assoc_cmds assoc_cmds; int ret; assoc_cmds.cmd_count = 1; assoc_cmds.get_set = 0; assoc_cmds.punit_cpu_map = 1; assoc_cmds.assoc_info[0].logical_cpu = find_phy_core_num(id->cpu); assoc_cmds.assoc_info[0].socket_id = id->pkg; assoc_cmds.assoc_info[0].power_domain_id = id->punit; ret = tpmi_process_ioctl(ISST_IF_CLOS_ASSOC, &assoc_cmds); if (ret == -1) return ret; *clos_id = assoc_cmds.assoc_info[0].clos; return 0; } static int tpmi_clos_associate(struct isst_id *id, int clos_id) { struct isst_if_clos_assoc_cmds assoc_cmds; int ret; assoc_cmds.cmd_count = 1; assoc_cmds.get_set = 1; assoc_cmds.punit_cpu_map = 1; assoc_cmds.assoc_info[0].logical_cpu = find_phy_core_num(id->cpu); assoc_cmds.assoc_info[0].clos = clos_id; assoc_cmds.assoc_info[0].socket_id = id->pkg; assoc_cmds.assoc_info[0].power_domain_id = id->punit; ret = tpmi_process_ioctl(ISST_IF_CLOS_ASSOC, &assoc_cmds); if (ret == -1) return ret; return 0; } static struct isst_platform_ops tpmi_ops = { .get_disp_freq_multiplier = tpmi_get_disp_freq_multiplier, .get_trl_max_levels = tpmi_get_trl_max_levels, .get_trl_level_name = tpmi_get_trl_level_name, .update_platform_param = tpmi_update_platform_param, .is_punit_valid = tpmi_is_punit_valid, .read_pm_config = tpmi_read_pm_config, .get_config_levels = tpmi_get_config_levels, .get_ctdp_control = tpmi_get_ctdp_control, .get_tdp_info = tpmi_get_tdp_info, .get_pwr_info = tpmi_get_pwr_info, .get_coremask_info = tpmi_get_coremask_info, .get_get_trl = tpmi_get_get_trl, 
.get_get_trls = tpmi_get_get_trls, .get_trl_bucket_info = tpmi_get_trl_bucket_info, .set_tdp_level = tpmi_set_tdp_level, .get_pbf_info = tpmi_get_pbf_info, .set_pbf_fact_status = tpmi_set_pbf_fact_status, .get_fact_info = tpmi_get_fact_info, .adjust_uncore_freq = tpmi_adjust_uncore_freq, .get_clos_information = tpmi_get_clos_information, .pm_qos_config = tpmi_pm_qos_config, .pm_get_clos = tpmi_pm_get_clos, .set_clos = tpmi_set_clos, .clos_get_assoc_status = tpmi_clos_get_assoc_status, .clos_associate = tpmi_clos_associate, }; struct isst_platform_ops *tpmi_get_platform_ops(void) { return &tpmi_ops; }
/* linux-master — tools/power/x86/intel-speed-select/isst-core-tpmi.c (file boundary marker from dataset concatenation) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select -- Enumerate and control features
 * Copyright (c) 2019 Intel Corporation.
 */

#include "isst.h"

/* Active backend (mbox for API v1, TPMI for API v2), set by isst_set_platform_ops() */
static struct isst_platform_ops *isst_ops;

/* Abort the tool if the selected backend does not implement callback _name */
#define CHECK_CB(_name)	\
	do {	\
		if (!isst_ops || !isst_ops->_name) {	\
			fprintf(stderr, "Invalid ops\n");	\
			exit(0);	\
		}	\
	} while (0)

/* Pick the backend dispatch table for the kernel-reported API version.
 * Returns 0 on success, -1 for an unsupported version. */
int isst_set_platform_ops(int api_version)
{
	switch (api_version) {
	case 1:
		isst_ops = mbox_get_platform_ops();
		break;
	case 2:
		isst_ops = tpmi_get_platform_ops();
		break;
	default:
		isst_ops = NULL;
		break;
	}

	if (!isst_ops)
		return -1;
	return 0;
}

/*
 * Thin dispatch wrappers: each validates the callback via CHECK_CB and
 * forwards to the active backend.
 */
void isst_update_platform_param(enum isst_platform_param param, int value)
{
	CHECK_CB(update_platform_param);
	isst_ops->update_platform_param(param, value);
}

int isst_get_disp_freq_multiplier(void)
{
	CHECK_CB(get_disp_freq_multiplier);
	return isst_ops->get_disp_freq_multiplier();
}

int isst_get_trl_max_levels(void)
{
	CHECK_CB(get_trl_max_levels);
	return isst_ops->get_trl_max_levels();
}

char *isst_get_trl_level_name(int level)
{
	CHECK_CB(get_trl_level_name);
	return isst_ops->get_trl_level_name(level);
}

int isst_is_punit_valid(struct isst_id *id)
{
	CHECK_CB(is_punit_valid);
	return isst_ops->is_punit_valid(id);
}

/*
 * Read (@write == 0) or write (@write != 0) MSR @msr on @cpu through the
 * /dev/isst_interface ioctl. On a read, *req_resp receives the value; on a
 * write it supplies the value.
 * NOTE(review): returns 0 even when the ioctl fails (error is only logged);
 * on a failed read *req_resp is left unmodified — callers must pre-initialize.
 */
int isst_send_msr_command(unsigned int cpu, unsigned int msr, int write,
			  unsigned long long *req_resp)
{
	struct isst_if_msr_cmds msr_cmds;
	const char *pathname = "/dev/isst_interface";
	FILE *outf = get_output_file();
	int fd;

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		err(-1, "%s open failed", pathname);

	msr_cmds.cmd_count = 1;
	msr_cmds.msr_cmd[0].logical_cpu = cpu;
	msr_cmds.msr_cmd[0].msr = msr;
	msr_cmds.msr_cmd[0].read_write = write;
	if (write)
		msr_cmds.msr_cmd[0].data = *req_resp;

	if (ioctl(fd, ISST_IF_MSR_COMMAND, &msr_cmds) == -1) {
		perror("ISST_IF_MSR_COMMAND");
		fprintf(outf, "Error: msr_cmd cpu:%d msr:%x read_write:%d\n",
			cpu, msr, write);
	} else {
		if (!write)
			*req_resp = msr_cmds.msr_cmd[0].data;

		debug_printf("msr_cmd response: cpu:%d msr:%x rd_write:%x resp:%llx %llx\n",
			     cpu, msr, write, *req_resp, msr_cmds.msr_cmd[0].data);
	}

	close(fd);

	return 0;
}

int isst_read_pm_config(struct isst_id *id, int *cp_state, int *cp_cap)
{
	CHECK_CB(read_pm_config);
	return isst_ops->read_pm_config(id, cp_state, cp_cap);
}

int isst_get_ctdp_levels(struct isst_id *id, struct isst_pkg_ctdp *pkg_dev)
{
	CHECK_CB(get_config_levels);
	return isst_ops->get_config_levels(id, pkg_dev);
}

int isst_get_ctdp_control(struct isst_id *id, int config_index,
			  struct isst_pkg_ctdp_level_info *ctdp_level)
{
	CHECK_CB(get_ctdp_control);
	return isst_ops->get_ctdp_control(id, config_index, ctdp_level);
}

int isst_get_tdp_info(struct isst_id *id, int config_index,
		      struct isst_pkg_ctdp_level_info *ctdp_level)
{
	CHECK_CB(get_tdp_info);
	return isst_ops->get_tdp_info(id, config_index, ctdp_level);
}

int isst_get_pwr_info(struct isst_id *id, int config_index,
		      struct isst_pkg_ctdp_level_info *ctdp_level)
{
	CHECK_CB(get_pwr_info);
	return isst_ops->get_pwr_info(id, config_index, ctdp_level);
}

int isst_get_coremask_info(struct isst_id *id, int config_index,
			   struct isst_pkg_ctdp_level_info *ctdp_level)
{
	CHECK_CB(get_coremask_info);
	return isst_ops->get_coremask_info(id, config_index, ctdp_level);
}

/* Unpack the 8 one-byte turbo ratio limits from MSR 0x1AD into trl[0..7]. */
int isst_get_get_trl_from_msr(struct isst_id *id, int *trl)
{
	unsigned long long msr_trl;
	int ret;

	ret = isst_send_msr_command(id->cpu, 0x1AD, 0, &msr_trl);
	if (ret)
		return ret;

	trl[0] = msr_trl & GENMASK(7, 0);
	trl[1] = (msr_trl & GENMASK(15, 8)) >> 8;
	trl[2] = (msr_trl & GENMASK(23, 16)) >> 16;
	trl[3] = (msr_trl & GENMASK(31, 24)) >> 24;
	trl[4] = (msr_trl & GENMASK(39, 32)) >> 32;
	trl[5] = (msr_trl & GENMASK(47, 40)) >> 40;
	trl[6] = (msr_trl & GENMASK(55, 48)) >> 48;
	trl[7] = (msr_trl & GENMASK(63, 56)) >> 56;

	return 0;
}

int isst_get_get_trl(struct isst_id *id, int level, int avx_level, int *trl)
{
	CHECK_CB(get_get_trl);
	return isst_ops->get_get_trl(id, level, avx_level, trl);
}

int isst_get_get_trls(struct isst_id *id, int level, struct isst_pkg_ctdp_level_info *ctdp_level)
{
	CHECK_CB(get_get_trls);
	return isst_ops->get_get_trls(id, level, ctdp_level);
}

int isst_get_trl_bucket_info(struct isst_id *id, int level, unsigned long long *buckets_info)
{
	CHECK_CB(get_trl_bucket_info);
	return isst_ops->get_trl_bucket_info(id, level, buckets_info);
}

int isst_set_tdp_level(struct isst_id *id, int tdp_level)
{
	CHECK_CB(set_tdp_level);
	return isst_ops->set_tdp_level(id, tdp_level);
}

/*
 * Validate @level and its SST-BF support, allocate the result cpumask
 * (ownership passes to the caller; freed in isst_get_process_ctdp_complete),
 * then delegate to the backend.
 */
int isst_get_pbf_info(struct isst_id *id, int level, struct isst_pbf_info *pbf_info)
{
	struct isst_pkg_ctdp_level_info ctdp_level;
	struct isst_pkg_ctdp pkg_dev;
	int ret;

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get number of levels", 0, 0);
		return ret;
	}

	if (level > pkg_dev.levels) {
		isst_display_error_info_message(1, "Invalid level", 1, level);
		return -1;
	}

	ret = isst_get_ctdp_control(id, level, &ctdp_level);
	if (ret)
		return ret;

	if (!ctdp_level.pbf_support) {
		isst_display_error_info_message(1, "base-freq feature is not present at this level", 1, level);
		return -1;
	}

	pbf_info->core_cpumask_size = alloc_cpu_set(&pbf_info->core_cpumask);

	CHECK_CB(get_pbf_info);
	return isst_ops->get_pbf_info(id, level, pbf_info);
}

int isst_set_pbf_fact_status(struct isst_id *id, int pbf, int enable)
{
	CHECK_CB(set_pbf_fact_status);
	return isst_ops->set_pbf_fact_status(id, pbf, enable);
}

/* Validate @level and its SST-TF support, then delegate to the backend. */
int isst_get_fact_info(struct isst_id *id, int level, int fact_bucket, struct isst_fact_info *fact_info)
{
	struct isst_pkg_ctdp_level_info ctdp_level;
	struct isst_pkg_ctdp pkg_dev;
	int ret;

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get number of levels", 0, 0);
		return ret;
	}

	if (level > pkg_dev.levels) {
		isst_display_error_info_message(1, "Invalid level", 1, level);
		return -1;
	}

	ret = isst_get_ctdp_control(id, level, &ctdp_level);
	if (ret)
		return ret;

	if (!ctdp_level.fact_support) {
		isst_display_error_info_message(1, "turbo-freq feature is not present at this level", 1, level);
		return -1;
	}
	CHECK_CB(get_fact_info);
	return isst_ops->get_fact_info(id, level, fact_bucket, fact_info);
}

/* Raw read of turbo-ratio-limit MSR 0x1AD. */
int isst_get_trl(struct isst_id *id, unsigned long long *trl)
{
	int ret;

	ret = isst_send_msr_command(id->cpu, 0x1AD, 0, trl);
	if (ret)
		return ret;

	return 0;
}

/* Write MSR 0x1AD; 0 means "no limit" and is replaced with all-ones. */
int isst_set_trl(struct isst_id *id, unsigned long long trl)
{
	int ret;

	if (!trl)
		trl = 0xFFFFFFFFFFFFFFFFULL;

	ret = isst_send_msr_command(id->cpu, 0x1AD, 1, &trl);
	if (ret)
		return ret;

	return 0;
}

/*
 * Write @trl to MSR 0x1AD; when @trl is 0, rebuild the value from the
 * current TDP level's per-bucket ratios instead.
 */
int isst_set_trl_from_current_tdp(struct isst_id *id, unsigned long long trl)
{
	unsigned long long msr_trl;
	int ret;

	if (id->cpu < 0)
		return 0;

	if (trl) {
		msr_trl = trl;
	} else {
		struct isst_pkg_ctdp pkg_dev;
		int trl[8];	/* shadows the parameter intentionally */
		int i;

		ret = isst_get_ctdp_levels(id, &pkg_dev);
		if (ret)
			return ret;

		ret = isst_get_get_trl(id, pkg_dev.current_level, 0, trl);
		if (ret)
			return ret;

		/* Re-pack the 8 ratios into one 64-bit MSR value */
		msr_trl = 0;
		for (i = 0; i < 8; ++i) {
			unsigned long long _trl = trl[i];

			msr_trl |= (_trl << (i * 8));
		}
	}
	ret = isst_send_msr_command(id->cpu, 0x1AD, 1, &msr_trl);
	if (ret)
		return ret;

	return 0;
}

/* Return 1 if locked */
int isst_get_config_tdp_lock_status(struct isst_id *id)
{
	unsigned long long tdp_control = 0;
	int ret;

	ret = isst_send_msr_command(id->cpu, 0x64b, 0, &tdp_control);
	if (ret)
		return ret;

	ret = !!(tdp_control & BIT(31));

	return ret;
}

/* Free the per-level cpumasks allocated during isst_get_process_ctdp(). */
void isst_get_process_ctdp_complete(struct isst_id *id, struct isst_pkg_ctdp *pkg_dev)
{
	int i;

	if (!pkg_dev->processed)
		return;

	for (i = 0; i < pkg_dev->levels; ++i) {
		struct isst_pkg_ctdp_level_info *ctdp_level;

		ctdp_level = &pkg_dev->ctdp_level[i];
		if (ctdp_level->pbf_support)
			free_cpu_set(ctdp_level->pbf_info.core_cpumask);
		free_cpu_set(ctdp_level->core_cpumask);
	}
}

void isst_adjust_uncore_freq(struct isst_id *id, int config_index,
				struct isst_pkg_ctdp_level_info *ctdp_level)
{
	CHECK_CB(adjust_uncore_freq);
	return isst_ops->adjust_uncore_freq(id, config_index, ctdp_level);
}

/*
 * Populate pkg_dev with full information for every TDP level (or just
 * @tdp_level when it is not the 0xff wildcard): control bits, SST-BF/TF
 * details, TDP/power data, core masks and turbo ratio limits.
 * Allocates per-level cpumasks; release with isst_get_process_ctdp_complete().
 */
int isst_get_process_ctdp(struct isst_id *id, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
{
	int i, ret, valid = 0;

	if (pkg_dev->processed)
		return 0;	/* already populated; treat as cached */

	ret = isst_get_ctdp_levels(id, pkg_dev);
	if (ret)
		return ret;

	debug_printf("cpu: %d ctdp enable:%d current level: %d levels:%d\n",
		     id->cpu, pkg_dev->enabled, pkg_dev->current_level,
		     pkg_dev->levels);

	if (tdp_level != 0xff && tdp_level > pkg_dev->levels) {
		isst_display_error_info_message(1, "Invalid level", 0, 0);
		return -1;
	}

	if (!pkg_dev->enabled)
		isst_display_error_info_message(0, "perf-profile feature is not supported, just base-config level 0 is valid", 0, 0);

	for (i = 0; i <= pkg_dev->levels; ++i) {
		struct isst_pkg_ctdp_level_info *ctdp_level;

		if (tdp_level != 0xff && i != tdp_level)
			continue;

		debug_printf("cpu:%d Get Information for TDP level:%d\n", id->cpu, i);
		ctdp_level = &pkg_dev->ctdp_level[i];

		ctdp_level->level = i;
		ctdp_level->control_cpu = id->cpu;
		ctdp_level->pkg_id = id->pkg;
		ctdp_level->die_id = id->die;

		/* Levels the backend can't describe are skipped, not fatal */
		ret = isst_get_ctdp_control(id, i, ctdp_level);
		if (ret)
			continue;

		valid = 1;
		pkg_dev->processed = 1;
		ctdp_level->processed = 1;

		if (ctdp_level->pbf_support) {
			ret = isst_get_pbf_info(id, i, &ctdp_level->pbf_info);
			if (!ret)
				ctdp_level->pbf_found = 1;
		}

		if (ctdp_level->fact_support) {
			ret = isst_get_fact_info(id, i, 0xff,
						 &ctdp_level->fact_info);
			if (ret)
				return ret;
		}

		/*
		 * When perf-profile is unsupported on SKX-based parts, fall
		 * back to cpufreq base frequency and the raw MSR turbo limits.
		 */
		if (!pkg_dev->enabled && is_skx_based_platform()) {
			int freq;

			freq = get_cpufreq_base_freq(id->cpu);
			if (freq > 0) {
				ctdp_level->sse_p1 = freq / 100000;
				ctdp_level->tdp_ratio = ctdp_level->sse_p1;
			}

			isst_get_get_trl_from_msr(id, ctdp_level->trl_ratios[0]);
			isst_get_trl_bucket_info(id, i, &ctdp_level->trl_cores);
			continue;
		}

		ret = isst_get_tdp_info(id, i, ctdp_level);
		if (ret)
			return ret;

		ret = isst_get_pwr_info(id, i, ctdp_level);
		if (ret)
			return ret;

		ctdp_level->core_cpumask_size =
			alloc_cpu_set(&ctdp_level->core_cpumask);
		ret = isst_get_coremask_info(id, i, ctdp_level);
		if (ret)
			return ret;

		ret = isst_get_trl_bucket_info(id, i, &ctdp_level->trl_cores);
		if (ret)
			return ret;

		ret = isst_get_get_trls(id, i, ctdp_level);
		if (ret)
			return ret;
	}

	if (!valid)
		isst_display_error_info_message(0, "Invalid level, Can't get TDP control information at specified levels on cpu", 1, id->cpu);

	return 0;
}

int isst_clos_get_clos_information(struct isst_id *id, int *enable, int *type)
{
	CHECK_CB(get_clos_information);
	return isst_ops->get_clos_information(id, enable, type);
}

int isst_pm_qos_config(struct isst_id *id, int enable_clos, int priority_type)
{
	CHECK_CB(pm_qos_config);
	return isst_ops->pm_qos_config(id, enable_clos, priority_type);
}

int isst_pm_get_clos(struct isst_id *id, int clos, struct isst_clos_config *clos_config)
{
	CHECK_CB(pm_get_clos);
	return isst_ops->pm_get_clos(id, clos, clos_config);
}

int isst_set_clos(struct isst_id *id, int clos, struct isst_clos_config *clos_config)
{
	CHECK_CB(set_clos);
	return isst_ops->set_clos(id, clos, clos_config);
}

int isst_clos_get_assoc_status(struct isst_id *id, int *clos_id)
{
	CHECK_CB(clos_get_assoc_status);
	return isst_ops->clos_get_assoc_status(id, clos_id);
}

int isst_clos_associate(struct isst_id *id, int clos_id)
{
	CHECK_CB(clos_associate);
	return isst_ops->clos_associate(id, clos_id);
}
/* linux-master — tools/power/x86/intel-speed-select/isst-core.c (file boundary marker from dataset concatenation) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select -- Enumerate and control features
 * Copyright (c) 2019 Intel Corporation.
 */

#include <linux/isst_if.h>
#include <sys/utsname.h>

#include "isst.h"

/* One entry in the command dispatch table: feature + command -> handler */
struct process_cmd_struct {
	char *feature;
	char *command;
	void (*process_fn)(int arg);
	int arg;
};

static const char *version_str = "v1.17";

/* Highest /dev/isst_interface API version this tool understands */
static const int supported_api_ver = 2;

static struct isst_if_platform_info isst_platform_info;
static char *progname;
static int debug_flag;
static FILE *outf;		/* destination for normal command output */

static int cpu_model;
static int cpu_stepping;

#define MAX_CPUS_IN_ONE_REQ 256
static short max_target_cpus;
static unsigned short target_cpus[MAX_CPUS_IN_ONE_REQ];

static int topo_max_cpus;
static size_t present_cpumask_size;
static cpu_set_t *present_cpumask;
static size_t target_cpumask_size;
static cpu_set_t *target_cpumask;
static int tdp_level = 0xFF;	/* 0xFF == wildcard / unset */
static int fact_bucket = 0xFF;
static int fact_avx = 0xFF;
static unsigned long long fact_trl;
static int out_format_json;
static int cmd_help;
static int force_online_offline;
static int auto_mode;
static int fact_enable_fail;
static int cgroupv2;

/* clos related */
static int current_clos = -1;
static int clos_epp = -1;
static int clos_prop_prio = -1;
static int clos_min = -1;
static int clos_max = -1;
static int clos_desired = -1;
static int clos_priority_type;

/* Cached per-logical-CPU topology: sysfs IDs plus punit-relative numbering */
struct _cpu_map {
	unsigned short core_id;
	unsigned short pkg_id;
	unsigned short die_id;
	unsigned short punit_id;
	unsigned short punit_cpu;
	unsigned short punit_cpu_core;
	unsigned short initialized;
};
struct _cpu_map *cpu_map;

/* On-disk record format of /var/run/isst_cpu_topology.dat */
struct cpu_topology {
	short cpu;
	short core_id;
	short pkg_id;
	short die_id;
};

FILE *get_output_file(void)
{
	return outf;
}

int is_debug_enabled(void)
{
	return debug_flag;
}

/* printf to stdout, only when --debug was given. */
void debug_printf(const char *format, ...)
{
	va_list args;

	va_start(args, format);

	if (debug_flag)
		vprintf(format, args);

	va_end(args);
}

/* CascadeLake-N: model 0x55, steppings 6 or 7. */
int is_clx_n_platform(void)
{
	if (cpu_model == 0x55)
		if (cpu_stepping == 0x6 || cpu_stepping == 0x7)
			return 1;
	return 0;
}

int is_skx_based_platform(void)
{
	if (cpu_model == 0x55)
		return 1;

	return 0;
}

int is_spr_platform(void)
{
	if (cpu_model == 0x8F)
		return 1;

	return 0;
}

int is_emr_platform(void)
{
	if (cpu_model == 0xCF)
		return 1;

	return 0;
}

int is_icx_platform(void)
{
	if (cpu_model == 0x6A || cpu_model == 0x6C)
		return 1;

	return 0;
}

/*
 * Fill cpu_model/cpu_stepping from CPUID leaf 1. Returns 0 for supported
 * CPUs; on CLX-N, returns non-zero unless /proc/cpuinfo shows one of the
 * three supported N-SKU model names.
 */
static int update_cpu_model(void)
{
	unsigned int ebx, ecx, edx;
	unsigned int fms, family;

	__cpuid(1, fms, ebx, ecx, edx);
	family = (fms >> 8) & 0xf;
	cpu_model = (fms >> 4) & 0xf;
	if (family == 6 || family == 0xf)
		cpu_model += ((fms >> 16) & 0xf) << 4;	/* extended model */

	cpu_stepping = fms & 0xf;
	/* only three CascadeLake-N models are supported */
	if (is_clx_n_platform()) {
		FILE *fp;
		size_t n = 0;
		char *line = NULL;
		int ret = 1;

		fp = fopen("/proc/cpuinfo", "r");
		if (!fp)
			err(-1, "cannot open /proc/cpuinfo\n");

		while (getline(&line, &n, fp) > 0) {
			if (strstr(line, "model name")) {
				if (strstr(line, "6252N") ||
				    strstr(line, "6230N") ||
				    strstr(line, "5218N"))
					ret = 0;
				break;
			}
		}
		free(line);
		fclose(fp);
		return ret;
	}
	return 0;
}

int api_version(void)
{
	return isst_platform_info.api_version;
}

/* Open a file, and exit on failure */
static FILE *fopen_or_exit(const char *path, const char *mode)
{
	FILE *filep = fopen(path, mode);

	if (!filep)
		err(1, "%s: open failed", path);

	return filep;
}

/* Parse a file containing a single int */
static int parse_int_file(int fatal, const char *fmt, ...)
{
	va_list args;
	char path[PATH_MAX];
	FILE *filep;
	int value;

	va_start(args, fmt);
	vsnprintf(path, sizeof(path), fmt, args);
	va_end(args);
	if (fatal) {
		filep = fopen_or_exit(path, "r");
	} else {
		filep = fopen(path, "r");
		if (!filep)
			return -1;	/* non-fatal: signal "not present" */
	}
	if (fscanf(filep, "%d", &value) != 1)
		err(1, "%s: failed to parse number from file", path);
	fclose(filep);

	return value;
}

int cpufreq_sysfs_present(void)
{
	DIR *dir;

	dir = opendir("/sys/devices/system/cpu/cpu0/cpufreq");
	if (dir) {
		closedir(dir);
		return 1;
	}

	return 0;
}

int out_format_is_json(void)
{
	return out_format_json;
}

/*
 * Read cached topology for @cpu from the fixed-record file written by
 * store_cpu_topology(). Returns 0 on success, -1 on any failure.
 */
static int get_stored_topology_info(int cpu, int *core_id, int *pkg_id, int *die_id)
{
	const char *pathname = "/var/run/isst_cpu_topology.dat";
	struct cpu_topology cpu_top;
	FILE *fp;
	int ret;

	fp = fopen(pathname, "rb");
	if (!fp)
		return -1;

	ret = fseek(fp, cpu * sizeof(cpu_top), SEEK_SET);
	if (ret)
		goto err_ret;

	ret = fread(&cpu_top, sizeof(cpu_top), 1, fp);
	if (ret != 1) {
		ret = -1;
		goto err_ret;
	}

	*pkg_id = cpu_top.pkg_id;
	*core_id = cpu_top.core_id;
	*die_id = cpu_top.die_id;
	ret = 0;

err_ret:
	fclose(fp);

	return ret;
}

/*
 * Snapshot sysfs topology of every CPU to /var/run so IDs remain
 * resolvable after CPUs are offlined. No-op if the cache already exists.
 */
static void store_cpu_topology(void)
{
	const char *pathname = "/var/run/isst_cpu_topology.dat";
	FILE *fp;
	int i;

	fp = fopen(pathname, "rb");
	if (fp) {
		/* Mapping already exists */
		fclose(fp);
		return;
	}

	fp = fopen(pathname, "wb");
	if (!fp) {
		fprintf(stderr, "Can't create file:%s\n", pathname);
		return;
	}

	fprintf(stderr, "Caching topology information\n");

	for (i = 0; i < topo_max_cpus; ++i) {
		struct cpu_topology cpu_top;

		cpu_top.core_id = parse_int_file(0,
			"/sys/devices/system/cpu/cpu%d/topology/core_id", i);
		if (cpu_top.core_id < 0)
			cpu_top.core_id = -1;

		cpu_top.pkg_id = parse_int_file(0,
			"/sys/devices/system/cpu/cpu%d/topology/physical_package_id", i);
		if (cpu_top.pkg_id < 0)
			cpu_top.pkg_id = -1;

		cpu_top.die_id = parse_int_file(0,
			"/sys/devices/system/cpu/cpu%d/topology/die_id", i);
		if (cpu_top.die_id < 0)
			cpu_top.die_id = -1;

		cpu_top.cpu = i;

		if (fwrite(&cpu_top, sizeof(cpu_top), 1, fp) != 1) {
			fprintf(stderr, "Can't write to:%s\n", pathname);
			break;
		}
	}

	fclose(fp);
}

/* Package ID for @cpu: cpu_map cache, then sysfs, then the on-disk cache. */
static int get_physical_package_id(int cpu)
{
	int ret;

	if (cpu < 0)
		return -1;

	if (cpu_map && cpu_map[cpu].initialized)
		return cpu_map[cpu].pkg_id;

	ret = parse_int_file(0,
			"/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	if (ret < 0) {
		int core_id, pkg_id, die_id;

		ret = get_stored_topology_info(cpu, &core_id, &pkg_id, &die_id);
		if (!ret)
			return pkg_id;
	}

	return ret;
}

/* Core ID for @cpu: same lookup order as get_physical_package_id(). */
static int get_physical_core_id(int cpu)
{
	int ret;

	if (cpu < 0)
		return -1;

	if (cpu_map && cpu_map[cpu].initialized)
		return cpu_map[cpu].core_id;

	ret = parse_int_file(0,
			"/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	if (ret < 0) {
		int core_id, pkg_id, die_id;

		ret = get_stored_topology_info(cpu, &core_id, &pkg_id, &die_id);
		if (!ret)
			return core_id;
	}

	return ret;
}

/* Die ID for @cpu; an unknown die falls back to 0 (single-die assumption). */
static int get_physical_die_id(int cpu)
{
	int ret;

	if (cpu < 0)
		return -1;

	if (cpu_map && cpu_map[cpu].initialized)
		return cpu_map[cpu].die_id;

	ret = parse_int_file(0,
			"/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
	if (ret < 0) {
		int core_id, pkg_id, die_id;

		ret = get_stored_topology_info(cpu, &core_id, &pkg_id, &die_id);
		if (!ret) {
			if (die_id < 0)
				die_id = 0;

			return die_id;
		}
	}

	if (ret < 0)
		ret = 0;

	return ret;
}

/* Punit ID comes only from the cpu_map cache (filled via ioctl). */
static int get_physical_punit_id(int cpu)
{
	if (cpu < 0)
		return -1;

	if (cpu_map && cpu_map[cpu].initialized)
		return cpu_map[cpu].punit_id;

	return -1;
}

/*
 * Resolve @cpu into an isst_id (pkg/die/punit); out-of-range components
 * are normalized to -1 so callers can reject them uniformly.
 */
void set_isst_id(struct isst_id *id, int cpu)
{
	id->cpu = cpu;

	id->pkg = get_physical_package_id(cpu);
	if (id->pkg >= MAX_PACKAGE_COUNT)
		id->pkg = -1;

	id->die = get_physical_die_id(cpu);
	if (id->die >= MAX_DIE_PER_PACKAGE)
		id->die = -1;

	id->punit = get_physical_punit_id(cpu);
	if (id->punit >= MAX_PUNIT_PER_DIE)
		id->punit = -1;
}

int is_cpu_in_power_domain(int cpu, struct isst_id *id)
{
	struct isst_id tid;

	set_isst_id(&tid, cpu);

	if (id->pkg == tid.pkg && id->die == tid.die && id->punit == tid.punit)
		return 1;

	return 0;
}

int get_cpufreq_base_freq(int cpu)
{
	return parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency", cpu);
}

int get_topo_max_cpus(void)
{
	return topo_max_cpus;
}

/*
 * Return 1/0 for online/offline.
 * NOTE(review): the return type is unsigned int but fd/ret (negative on
 * failure) are returned directly, so errors surface as huge unsigned
 * values — verify how callers test the result.
 */
static unsigned int is_cpu_online(int cpu)
{
	char buffer[128];
	int fd, ret;
	unsigned char online;

	snprintf(buffer, sizeof(buffer),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);

	fd = open(buffer, O_RDONLY);
	if (fd < 0)
		return fd;

	ret = read(fd, &online, sizeof(online));
	close(fd);

	if (ret == -1)
		return ret;

	if (online == '1')
		online = 1;
	else
		online = 0;

	return online;
}

/* Parse "major.minor" out of uname() release string. 0 on success. */
static int get_kernel_version(int *major, int *minor)
{
	struct utsname buf;
	int ret;

	ret = uname(&buf);
	if (ret)
		return ret;

	ret = sscanf(buf.release, "%d.%d", major, minor);
	if (ret != 2)
		return ret;

	return 0;
}

/* Kernels >= 6.5 no longer support offlining CPU 0 */
#define CPU0_HOTPLUG_DEPRECATE_MAJOR_VER 6
#define CPU0_HOTPLUG_DEPRECATE_MINOR_VER 5

/*
 * Bring @cpu online (@state != 0) or offline via sysfs. CPU 0 is skipped
 * on kernels where its hotplug is deprecated, and a missing CPU 0 online
 * file is tolerated when onlining (it is already online).
 */
void set_cpu_online_offline(int cpu, int state)
{
	char buffer[128];
	int fd, ret;

	if (!cpu) {
		int major, minor;

		ret = get_kernel_version(&major, &minor);
		if (!ret) {
			if (major > CPU0_HOTPLUG_DEPRECATE_MAJOR_VER ||
			    (major == CPU0_HOTPLUG_DEPRECATE_MAJOR_VER &&
			     minor >= CPU0_HOTPLUG_DEPRECATE_MINOR_VER)) {
				debug_printf("Ignore CPU 0 offline/online for kernel version >= %d.%d\n", major, minor);
				debug_printf("Use cgroups to isolate CPU 0\n");
				return;
			}
		}
	}

	snprintf(buffer, sizeof(buffer),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);

	fd = open(buffer, O_WRONLY);
	if (fd < 0) {
		if (!cpu && state) {
			fprintf(stderr, "This system is not configured for CPU 0 online/offline\n");
			fprintf(stderr, "Ignoring online request for CPU 0 as this is already online\n");
			return;
		}
		err(-1, "%s open failed", buffer);
	}

	if (state)
		ret = write(fd, "1\n", 2);
	else
		ret = write(fd, "0\n", 2);

	if (ret == -1)
		perror("Online/Offline: Operation failed\n");

	close(fd);
}

/* Online every CPU and drop the topology cache (it may be stale now). */
static void force_all_cpus_online(void)
{
	int i;

	fprintf(stderr, "Forcing all CPUs online\n");

	for (i = 0; i < topo_max_cpus; ++i)
		set_cpu_online_offline(i, 1);

	unlink("/var/run/isst_cpu_topology.dat");
}

/*
 * Invoke @callback once per online power domain (pkg/die/punit triple),
 * passing the lowest-numbered online CPU of that domain in id->cpu.
 */
void for_each_online_power_domain_in_set(void (*callback)(struct isst_id *, void *, void *,
							  void *, void *),
					 void *arg1, void *arg2, void *arg3,
					 void *arg4)
{
	struct isst_id id;
	int cpus[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE][MAX_PUNIT_PER_DIE];
	int valid_mask[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE] = {0};
	int i, j, k;

	memset(cpus, -1, sizeof(cpus));

	for (i = 0; i < topo_max_cpus; ++i) {
		int online;

		if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
			continue;

		online = parse_int_file(
			i != 0, "/sys/devices/system/cpu/cpu%d/online", i);
		if (online < 0)
			online = 1; /* online entry for CPU 0 needs some special configs */

		if (!online)
			continue;

		set_isst_id(&id, i);

		if (id.pkg < 0 || id.die < 0 || id.punit < 0)
			continue;

		valid_mask[id.pkg][id.die] = 1;

		/* Remember the first (lowest) CPU of each domain */
		if (cpus[id.pkg][id.die][id.punit] == -1)
			cpus[id.pkg][id.die][id.punit] = i;
	}

	for (i = 0; i < MAX_PACKAGE_COUNT; i++) {
		for (j = 0; j < MAX_DIE_PER_PACKAGE; j++) {
			/*
			 * Fix me:
			 * How to check a non-cpu die for a package/die with all cpu offlined?
			 */
			if (!valid_mask[i][j])
				continue;

			for (k = 0; k < MAX_PUNIT_PER_DIE; k++) {
				id.cpu = cpus[i][j][k];
				id.pkg = i;
				id.die = j;
				id.punit = k;
				if (isst_is_punit_valid(&id))
					callback(&id, arg1, arg2, arg3, arg4);
			}
		}
	}
}

/* Invoke @callback for each online CPU named on the command line. */
static void for_each_online_target_cpu_in_set(
	void (*callback)(struct isst_id *, void *, void *, void *, void *),
	void *arg1, void *arg2, void *arg3, void *arg4)
{
	int i, found = 0;
	struct isst_id id;

	for (i = 0; i < topo_max_cpus; ++i) {
		int online;

		if (!CPU_ISSET_S(i, target_cpumask_size, target_cpumask))
			continue;
		if (i)
			online = parse_int_file(
				1, "/sys/devices/system/cpu/cpu%d/online", i);
		else
			online = 1; /* online entry for CPU 0 needs some special configs */

		set_isst_id(&id, i);
		if (online && callback) {
			callback(&id, arg1, arg2, arg3, arg4);
			found = 1;
		}
	}

	if (!found)
		fprintf(stderr, "No valid CPU in the list\n");
}

#define BITMASK_SIZE 32

/*
 * Derive topo_max_cpus from the width of the first readable
 * thread_siblings bitmap (each comma-separated word covers 32 CPUs).
 */
static void set_max_cpu_num(void)
{
	FILE *filep;
	unsigned long dummy;
	int i;

	topo_max_cpus = 0;
	for (i = 0; i < 256; ++i) {
		char path[256];

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", i);
		filep = fopen(path, "r");
		if (filep)
			break;
	}

	if (!filep) {
		fprintf(stderr, "Can't get max cpu number\n");
		exit(0);
	}

	while (fscanf(filep, "%lx,", &dummy) == 1)
		topo_max_cpus += BITMASK_SIZE;
	fclose(filep);

	debug_printf("max cpus %d\n", topo_max_cpus);
}

/* Allocate a zeroed dynamic cpu_set_t sized for topo_max_cpus; returns its size. */
size_t alloc_cpu_set(cpu_set_t **cpu_set)
{
	cpu_set_t *_cpu_set;
	size_t size;

	_cpu_set = CPU_ALLOC((topo_max_cpus + 1));
	if (_cpu_set == NULL)
		err(3, "CPU_ALLOC");
	size = CPU_ALLOC_SIZE((topo_max_cpus + 1));
	CPU_ZERO_S(size, _cpu_set);

	*cpu_set = _cpu_set;

	return size;
}

void free_cpu_set(cpu_set_t *cpu_set)
{
	CPU_FREE(cpu_set);
}

/* CPUs counted per (pkg, die, punit), filled in create_cpu_map() */
static int cpu_cnt[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE][MAX_PUNIT_PER_DIE];

/* Highest punit-relative core number present in this power domain. */
int get_max_punit_core_id(struct isst_id *id)
{
	int max_id = 0;
	int i;

	for (i = 0; i < topo_max_cpus; ++i) {
		if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
			continue;

		if (is_cpu_in_power_domain(i, id) &&
		    cpu_map[i].punit_cpu_core > max_id)
			max_id = cpu_map[i].punit_cpu_core;
	}

	return max_id;
}

int get_cpu_count(struct isst_id *id)
{
	if (id->pkg < 0 || id->die < 0 || id->punit < 0)
		return 0;

	return cpu_cnt[id->pkg][id->die][id->punit];
}

/* Decode the kernel-supplied physical CPU ID into punit fields of @cpu_map. */
static void update_punit_cpu_info(__u32 physical_cpu, struct _cpu_map *cpu_map)
{
	if (api_version() > 1) {
		/*
		 * MSR 0x54 format
		 *	[15:11] PM_DOMAIN_ID
		 *	[10:3] MODULE_ID (aka IDI_AGENT_ID)
		 *	[2:0] LP_ID (We don't care about these bits we only
		 *		care die and core id
		 *	For Atom:
		 *	[2] Always 0
		 *	[1:0] core ID within module
		 *	For Core
		 *	[2:1] Always 0
		 *	[0] thread ID
		 */
		cpu_map->punit_id = (physical_cpu >> 11) & 0x1f;
		cpu_map->punit_cpu_core = (physical_cpu >> 3) & 0xff;
		cpu_map->punit_cpu = physical_cpu & 0x7ff;
	} else {
		int punit_id;

		/*
		 * MSR 0x53 format
		 *	Format
		 *	Bit 0 – thread ID
		 *	Bit 8:1 – core ID
		 *	Bit 13:9 – punit ID
		 */
		cpu_map->punit_cpu = physical_cpu & 0x1ff;
		cpu_map->punit_cpu_core = (cpu_map->punit_cpu >> 1); // shift to get core id
		punit_id = (physical_cpu >> 9) & 0x1f;

		if (punit_id >= MAX_PUNIT_PER_DIE)
			punit_id = 0;

		cpu_map->punit_id = punit_id;
	}
}

/*
 * Build cpu_map for all present CPUs (sysfs topology plus punit mapping
 * via ISST_IF_GET_PHY_ID), fill present/target cpumasks and cpu_cnt.
 */
static void create_cpu_map(void)
{
	const char *pathname = "/dev/isst_interface";
	size_t size;
	DIR *dir;
	int i, fd = 0;
	struct isst_if_cpu_maps map;

	/* Use calloc to make sure the memory is initialized to Zero */
	cpu_map = calloc(topo_max_cpus, sizeof(*cpu_map));
	if (!cpu_map)
		err(3, "cpumap");

	fd = open(pathname, O_RDWR);
	if (fd < 0 && !is_clx_n_platform())
		err(-1, "%s open failed", pathname);

	size = alloc_cpu_set(&present_cpumask);
	present_cpumask_size = size;

	for (i = 0; i < topo_max_cpus; ++i) {
		char buffer[256];
		int pkg_id, die_id, core_id, punit_id;

		/* check if CPU is online */
		snprintf(buffer, sizeof(buffer),
			 "/sys/devices/system/cpu/cpu%d", i);
		dir = opendir(buffer);
		if (!dir)
			continue;
		closedir(dir);

		CPU_SET_S(i, size, present_cpumask);

		pkg_id = get_physical_package_id(i);
		die_id = get_physical_die_id(i);
		core_id = get_physical_core_id(i);

		if (pkg_id < 0 || die_id < 0 || core_id < 0)
			continue;

		cpu_map[i].pkg_id = pkg_id;
		cpu_map[i].die_id = die_id;
		cpu_map[i].core_id = core_id;

		punit_id = 0;

		if (fd >= 0) {
			map.cmd_count = 1;
			map.cpu_map[0].logical_cpu = i;
			debug_printf(" map logical_cpu:%d\n",
				     map.cpu_map[0].logical_cpu);
			if (ioctl(fd, ISST_IF_GET_PHY_ID, &map) == -1) {
				perror("ISST_IF_GET_PHY_ID");
				fprintf(outf, "Error: map logical_cpu:%d\n",
					map.cpu_map[0].logical_cpu);
			} else {
				update_punit_cpu_info(map.cpu_map[0].physical_cpu, &cpu_map[i]);
				punit_id = cpu_map[i].punit_id;
			}
		}
		cpu_map[i].initialized = 1;

		cpu_cnt[pkg_id][die_id][punit_id]++;

		debug_printf(
			"map logical_cpu:%d core: %d die:%d pkg:%d punit:%d punit_cpu:%d punit_core:%d\n",
			i, cpu_map[i].core_id, cpu_map[i].die_id,
			cpu_map[i].pkg_id, cpu_map[i].punit_id,
			cpu_map[i].punit_cpu, cpu_map[i].punit_cpu_core);
	}
	if (fd >= 0)
		close(fd);

	size = alloc_cpu_set(&target_cpumask);
	target_cpumask_size = size;
	for (i = 0; i < max_target_cpus; ++i) {
		if (!CPU_ISSET_S(target_cpus[i], present_cpumask_size,
				 present_cpumask))
			continue;

		CPU_SET_S(target_cpus[i], size, target_cpumask);
	}
}

/*
 * Expand a punit-relative 64-bit @core_mask into @core_cpumask of logical
 * CPUs belonging to the same power domain as @id; *cpu_cnt receives the
 * number of CPUs set.
 */
void set_cpu_mask_from_punit_coremask(struct isst_id *id, unsigned long long core_mask,
				      size_t core_cpumask_size,
				      cpu_set_t *core_cpumask, int *cpu_cnt)
{
	int i, cnt = 0;

	if (id->cpu < 0)
		return;

	*cpu_cnt = 0;

	for (i = 0; i < 64; ++i) {
		if (core_mask & BIT_ULL(i)) {
			int j;

			for (j = 0; j < topo_max_cpus; ++j) {
				if (!CPU_ISSET_S(j, present_cpumask_size, present_cpumask))
					continue;
				if (is_cpu_in_power_domain(j, id) &&
				    cpu_map[j].punit_cpu_core == i) {
					CPU_SET_S(j, core_cpumask_size,
						  core_cpumask);
					++cnt;
				}
			}
		}
	}

	*cpu_cnt = cnt;
}

/* Punit-relative core number of @logical_cpu, or -EINVAL if out of range. */
int find_phy_core_num(int logical_cpu)
{
	if (logical_cpu < topo_max_cpus)
		return cpu_map[logical_cpu].punit_cpu_core;

	return -EINVAL;
}

int use_cgroupv2(void)
{
	return cgroupv2;
}

int enable_cpuset_controller(void)
{
	int fd, ret;

	fd = open("/sys/fs/cgroup/cgroup.subtree_control", O_RDWR, 0);
	if (fd < 0) {
		debug_printf("Can't activate cpuset controller\n");
		debug_printf("Either 
you are not root user or CGroup v2 is not supported\n"); return fd; } ret = write(fd, " +cpuset", strlen(" +cpuset")); close(fd); if (ret == -1) { debug_printf("Can't activate cpuset controller: Write failed\n"); return ret; } return 0; } int isolate_cpus(struct isst_id *id, int mask_size, cpu_set_t *cpu_mask, int level) { int i, first, curr_index, index, ret, fd; static char str[512], dir_name[64]; static char cpuset_cpus[128]; int str_len = sizeof(str); DIR *dir; snprintf(dir_name, sizeof(dir_name), "/sys/fs/cgroup/%d-%d-%d", id->pkg, id->die, id->punit); dir = opendir(dir_name); if (!dir) { ret = mkdir(dir_name, 0744); if (ret) { debug_printf("Can't create dir:%s errno:%d\n", dir_name, errno); return ret; } } closedir(dir); if (!level) { sprintf(cpuset_cpus, "%s/cpuset.cpus.partition", dir_name); fd = open(cpuset_cpus, O_RDWR, 0); if (fd < 0) { return fd; } ret = write(fd, "member", strlen("member")); if (ret == -1) { printf("Can't update to member\n"); return ret; } return 0; } if (!CPU_COUNT_S(mask_size, cpu_mask)) { return -1; } curr_index = 0; first = 1; str[0] = '\0'; for (i = 0; i < get_topo_max_cpus(); ++i) { if (!is_cpu_in_power_domain(i, id)) continue; if (CPU_ISSET_S(i, mask_size, cpu_mask)) continue; if (!first) { index = snprintf(&str[curr_index], str_len - curr_index, ","); curr_index += index; if (curr_index >= str_len) break; } index = snprintf(&str[curr_index], str_len - curr_index, "%d", i); curr_index += index; if (curr_index >= str_len) break; first = 0; } debug_printf("isolated CPUs list: package:%d curr_index:%d [%s]\n", id->pkg, curr_index ,str); snprintf(cpuset_cpus, sizeof(cpuset_cpus), "%s/cpuset.cpus", dir_name); fd = open(cpuset_cpus, O_RDWR, 0); if (fd < 0) { return fd; } ret = write(fd, str, strlen(str)); close(fd); if (ret == -1) { debug_printf("Can't activate cpuset controller: Write failed\n"); return ret; } snprintf(cpuset_cpus, sizeof(cpuset_cpus), "%s/cpuset.cpus.partition", dir_name); fd = open(cpuset_cpus, O_RDWR, 0); if (fd 
< 0)
		return fd;

	ret = write(fd, "isolated", strlen("isolated"));
	if (ret == -1) {
		debug_printf("Can't update to isolated\n");
		/* "isolated" may be unsupported; fall back to a "root" partition */
		ret = write(fd, "root", strlen("root"));
		if (ret == -1)
			debug_printf("Can't update to root\n");
	}

	close(fd);

	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Query platform/driver capability info via ISST_IF_GET_PLATFORM_INFO and
 * install the matching platform callbacks. CLX-N has no driver interface
 * and is forced to API version 1. Returns 0 on success, -1 on failure.
 */
static int isst_fill_platform_info(void)
{
	const char *pathname = "/dev/isst_interface";
	int fd;

	if (is_clx_n_platform()) {
		isst_platform_info.api_version = 1;
		goto set_platform_ops;
	}

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		err(-1, "%s open failed", pathname);

	if (ioctl(fd, ISST_IF_GET_PLATFORM_INFO, &isst_platform_info) == -1) {
		perror("ISST_IF_GET_PLATFORM_INFO");
		close(fd);
		return -1;
	}

	close(fd);

	if (isst_platform_info.api_version > supported_api_ver) {
		printf("Incompatible API versions; Upgrade of tool is required\n");
		return -1;
	}

set_platform_ops:
	if (isst_set_platform_ops(isst_platform_info.api_version)) {
		fprintf(stderr, "Failed to set platform callbacks\n");
		exit(0);
	}
	return 0;
}

/*
 * Per-domain callback: collect SST feature support bits into *arg3 (mask)
 * and the maximum TDP level into *arg4. Only runs for the first CPU power
 * domain seen; arg2 records which domain was inspected.
 */
void get_isst_status(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4)
{
	struct isst_pkg_ctdp pkg_dev;
	struct isst_id *tid = (struct isst_id *)arg2;
	int *mask = (int *)arg3;
	int *max_level = (int *)arg4;
	int j, ret;

	/* Only check the first cpu power domain */
	if (id->cpu < 0 || tid->cpu >= 0)
		return;

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	if (ret)
		return;

	if (pkg_dev.enabled)
		*mask |= BIT(0);

	if (pkg_dev.locked)
		*mask |= BIT(1);

	if (*max_level < pkg_dev.levels)
		*max_level = pkg_dev.levels;

	for (j = 0; j <= pkg_dev.levels; ++j) {
		struct isst_pkg_ctdp_level_info ctdp_level;

		ret = isst_get_ctdp_control(id, j, &ctdp_level);
		if (ret)
			continue;

		if (ctdp_level.fact_support)
			*mask |= BIT(2);

		if (ctdp_level.pbf_support)
			*mask |= BIT(3);
	}

	tid->cpu = id->cpu;
	tid->pkg = id->pkg;
	tid->die = id->die;
	tid->punit = id->punit;
}

/* Print supported/unsupported status of each SST feature (PP/TF/BF/CP). */
static void isst_print_extended_platform_info(void)
{
	int cp_state, cp_cap;
	struct isst_id id;
	int mask = 0, max_level = 0;

	id.cpu = -1;
	
for_each_online_power_domain_in_set(get_isst_status, NULL, &id, &mask, &max_level);

	if (mask & BIT(0)) {
		fprintf(outf, "Intel(R) SST-PP (feature perf-profile) is supported\n");
	} else {
		fprintf(outf, "Intel(R) SST-PP (feature perf-profile) is not supported\n");
		fprintf(outf, "Only performance level 0 (base level) is present\n");
	}

	if (mask & BIT(1))
		fprintf(outf, "TDP level change control is locked\n");
	else
		fprintf(outf, "TDP level change control is unlocked, max level: %d\n", max_level);

	if (mask & BIT(2))
		fprintf(outf, "Intel(R) SST-TF (feature turbo-freq) is supported\n");
	else
		fprintf(outf, "Intel(R) SST-TF (feature turbo-freq) is not supported\n");

	if (mask & BIT(3))
		fprintf(outf, "Intel(R) SST-BF (feature base-freq) is supported\n");
	else
		fprintf(outf, "Intel(R) SST-BF (feature base-freq) is not supported\n");

	if (isst_read_pm_config(&id, &cp_state, &cp_cap)) {
		fprintf(outf, "Intel(R) SST-CP (feature core-power) status is unknown\n");
		return;
	}

	if (cp_cap)
		fprintf(outf, "Intel(R) SST-CP (feature core-power) is supported\n");
	else
		fprintf(outf, "Intel(R) SST-CP (feature core-power) is not supported\n");
}

/* Print API/driver versions and feature support, then exit. Not supported on CLX-N. */
static void isst_print_platform_information(void)
{
	if (is_clx_n_platform()) {
		/* NOTE(review): "in not" looks like a typo for "is not" in this
		 * user-visible string; left unchanged here. */
		fprintf(stderr, "\nThis option in not supported on this platform\n");
		exit(0);
	}

	/* Early initialization to create working cpu_map */
	set_max_cpu_num();
	create_cpu_map();

	fprintf(outf, "Platform: API version : %d\n",
		isst_platform_info.api_version);
	fprintf(outf, "Platform: Driver version : %d\n",
		isst_platform_info.driver_version);
	fprintf(outf, "Platform: mbox supported : %d\n",
		isst_platform_info.mbox_supported);
	fprintf(outf, "Platform: mmio supported : %d\n",
		isst_platform_info.mmio_supported);
	isst_print_extended_platform_info();

	exit(0);
}

/* Strings printed by isst_ctdp_display_core_info for 0/1 values (see _get_tdp_level) */
static char *local_str0, *local_str1;

/* Per-CPU callback: invoke the getter passed in arg1 and display its result. */
static void exec_on_get_ctdp_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3,
				 void *arg4)
{
	int (*fn_ptr)(struct isst_id *id, void *arg);
	int ret;

	fn_ptr = arg1;
	ret = fn_ptr(id, arg2);
	if 
(ret) isst_display_error_info_message(1, "get_tdp_* failed", 0, 0); else isst_ctdp_display_core_info(id, outf, arg3, *(unsigned int *)arg4, local_str0, local_str1); } #define _get_tdp_level(desc, suffix, object, help, str0, str1) \ static void get_tdp_##object(int arg) \ { \ struct isst_pkg_ctdp ctdp; \ \ if (cmd_help) { \ fprintf(stderr, \ "Print %s [No command arguments are required]\n", \ help); \ exit(0); \ } \ local_str0 = str0; \ local_str1 = str1; \ isst_ctdp_display_information_start(outf); \ if (max_target_cpus) \ for_each_online_target_cpu_in_set( \ exec_on_get_ctdp_cpu, isst_get_ctdp_##suffix, \ &ctdp, desc, &ctdp.object); \ else \ for_each_online_power_domain_in_set(exec_on_get_ctdp_cpu, \ isst_get_ctdp_##suffix, \ &ctdp, desc, \ &ctdp.object); \ isst_ctdp_display_information_end(outf); \ } _get_tdp_level("get-config-levels", levels, levels, "Max TDP level", NULL, NULL); _get_tdp_level("get-config-version", levels, version, "TDP version", NULL, NULL); _get_tdp_level("get-config-enabled", levels, enabled, "perf-profile enable status", "disabled", "enabled"); _get_tdp_level("get-config-current_level", levels, current_level, "Current TDP Level", NULL, NULL); _get_tdp_level("get-lock-status", levels, locked, "TDP lock status", "unlocked", "locked"); struct isst_pkg_ctdp clx_n_pkg_dev; static int clx_n_get_base_ratio(void) { FILE *fp; char *begin, *end, *line = NULL; char number[5]; float value = 0; size_t n = 0; fp = fopen("/proc/cpuinfo", "r"); if (!fp) err(-1, "cannot open /proc/cpuinfo\n"); while (getline(&line, &n, fp) > 0) { if (strstr(line, "model name")) { /* this is true for CascadeLake-N */ begin = strstr(line, "@ ") + 2; end = strstr(line, "GHz"); strncpy(number, begin, end - begin); value = atof(number) * 10; break; } } free(line); fclose(fp); return (int)(value); } static int clx_n_config(struct isst_id *id) { int i, ret; unsigned long cpu_bf; struct isst_pkg_ctdp_level_info *ctdp_level; struct isst_pbf_info *pbf_info; ctdp_level = 
&clx_n_pkg_dev.ctdp_level[0];
	pbf_info = &ctdp_level->pbf_info;

	ctdp_level->core_cpumask_size =
		alloc_cpu_set(&ctdp_level->core_cpumask);

	/* find the frequency base ratio */
	ctdp_level->tdp_ratio = clx_n_get_base_ratio();
	if (ctdp_level->tdp_ratio == 0) {
		debug_printf("CLX: cn base ratio is zero\n");
		ret = -1;
		goto error_ret;
	}

	/* find the high and low priority frequencies */
	pbf_info->p1_high = 0;
	pbf_info->p1_low = ~0;

	for (i = 0; i < topo_max_cpus; i++) {
		if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
			continue;

		if (!is_cpu_in_power_domain(i, id))
			continue;

		CPU_SET_S(i, ctdp_level->core_cpumask_size,
			  ctdp_level->core_cpumask);

		cpu_bf = parse_int_file(1,
					"/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency",
					i);
		if (cpu_bf > pbf_info->p1_high)
			pbf_info->p1_high = cpu_bf;
		if (cpu_bf < pbf_info->p1_low)
			pbf_info->p1_low = cpu_bf;
	}

	/* p1_high/p1_low still at their init values => no base_frequency was read */
	if (pbf_info->p1_high == ~0UL) {
		debug_printf("CLX: maximum base frequency not set\n");
		ret = -1;
		goto error_ret;
	}

	if (pbf_info->p1_low == 0) {
		debug_printf("CLX: minimum base frequency not set\n");
		ret = -1;
		goto error_ret;
	}

	/* convert frequencies back to ratios (kHz -> 100 MHz units) */
	pbf_info->p1_high = pbf_info->p1_high / 100000;
	pbf_info->p1_low = pbf_info->p1_low / 100000;

	/* create high priority cpu mask */
	pbf_info->core_cpumask_size = alloc_cpu_set(&pbf_info->core_cpumask);
	for (i = 0; i < topo_max_cpus; i++) {
		if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
			continue;

		if (!is_cpu_in_power_domain(i, id))
			continue;

		cpu_bf = parse_int_file(1,
					"/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency",
					i);
		cpu_bf = cpu_bf / 100000;
		if (cpu_bf == pbf_info->p1_high)
			CPU_SET_S(i, pbf_info->core_cpumask_size,
				  pbf_info->core_cpumask);
	}

	/* extra ctdp & pbf struct parameters */
	ctdp_level->processed = 1;
	ctdp_level->pbf_support = 1; /* PBF is always supported and enabled */
	ctdp_level->pbf_enabled = 1;
	ctdp_level->fact_support = 0; /* FACT is never supported */
	ctdp_level->fact_enabled = 0;

	return 0;

error_ret:
	
free_cpu_set(ctdp_level->core_cpumask);
	return ret;
}

/* CLX-N variant of config dump: only level 0 exists; uses clx_n_config(). */
static void dump_clx_n_config_for_cpu(struct isst_id *id, void *arg1, void *arg2,
				      void *arg3, void *arg4)
{
	int ret;

	if (tdp_level != 0xff && tdp_level != 0) {
		isst_display_error_info_message(1, "Invalid level", 1, tdp_level);
		exit(0);
	}

	ret = clx_n_config(id);
	if (ret) {
		debug_printf("clx_n_config failed");
	} else {
		struct isst_pkg_ctdp_level_info *ctdp_level;
		struct isst_pbf_info *pbf_info;

		ctdp_level = &clx_n_pkg_dev.ctdp_level[0];
		pbf_info = &ctdp_level->pbf_info;
		clx_n_pkg_dev.processed = 1;
		isst_ctdp_display_information(id, outf, tdp_level, &clx_n_pkg_dev);
		free_cpu_set(ctdp_level->core_cpumask);
		free_cpu_set(pbf_info->core_cpumask);
	}
}

/* Fetch and display perf-profile info for one CPU; exits the tool on failure. */
static void dump_isst_config_for_cpu(struct isst_id *id, void *arg1, void *arg2,
				     void *arg3, void *arg4)
{
	struct isst_pkg_ctdp pkg_dev;
	int ret;

	memset(&pkg_dev, 0, sizeof(pkg_dev));
	ret = isst_get_process_ctdp(id, tdp_level, &pkg_dev);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get perf-profile info on cpu", 1, id->cpu);
		isst_ctdp_display_information_end(outf);
		exit(1);
	} else {
		isst_ctdp_display_information(id, outf, tdp_level, &pkg_dev);
		isst_get_process_ctdp_complete(id, &pkg_dev);
	}
}

/* "perf-profile info" command: dump configuration for target CPUs or all domains. */
static void dump_isst_config(int arg)
{
	void *fn;

	if (cmd_help) {
		fprintf(stderr,
			"Print Intel(R) Speed Select Technology Performance profile configuration\n");
		fprintf(stderr,
			"including base frequency and turbo frequency configurations\n");
		fprintf(stderr, "Optional: -l|--level : Specify tdp level\n");
		fprintf(stderr,
			"\tIf no arguments, dump information for all TDP levels\n");
		exit(0);
	}

	/* CLX-N has no driver interface; use the /proc-cpuinfo based path */
	if (!is_clx_n_platform())
		fn = dump_isst_config_for_cpu;
	else
		fn = dump_clx_n_config_for_cpu;

	isst_ctdp_display_information_start(outf);

	if (max_target_cpus)
		for_each_online_target_cpu_in_set(fn, NULL, NULL, NULL, NULL);
	else
		for_each_online_power_domain_in_set(fn, NULL, NULL, NULL, NULL);

	isst_ctdp_display_information_end(outf);
}

static void adjust_scaling_max_from_base_freq(int cpu);
/*
 * Switch the domain to the requested TDP level; with -o also online/offline
 * (or cgroup-isolate) CPUs to match the new level's core mask. Exits the
 * tool on hard failures.
 */
static void set_tdp_level_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3,
				  void *arg4)
{
	struct isst_pkg_ctdp pkg_dev;
	int ret;

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	if (ret) {
		isst_display_error_info_message(1, "Get TDP level failed", 0, 0);
		isst_ctdp_display_information_end(outf);
		exit(1);
	}

	if (pkg_dev.current_level == tdp_level) {
		debug_printf("TDP level already set. Skipped\n");
		goto display_result;
	}

	ret = isst_set_tdp_level(id, tdp_level);
	if (ret) {
		isst_display_error_info_message(1, "Set TDP level failed", 0, 0);
		isst_ctdp_display_information_end(outf);
		exit(1);
	}

display_result:
	isst_display_result(id, outf, "perf-profile", "set_tdp_level", ret);
	if (force_online_offline && id->cpu >= 0) {
		struct isst_pkg_ctdp_level_info ctdp_level;

		/* Wait for updated base frequencies */
		usleep(2000);

		/* Adjusting uncore freq */
		isst_adjust_uncore_freq(id, tdp_level, &ctdp_level);

		fprintf(stderr, "Option is set to online/offline\n");
		ctdp_level.core_cpumask_size =
			alloc_cpu_set(&ctdp_level.core_cpumask);
		ret = isst_get_coremask_info(id, tdp_level, &ctdp_level);
		if (ret) {
			isst_display_error_info_message(1, "Can't get coremask, online/offline option is ignored", 0, 0);
			goto free_mask;
		}

		if (use_cgroupv2()) {
			int ret;

			fprintf(stderr, "Using cgroup v2 in lieu of online/offline\n");
			ret = enable_cpuset_controller();
			if (ret)
				goto use_offline;

			ret = isolate_cpus(id, ctdp_level.core_cpumask_size,
					   ctdp_level.core_cpumask, tdp_level);
			if (ret)
				goto use_offline;

			goto free_mask;
		}

use_offline:
		if (ctdp_level.cpu_count) {
			int i, max_cpus = get_topo_max_cpus();

			for (i = 0; i < max_cpus; ++i) {
				if (!is_cpu_in_power_domain(i, id))
					continue;
				if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
					fprintf(stderr, "online cpu %d\n", i);
					set_cpu_online_offline(i, 1);
					adjust_scaling_max_from_base_freq(i);
				} else {
					fprintf(stderr, "offline cpu %d\n", i);
					set_cpu_online_offline(i, 0);
				}
			}
		}
free_mask:
		free_cpu_set(ctdp_level.core_cpumask);
	}
}

static 
/* "perf-profile set-config-level" command entry point; requires -l. */
void set_tdp_level(int arg)
{
	if (cmd_help) {
		fprintf(stderr, "Set Config TDP level\n");
		fprintf(stderr,
			"\t Arguments: -l|--level : Specify tdp level\n");
		fprintf(stderr,
			"\t Optional Arguments: -o | online : online/offline for the tdp level\n");
		fprintf(stderr,
			"\t online/offline operation has limitations, refer to Linux hotplug documentation\n");
		exit(0);
	}

	if (tdp_level == 0xff) {
		isst_display_error_info_message(1, "Invalid command: specify tdp_level", 0, 0);
		exit(1);
	}
	isst_ctdp_display_information_start(outf);
	if (max_target_cpus)
		for_each_online_target_cpu_in_set(set_tdp_level_for_cpu, NULL,
						  NULL, NULL, NULL);
	else
		for_each_online_power_domain_in_set(set_tdp_level_for_cpu, NULL,
						    NULL, NULL, NULL);
	isst_ctdp_display_information_end(outf);
}

/* CLX-N variant of base-freq info dump, built from clx_n_config(). */
static void clx_n_dump_pbf_config_for_cpu(struct isst_id *id, void *arg1, void *arg2,
					  void *arg3, void *arg4)
{
	int ret;

	ret = clx_n_config(id);
	if (ret) {
		isst_display_error_info_message(1, "clx_n_config failed", 0, 0);
	} else {
		struct isst_pkg_ctdp_level_info *ctdp_level;
		struct isst_pbf_info *pbf_info;

		ctdp_level = &clx_n_pkg_dev.ctdp_level[0];
		pbf_info = &ctdp_level->pbf_info;
		isst_pbf_display_information(id, outf, tdp_level, pbf_info);
		free_cpu_set(ctdp_level->core_cpumask);
		free_cpu_set(pbf_info->core_cpumask);
	}
}

/* Fetch and display base-freq (PBF) info for one CPU; exits the tool on failure. */
static void dump_pbf_config_for_cpu(struct isst_id *id, void *arg1, void *arg2,
				    void *arg3, void *arg4)
{
	struct isst_pbf_info pbf_info;
	int ret;

	ret = isst_get_pbf_info(id, tdp_level, &pbf_info);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get base-freq info at this level", 1, tdp_level);
		isst_ctdp_display_information_end(outf);
		exit(1);
	} else {
		isst_pbf_display_information(id, outf, tdp_level, &pbf_info);
		free_cpu_set(pbf_info.core_cpumask);
	}
}

/* "base-freq info" command: dump PBF configuration for a TDP level. */
static void dump_pbf_config(int arg)
{
	void *fn;

	if (cmd_help) {
		fprintf(stderr,
			"Print Intel(R) Speed Select Technology base frequency configuration for a TDP level\n");
		fprintf(stderr,
			"\tArguments: -l|--level : Specify tdp level\n");
		exit(0);
	}
	
if (tdp_level == 0xff) {
		isst_display_error_info_message(1, "Invalid command: specify tdp_level", 0, 0);
		exit(1);
	}

	if (!is_clx_n_platform())
		fn = dump_pbf_config_for_cpu;
	else
		fn = clx_n_dump_pbf_config_for_cpu;

	isst_ctdp_display_information_start(outf);

	if (max_target_cpus)
		for_each_online_target_cpu_in_set(fn, NULL, NULL, NULL, NULL);
	else
		for_each_online_power_domain_in_set(fn, NULL, NULL, NULL, NULL);

	isst_ctdp_display_information_end(outf);
}

/* Read-modify-write one CLOS: set epp, weight and min/max ratios. */
static int set_clos_param(struct isst_id *id, int clos, int epp, int wt, int min, int max)
{
	struct isst_clos_config clos_config;
	int ret;

	ret = isst_pm_get_clos(id, clos, &clos_config);
	if (ret) {
		isst_display_error_info_message(1, "isst_pm_get_clos failed", 0, 0);
		return ret;
	}
	clos_config.clos_min = min;
	clos_config.clos_max = max;
	clos_config.epp = epp;
	clos_config.clos_prop_prio = wt;
	ret = isst_set_clos(id, clos, &clos_config);
	if (ret) {
		isst_display_error_info_message(1, "isst_set_clos failed", 0, 0);
		return ret;
	}

	return 0;
}

/*
 * Write @freq (kHz) to scaling_max_freq (@max != 0) or scaling_min_freq
 * of @cpu. Returns 0 on success, negative on open/write failure.
 */
static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
{
	char buffer[128], freq_str[16];
	int fd, ret, len;

	if (max)
		snprintf(buffer, sizeof(buffer),
			 "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
	else
		snprintf(buffer, sizeof(buffer),
			 "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);

	fd = open(buffer, O_WRONLY);
	if (fd < 0)
		return fd;

	snprintf(freq_str, sizeof(freq_str), "%d", freq);
	len = strlen(freq_str);
	ret = write(fd, freq_str, len);
	if (ret == -1) {
		close(fd);
		return ret;
	}
	close(fd);

	return 0;
}

/* Non-zero when intel_pstate reports turbo disabled. */
static int no_turbo(void)
{
	return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
}

/* Raise scaling_max_freq to the CPU's base frequency if it is below it (or turbo is off). */
static void adjust_scaling_max_from_base_freq(int cpu)
{
	int base_freq, scaling_max_freq;

	scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
	base_freq = get_cpufreq_base_freq(cpu);
	if (scaling_max_freq < base_freq || no_turbo())
		set_cpufreq_scaling_min_max(cpu, 1, base_freq);
}

static void 
adjust_scaling_min_from_base_freq(int cpu) { int base_freq, scaling_min_freq; scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu); base_freq = get_cpufreq_base_freq(cpu); if (scaling_min_freq < base_freq) set_cpufreq_scaling_min_max(cpu, 0, base_freq); } static int set_clx_pbf_cpufreq_scaling_min_max(struct isst_id *id) { struct isst_pkg_ctdp_level_info *ctdp_level; struct isst_pbf_info *pbf_info; int i, freq, freq_high, freq_low; int ret; ret = clx_n_config(id); if (ret) { debug_printf("cpufreq_scaling_min_max failed for CLX"); return ret; } ctdp_level = &clx_n_pkg_dev.ctdp_level[0]; pbf_info = &ctdp_level->pbf_info; freq_high = pbf_info->p1_high * 100000; freq_low = pbf_info->p1_low * 100000; for (i = 0; i < get_topo_max_cpus(); ++i) { if (!is_cpu_in_power_domain(i, id)) continue; if (CPU_ISSET_S(i, pbf_info->core_cpumask_size, pbf_info->core_cpumask)) freq = freq_high; else freq = freq_low; set_cpufreq_scaling_min_max(i, 1, freq); set_cpufreq_scaling_min_max(i, 0, freq); } return 0; } static int set_cpufreq_scaling_min_max_from_cpuinfo(int cpu, int cpuinfo_max, int scaling_max) { char buffer[128], min_freq[16]; int fd, ret, len; if (!CPU_ISSET_S(cpu, present_cpumask_size, present_cpumask)) return -1; if (cpuinfo_max) snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu); else snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_min_freq", cpu); fd = open(buffer, O_RDONLY); if (fd < 0) return fd; len = read(fd, min_freq, sizeof(min_freq)); close(fd); if (len < 0) return len; if (scaling_max) snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu); else snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu); fd = open(buffer, O_WRONLY); if (fd < 0) return fd; min_freq[15] = '\0'; len = strlen(min_freq); ret = write(fd, min_freq, len); if (ret == -1) { close(fd); return 
ret;
	}
	close(fd);

	return 0;
}

/* For every online CPU in domain @id: max = base freq, min = cpuinfo_max, then re-apply base. */
static void set_scaling_min_to_cpuinfo_max(struct isst_id *id)
{
	int i;

	if (id->cpu < 0)
		return;

	for (i = 0; i < get_topo_max_cpus(); ++i) {
		if (!is_cpu_in_power_domain(i, id))
			continue;

		if (is_cpu_online(i) != 1)
			continue;

		adjust_scaling_max_from_base_freq(i);
		set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
		adjust_scaling_min_from_base_freq(i);
	}
}

/* For every online CPU in domain @id: restore scaling_min_freq to cpuinfo_min. */
static void set_scaling_min_to_cpuinfo_min(struct isst_id *id)
{
	int i;

	if (id->cpu < 0)
		return;

	for (i = 0; i < get_topo_max_cpus(); ++i) {
		if (!is_cpu_in_power_domain(i, id))
			continue;

		if (is_cpu_online(i) != 1)
			continue;

		adjust_scaling_max_from_base_freq(i);
		set_cpufreq_scaling_min_max_from_cpuinfo(i, 0, 0);
	}
}

/* For every CPU in domain @id: restore scaling_max_freq to cpuinfo_max. */
static void set_scaling_max_to_cpuinfo_max(struct isst_id *id)
{
	int i;

	for (i = 0; i < get_topo_max_cpus(); ++i) {
		if (!is_cpu_in_power_domain(i, id))
			continue;

		set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 1);
	}
}

/*
 * Configure CLOS 0 (high priority, min = @min_high) and CLOS 1-3
 * (low priority, min = @min_low), then associate CPUs in @cpu_mask with
 * CLOS 0 and all other CPUs of the domain with CLOS 3.
 */
static int set_core_priority_and_min(struct isst_id *id, int mask_size,
				     cpu_set_t *cpu_mask, int min_high,
				     int min_low)
{
	int ret, i;

	if (!CPU_COUNT_S(mask_size, cpu_mask))
		return -1;

	ret = set_clos_param(id, 0, 0, 0, min_high, 0xff);
	if (ret)
		return ret;

	ret = set_clos_param(id, 1, 15, 15, min_low, 0xff);
	if (ret)
		return ret;

	ret = set_clos_param(id, 2, 15, 15, min_low, 0xff);
	if (ret)
		return ret;

	ret = set_clos_param(id, 3, 15, 15, min_low, 0xff);
	if (ret)
		return ret;

	for (i = 0; i < get_topo_max_cpus(); ++i) {
		int clos;
		struct isst_id tid;

		if (!is_cpu_in_power_domain(i, id))
			continue;

		if (CPU_ISSET_S(i, mask_size, cpu_mask))
			clos = 0;
		else
			clos = 3;

		debug_printf("Associate cpu: %d clos: %d\n", i, clos);
		set_isst_id(&tid, i);
		ret = isst_clos_associate(&tid, clos);
		if (ret) {
			isst_display_error_info_message(1, "isst_clos_associate failed", 0, 0);
			return ret;
		}
	}

	return 0;
}

/*
 * Auto-mode helper for PBF enable: set core-power priorities from the PBF
 * high-priority core mask of the current TDP level and enable CLOS.
 */
static int set_pbf_core_power(struct isst_id *id)
{
	struct isst_pbf_info pbf_info;
	struct isst_pkg_ctdp pkg_dev;
	int ret;

	if (id->cpu < 0)
		return 0;

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	
if (ret) {
		debug_printf("isst_get_ctdp_levels failed");
		return ret;
	}
	debug_printf("Current_level: %d\n", pkg_dev.current_level);

	ret = isst_get_pbf_info(id, pkg_dev.current_level, &pbf_info);
	if (ret) {
		debug_printf("isst_get_pbf_info failed");
		return ret;
	}
	debug_printf("p1_high: %d p1_low: %d\n", pbf_info.p1_high,
		     pbf_info.p1_low);

	ret = set_core_priority_and_min(id, pbf_info.core_cpumask_size,
					pbf_info.core_cpumask,
					pbf_info.p1_high, pbf_info.p1_low);
	if (ret) {
		debug_printf("set_core_priority_and_min failed");
		return ret;
	}

	ret = isst_pm_qos_config(id, 1, 1);
	if (ret) {
		debug_printf("isst_pm_qos_config failed");
		return ret;
	}

	return 0;
}

/*
 * Enable (*arg4 != 0) or disable SST-BF on the domain. On CLX-N this only
 * adjusts cpufreq scaling limits; elsewhere it toggles the hardware feature,
 * optionally with core-power auto configuration.
 */
static void set_pbf_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3,
			    void *arg4)
{
	struct isst_pkg_ctdp_level_info ctdp_level;
	struct isst_pkg_ctdp pkg_dev;
	int ret;
	int status = *(int *)arg4;

	if (is_clx_n_platform()) {
		ret = 0;
		if (status) {
			set_clx_pbf_cpufreq_scaling_min_max(id);
		} else {
			set_scaling_max_to_cpuinfo_max(id);
			set_scaling_min_to_cpuinfo_min(id);
		}
		goto disp_result;
	}

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get number of levels", 0, 0);
		goto disp_result;
	}

	ret = isst_get_ctdp_control(id, pkg_dev.current_level, &ctdp_level);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get current level", 0, 0);
		goto disp_result;
	}

	if (!ctdp_level.pbf_support) {
		isst_display_error_info_message(1, "base-freq feature is not present at this level", 1, pkg_dev.current_level);
		ret = -1;
		goto disp_result;
	}

	if (auto_mode && status) {
		ret = set_pbf_core_power(id);
		if (ret)
			goto disp_result;
	}

	ret = isst_set_pbf_fact_status(id, 1, status);
	if (ret) {
		debug_printf("isst_set_pbf_fact_status failed");
		/* roll back the core-power config applied above */
		if (auto_mode)
			isst_pm_qos_config(id, 0, 0);
	} else {
		if (auto_mode) {
			if (status)
				set_scaling_min_to_cpuinfo_max(id);
			else
				set_scaling_min_to_cpuinfo_min(id);
		}
	}

	if (auto_mode && !status)
		isst_pm_qos_config(id, 0, 1);
disp_result:
	if (status)
		
isst_display_result(id, outf, "base-freq", "enable",
				    ret);
	else
		isst_display_result(id, outf, "base-freq", "disable",
				    ret);
}

/* "base-freq enable|disable" command entry point (arg non-zero = enable). */
static void set_pbf_enable(int arg)
{
	int enable = arg;

	if (cmd_help) {
		if (enable) {
			fprintf(stderr,
				"Enable Intel Speed Select Technology base frequency feature\n");
			if (is_clx_n_platform()) {
				fprintf(stderr,
					"\tOn this platform this command doesn't enable feature in the hardware.\n");
				fprintf(stderr,
					"\tIt updates the cpufreq scaling_min_freq to match cpufreq base_frequency.\n");
				exit(0);
			}
			fprintf(stderr,
				"\tOptional Arguments: -a|--auto : Use priority of cores to set core-power associations\n");
		} else {
			if (is_clx_n_platform()) {
				fprintf(stderr,
					"\tOn this platform this command doesn't disable feature in the hardware.\n");
				fprintf(stderr,
					"\tIt updates the cpufreq scaling_min_freq to match cpuinfo_min_freq\n");
				exit(0);
			}
			fprintf(stderr,
				"Disable Intel Speed Select Technology base frequency feature\n");
			fprintf(stderr,
				"\tOptional Arguments: -a|--auto : Also disable core-power associations\n");
		}
		exit(0);
	}

	isst_ctdp_display_information_start(outf);
	if (max_target_cpus)
		for_each_online_target_cpu_in_set(set_pbf_for_cpu, NULL, NULL,
						  NULL, &enable);
	else
		for_each_online_power_domain_in_set(set_pbf_for_cpu, NULL, NULL,
						    NULL, &enable);
	isst_ctdp_display_information_end(outf);
}

/* Fetch and display turbo-freq (SST-TF) info for one CPU; exits the tool on failure. */
static void dump_fact_config_for_cpu(struct isst_id *id, void *arg1, void *arg2,
				     void *arg3, void *arg4)
{
	struct isst_fact_info fact_info;
	int ret;

	ret = isst_get_fact_info(id, tdp_level, fact_bucket, &fact_info);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get turbo-freq info at this level", 1, tdp_level);
		isst_ctdp_display_information_end(outf);
		exit(1);
	} else {
		isst_fact_display_information(id, outf, tdp_level, fact_bucket,
					      fact_avx, &fact_info);
	}
}

/* "turbo-freq info" command: dump SST-TF configuration for a TDP level. */
static void dump_fact_config(int arg)
{
	if (cmd_help) {
		fprintf(stderr,
			"Print complete Intel Speed Select Technology turbo frequency configuration for a TDP level. Other arguments are optional.\n");
		fprintf(stderr,
			"\tArguments: -l|--level : Specify tdp level\n");
		fprintf(stderr,
			"\tArguments: -b|--bucket : Bucket index to dump\n");
		fprintf(stderr,
			"\tArguments: -r|--trl-type : Specify trl type: sse|avx2|avx512\n");
		exit(0);
	}

	if (tdp_level == 0xff) {
		isst_display_error_info_message(1, "Invalid command: specify tdp_level\n", 0, 0);
		exit(1);
	}

	isst_ctdp_display_information_start(outf);
	if (max_target_cpus)
		for_each_online_target_cpu_in_set(dump_fact_config_for_cpu,
						  NULL, NULL, NULL, NULL);
	else
		for_each_online_power_domain_in_set(dump_fact_config_for_cpu, NULL,
						    NULL, NULL, NULL);
	isst_ctdp_display_information_end(outf);
}

/*
 * Enable (*arg4 != 0) or disable SST-TF on the domain. Enabling requires
 * turbo mode on and fact support at the current level; it also programs the
 * turbo ratio limit (TRL) and, with -a, core-power QoS.
 */
static void set_fact_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3,
			     void *arg4)
{
	struct isst_pkg_ctdp_level_info ctdp_level;
	struct isst_pkg_ctdp pkg_dev;
	int ret;
	int status = *(int *)arg4;

	if (status && no_turbo()) {
		isst_display_error_info_message(1, "Turbo mode is disabled", 0, 0);
		ret = -1;
		goto disp_results;
	}

	ret = isst_get_ctdp_levels(id, &pkg_dev);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get number of levels", 0, 0);
		goto disp_results;
	}

	ret = isst_get_ctdp_control(id, pkg_dev.current_level, &ctdp_level);
	if (ret) {
		isst_display_error_info_message(1, "Failed to get current level", 0, 0);
		goto disp_results;
	}

	if (!ctdp_level.fact_support) {
		isst_display_error_info_message(1, "turbo-freq feature is not present at this level", 1, pkg_dev.current_level);
		ret = -1;
		goto disp_results;
	}

	if (status) {
		ret = isst_pm_qos_config(id, 1, 1);
		if (ret)
			goto disp_results;
	}

	ret = isst_set_pbf_fact_status(id, 0, status);
	if (ret) {
		debug_printf("isst_set_pbf_fact_status failed");
		if (auto_mode)
			isst_pm_qos_config(id, 0, 0);

		goto disp_results;
	}

	/* Set TRL */
	if (status) {
		struct isst_pkg_ctdp pkg_dev;

		ret = isst_get_ctdp_levels(id, &pkg_dev);
		if (!ret && id->cpu >= 0)
			ret = isst_set_trl(id, fact_trl);
		if (ret && auto_mode)
			isst_pm_qos_config(id, 0, 0);
	} else {
		if (auto_mode)
			
isst_pm_qos_config(id, 0, 0);
	}

disp_results:
	if (status) {
		isst_display_result(id, outf, "turbo-freq", "enable", ret);
		if (ret)
			fact_enable_fail = ret;
	} else {
		/* Since we modified TRL during Fact enable, restore it */
		isst_set_trl_from_current_tdp(id, fact_trl);
		isst_display_result(id, outf, "turbo-freq", "disable", ret);
	}
}

/*
 * "turbo-freq enable|disable" command entry point. With -a and a target CPU
 * list, also marks target CPUs (and their SMT siblings) high priority via
 * core-power CLOS association.
 */
static void set_fact_enable(int arg)
{
	int i, ret, enable = arg;
	struct isst_id id;

	if (cmd_help) {
		if (enable) {
			fprintf(stderr,
				"Enable Intel Speed Select Technology Turbo frequency feature\n");
			fprintf(stderr,
				"Optional: -t|--trl : Specify turbo ratio limit\n");
			fprintf(stderr,
				"\tOptional Arguments: -a|--auto : Designate specified target CPUs with");
			fprintf(stderr,
				"-C|--cpu option as as high priority using core-power feature\n");
		} else {
			fprintf(stderr,
				"Disable Intel Speed Select Technology turbo frequency feature\n");
			fprintf(stderr,
				"Optional: -t|--trl : Specify turbo ratio limit\n");
			fprintf(stderr,
				"\tOptional Arguments: -a|--auto : Also disable core-power associations\n");
		}
		exit(0);
	}

	isst_ctdp_display_information_start(outf);
	if (max_target_cpus)
		for_each_online_target_cpu_in_set(set_fact_for_cpu, NULL, NULL,
						  NULL, &enable);
	else
		for_each_online_power_domain_in_set(set_fact_for_cpu, NULL, NULL,
						    NULL, &enable);

	if (!fact_enable_fail && enable && auto_mode) {
		/*
		 * When we adjust CLOS param, we have to set for siblings also.
		 * So for each user specified CPU, also add the sibling
		 * in the present_cpu_mask. 
*/
		for (i = 0; i < get_topo_max_cpus(); ++i) {
			char buffer[128], sibling_list[128], *cpu_str;
			int fd, len;

			if (!CPU_ISSET_S(i, target_cpumask_size, target_cpumask))
				continue;

			snprintf(buffer, sizeof(buffer),
				 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i);

			fd = open(buffer, O_RDONLY);
			if (fd < 0)
				continue;

			/* NOTE(review): read() does not NUL-terminate and only
			 * index 127 is forced to '\0' below; a short read can
			 * leave uninitialized bytes before it — verify. */
			len = read(fd, sibling_list, sizeof(sibling_list));
			close(fd);

			if (len < 0)
				continue;

			sibling_list[127] = '\0';
			cpu_str = strtok(sibling_list, ",");

			while (cpu_str != NULL) {
				int cpu;

				sscanf(cpu_str, "%d", &cpu);
				CPU_SET_S(cpu, target_cpumask_size, target_cpumask);
				cpu_str = strtok(NULL, ",");
			}
		}

		/* CLOS 0 = high priority (targets), CLOS 3 = low priority (rest) */
		for (i = 0; i < get_topo_max_cpus(); ++i) {
			int clos;

			if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
				continue;

			if (is_cpu_online(i) != 1)
				continue;

			set_isst_id(&id, i);
			ret = set_clos_param(&id, 0, 0, 0, 0, 0xff);
			if (ret)
				goto error_disp;

			ret = set_clos_param(&id, 1, 15, 15, 0, 0xff);
			if (ret)
				goto error_disp;

			ret = set_clos_param(&id, 2, 15, 15, 0, 0xff);
			if (ret)
				goto error_disp;

			ret = set_clos_param(&id, 3, 15, 15, 0, 0xff);
			if (ret)
				goto error_disp;

			if (CPU_ISSET_S(i, target_cpumask_size, target_cpumask))
				clos = 0;
			else
				clos = 3;

			debug_printf("Associate cpu: %d clos: %d\n", i, clos);
			ret = isst_clos_associate(&id, clos);
			if (ret)
				goto error_disp;
		}
		set_isst_id(&id, -1);
		isst_display_result(&id, outf, "turbo-freq --auto", "enable", 0);
	}

	isst_ctdp_display_information_end(outf);

	return;

error_disp:
	isst_display_result(&id, outf, "turbo-freq --auto", "enable", ret);
	isst_ctdp_display_information_end(outf);

}

/*
 * Enable (*arg4 != 0) or disable core-power (CLOS) QoS for the domain.
 * SKX-based platforms only support ordered priority.
 */
static void enable_clos_qos_config(struct isst_id *id, void *arg1, void *arg2, void *arg3,
				   void *arg4)
{
	int ret;
	int status = *(int *)arg4;

	if (is_skx_based_platform())
		clos_priority_type = 1;

	ret = isst_pm_qos_config(id, status, clos_priority_type);
	if (ret)
		isst_display_error_info_message(1, "isst_pm_qos_config failed", 0, 0);

	if (status)
		isst_display_result(id, outf, "core-power", "enable", ret);
	else
		isst_display_result(id, 
outf, "core-power", "disable", ret); } static void set_clos_enable(int arg) { int enable = arg; if (cmd_help) { if (enable) { fprintf(stderr, "Enable core-power for a package/die\n"); if (!is_skx_based_platform()) { fprintf(stderr, "\tClos Enable: Specify priority type with [--priority|-p]\n"); fprintf(stderr, "\t\t 0: Proportional, 1: Ordered\n"); } } else { fprintf(stderr, "Disable core-power: [No command arguments are required]\n"); } exit(0); } if (enable && cpufreq_sysfs_present()) { fprintf(stderr, "cpufreq subsystem and core-power enable will interfere with each other!\n"); } isst_ctdp_display_information_start(outf); if (max_target_cpus) for_each_online_target_cpu_in_set(enable_clos_qos_config, NULL, NULL, NULL, &enable); else for_each_online_power_domain_in_set(enable_clos_qos_config, NULL, NULL, NULL, &enable); isst_ctdp_display_information_end(outf); } static void dump_clos_config_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) { struct isst_clos_config clos_config; int ret; ret = isst_pm_get_clos(id, current_clos, &clos_config); if (ret) isst_display_error_info_message(1, "isst_pm_get_clos failed", 0, 0); else isst_clos_display_information(id, outf, current_clos, &clos_config); } static void dump_clos_config(int arg) { if (cmd_help) { fprintf(stderr, "Print Intel Speed Select Technology core power configuration\n"); fprintf(stderr, "\tArguments: [-c | --clos]: Specify clos id\n"); exit(0); } if (current_clos < 0 || current_clos > 3) { isst_display_error_info_message(1, "Invalid clos id\n", 0, 0); isst_ctdp_display_information_end(outf); exit(0); } isst_ctdp_display_information_start(outf); if (max_target_cpus) for_each_online_target_cpu_in_set(dump_clos_config_for_cpu, NULL, NULL, NULL, NULL); else for_each_online_power_domain_in_set(dump_clos_config_for_cpu, NULL, NULL, NULL, NULL); isst_ctdp_display_information_end(outf); } static void get_clos_info_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) 
{ int enable, ret, prio_type; ret = isst_clos_get_clos_information(id, &enable, &prio_type); if (ret) isst_display_error_info_message(1, "isst_clos_get_info failed", 0, 0); else { int cp_state, cp_cap; isst_read_pm_config(id, &cp_state, &cp_cap); isst_clos_display_clos_information(id, outf, enable, prio_type, cp_state, cp_cap); } } static void dump_clos_info(int arg) { if (cmd_help) { fprintf(stderr, "Print Intel Speed Select Technology core power information\n"); fprintf(stderr, "\t Optionally specify targeted cpu id with [--cpu|-c]\n"); exit(0); } isst_ctdp_display_information_start(outf); if (max_target_cpus) for_each_online_target_cpu_in_set(get_clos_info_for_cpu, NULL, NULL, NULL, NULL); else for_each_online_power_domain_in_set(get_clos_info_for_cpu, NULL, NULL, NULL, NULL); isst_ctdp_display_information_end(outf); } static void set_clos_config_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) { struct isst_clos_config clos_config; int ret; if (id->cpu < 0) return; clos_config.epp = clos_epp; clos_config.clos_prop_prio = clos_prop_prio; clos_config.clos_min = clos_min; clos_config.clos_max = clos_max; clos_config.clos_desired = clos_desired; ret = isst_set_clos(id, current_clos, &clos_config); if (ret) isst_display_error_info_message(1, "isst_set_clos failed", 0, 0); else isst_display_result(id, outf, "core-power", "config", ret); } static void set_clos_config(int arg) { if (cmd_help) { fprintf(stderr, "Set core-power configuration for one of the four clos ids\n"); fprintf(stderr, "\tSpecify targeted clos id with [--clos|-c]\n"); if (!is_skx_based_platform()) { fprintf(stderr, "\tSpecify clos EPP with [--epp|-e]\n"); fprintf(stderr, "\tSpecify clos Proportional Priority [--weight|-w]\n"); } fprintf(stderr, "\tSpecify clos min in MHz with [--min|-n]\n"); fprintf(stderr, "\tSpecify clos max in MHz with [--max|-m]\n"); exit(0); } if (current_clos < 0 || current_clos > 3) { isst_display_error_info_message(1, "Invalid clos id\n", 0, 0); 
exit(0); } if (!is_skx_based_platform() && (clos_epp < 0 || clos_epp > 0x0F)) { fprintf(stderr, "clos epp is not specified or invalid, default: 0\n"); clos_epp = 0; } if (!is_skx_based_platform() && (clos_prop_prio < 0 || clos_prop_prio > 0x0F)) { fprintf(stderr, "clos frequency weight is not specified or invalid, default: 0\n"); clos_prop_prio = 0; } if (clos_min < 0) { fprintf(stderr, "clos min is not specified, default: 0\n"); clos_min = 0; } if (clos_max < 0) { fprintf(stderr, "clos max is not specified, default: Max frequency (ratio 0xff)\n"); clos_max = 0xff; } if (clos_desired) { fprintf(stderr, "clos desired is not supported on this platform\n"); clos_desired = 0x00; } isst_ctdp_display_information_start(outf); if (max_target_cpus) for_each_online_target_cpu_in_set(set_clos_config_for_cpu, NULL, NULL, NULL, NULL); else for_each_online_power_domain_in_set(set_clos_config_for_cpu, NULL, NULL, NULL, NULL); isst_ctdp_display_information_end(outf); } static void set_clos_assoc_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) { int ret; ret = isst_clos_associate(id, current_clos); if (ret) debug_printf("isst_clos_associate failed"); else isst_display_result(id, outf, "core-power", "assoc", ret); } static void set_clos_assoc(int arg) { if (cmd_help) { fprintf(stderr, "Associate a clos id to a CPU\n"); fprintf(stderr, "\tSpecify targeted clos id with [--clos|-c]\n"); fprintf(stderr, "\tFor example to associate clos 1 to CPU 0: issue\n"); fprintf(stderr, "\tintel-speed-select --cpu 0 core-power assoc --clos 1\n"); exit(0); } if (current_clos < 0 || current_clos > 3) { isst_display_error_info_message(1, "Invalid clos id\n", 0, 0); exit(0); } isst_ctdp_display_information_start(outf); if (max_target_cpus) for_each_online_target_cpu_in_set(set_clos_assoc_for_cpu, NULL, NULL, NULL, NULL); else { isst_display_error_info_message(1, "Invalid target cpu. 
Specify with [-c|--cpu]", 0, 0); } isst_ctdp_display_information_end(outf); } static void get_clos_assoc_for_cpu(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) { int clos, ret; ret = isst_clos_get_assoc_status(id, &clos); if (ret) isst_display_error_info_message(1, "isst_clos_get_assoc_status failed", 0, 0); else isst_clos_display_assoc_information(id, outf, clos); } static void get_clos_assoc(int arg) { if (cmd_help) { fprintf(stderr, "Get associate clos id to a CPU\n"); fprintf(stderr, "\tSpecify targeted cpu id with [--cpu|-c]\n"); exit(0); } if (!max_target_cpus) { isst_display_error_info_message(1, "Invalid target cpu. Specify with [-c|--cpu]", 0, 0); exit(0); } isst_ctdp_display_information_start(outf); for_each_online_target_cpu_in_set(get_clos_assoc_for_cpu, NULL, NULL, NULL, NULL); isst_ctdp_display_information_end(outf); } static void set_turbo_mode_for_cpu(struct isst_id *id, int status) { int base_freq; if (status) { base_freq = get_cpufreq_base_freq(id->cpu); set_cpufreq_scaling_min_max(id->cpu, 1, base_freq); } else { set_scaling_max_to_cpuinfo_max(id); } if (status) { isst_display_result(id, outf, "turbo-mode", "enable", 0); } else { isst_display_result(id, outf, "turbo-mode", "disable", 0); } } static void set_turbo_mode(int arg) { int i, enable = arg; struct isst_id id; if (cmd_help) { if (enable) fprintf(stderr, "Set turbo mode enable\n"); else fprintf(stderr, "Set turbo mode disable\n"); exit(0); } isst_ctdp_display_information_start(outf); for (i = 0; i < topo_max_cpus; ++i) { int online; if (i) online = parse_int_file( 1, "/sys/devices/system/cpu/cpu%d/online", i); else online = 1; /* online entry for CPU 0 needs some special configs */ if (online) { set_isst_id(&id, i); set_turbo_mode_for_cpu(&id, enable); } } isst_ctdp_display_information_end(outf); } static void get_set_trl(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) { unsigned long long trl; int set = *(int *)arg4; int ret; if (set && !fact_trl) 
{ isst_display_error_info_message(1, "Invalid TRL. Specify with [-t|--trl]", 0, 0); exit(0); } if (set) { ret = isst_set_trl(id, fact_trl); isst_display_result(id, outf, "turbo-mode", "set-trl", ret); return; } ret = isst_get_trl(id, &trl); if (ret) isst_display_result(id, outf, "turbo-mode", "get-trl", ret); else isst_trl_display_information(id, outf, trl); } static void process_trl(int arg) { if (cmd_help) { if (arg) { fprintf(stderr, "Set TRL (turbo ratio limits)\n"); fprintf(stderr, "\t t|--trl: Specify turbo ratio limit for setting TRL\n"); } else { fprintf(stderr, "Get TRL (turbo ratio limits)\n"); } exit(0); } isst_ctdp_display_information_start(outf); if (max_target_cpus) for_each_online_target_cpu_in_set(get_set_trl, NULL, NULL, NULL, &arg); else for_each_online_power_domain_in_set(get_set_trl, NULL, NULL, NULL, &arg); isst_ctdp_display_information_end(outf); } static struct process_cmd_struct clx_n_cmds[] = { { "perf-profile", "info", dump_isst_config, 0 }, { "base-freq", "info", dump_pbf_config, 0 }, { "base-freq", "enable", set_pbf_enable, 1 }, { "base-freq", "disable", set_pbf_enable, 0 }, { NULL, NULL, NULL, 0 } }; static struct process_cmd_struct isst_cmds[] = { { "perf-profile", "get-lock-status", get_tdp_locked, 0 }, { "perf-profile", "get-config-levels", get_tdp_levels, 0 }, { "perf-profile", "get-config-version", get_tdp_version, 0 }, { "perf-profile", "get-config-enabled", get_tdp_enabled, 0 }, { "perf-profile", "get-config-current-level", get_tdp_current_level, 0 }, { "perf-profile", "set-config-level", set_tdp_level, 0 }, { "perf-profile", "info", dump_isst_config, 0 }, { "base-freq", "info", dump_pbf_config, 0 }, { "base-freq", "enable", set_pbf_enable, 1 }, { "base-freq", "disable", set_pbf_enable, 0 }, { "turbo-freq", "info", dump_fact_config, 0 }, { "turbo-freq", "enable", set_fact_enable, 1 }, { "turbo-freq", "disable", set_fact_enable, 0 }, { "core-power", "info", dump_clos_info, 0 }, { "core-power", "enable", set_clos_enable, 1 }, { 
"core-power", "disable", set_clos_enable, 0 }, { "core-power", "config", set_clos_config, 0 }, { "core-power", "get-config", dump_clos_config, 0 }, { "core-power", "assoc", set_clos_assoc, 0 }, { "core-power", "get-assoc", get_clos_assoc, 0 }, { "turbo-mode", "enable", set_turbo_mode, 0 }, { "turbo-mode", "disable", set_turbo_mode, 1 }, { "turbo-mode", "get-trl", process_trl, 0 }, { "turbo-mode", "set-trl", process_trl, 1 }, { NULL, NULL, NULL } }; /* * parse cpuset with following syntax * 1,2,4..6,8-10 and set bits in cpu_subset */ void parse_cpu_command(char *optarg) { unsigned int start, end, invalid_count; char *next; next = optarg; invalid_count = 0; while (next && *next) { if (*next == '-') /* no negative cpu numbers */ goto error; start = strtoul(next, &next, 10); if (max_target_cpus < MAX_CPUS_IN_ONE_REQ) target_cpus[max_target_cpus++] = start; else invalid_count = 1; if (*next == '\0') break; if (*next == ',') { next += 1; continue; } if (*next == '-') { next += 1; /* start range */ } else if (*next == '.') { next += 1; if (*next == '.') next += 1; /* start range */ else goto error; } end = strtoul(next, &next, 10); if (end <= start) goto error; while (++start <= end) { if (max_target_cpus < MAX_CPUS_IN_ONE_REQ) target_cpus[max_target_cpus++] = start; else invalid_count = 1; } if (*next == ',') next += 1; else if (*next != '\0') goto error; } if (invalid_count) { isst_ctdp_display_information_start(outf); isst_display_error_info_message(1, "Too many CPUs in one request: max is", 1, MAX_CPUS_IN_ONE_REQ - 1); isst_ctdp_display_information_end(outf); exit(-1); } #ifdef DEBUG { int i; for (i = 0; i < max_target_cpus; ++i) printf("cpu [%d] in arg\n", target_cpus[i]); } #endif return; error: fprintf(stderr, "\"--cpu %s\" malformed\n", optarg); exit(-1); } static void parse_cmd_args(int argc, int start, char **argv) { int opt; int option_index; static struct option long_options[] = { { "bucket", required_argument, 0, 'b' }, { "level", required_argument, 0, 'l' }, 
{ "online", required_argument, 0, 'o' }, { "trl-type", required_argument, 0, 'r' }, { "trl", required_argument, 0, 't' }, { "help", no_argument, 0, 'h' }, { "clos", required_argument, 0, 'c' }, { "desired", required_argument, 0, 'd' }, { "epp", required_argument, 0, 'e' }, { "min", required_argument, 0, 'n' }, { "max", required_argument, 0, 'm' }, { "priority", required_argument, 0, 'p' }, { "weight", required_argument, 0, 'w' }, { "auto", no_argument, 0, 'a' }, { 0, 0, 0, 0 } }; option_index = start; optind = start + 1; while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:r:hoa", long_options, &option_index)) != -1) { switch (opt) { case 'a': auto_mode = 1; break; case 'b': fact_bucket = atoi(optarg); break; case 'h': cmd_help = 1; break; case 'l': tdp_level = atoi(optarg); break; case 'o': force_online_offline = 1; break; case 't': sscanf(optarg, "0x%llx", &fact_trl); break; case 'r': if (!strncmp(optarg, "sse", 3)) { fact_avx = 0x01; } else if (!strncmp(optarg, "avx2", 4)) { fact_avx = 0x02; } else if (!strncmp(optarg, "avx512", 6)) { fact_avx = 0x04; } else { fprintf(outf, "Invalid sse,avx options\n"); exit(1); } break; /* CLOS related */ case 'c': current_clos = atoi(optarg); break; case 'd': clos_desired = atoi(optarg); clos_desired /= isst_get_disp_freq_multiplier(); break; case 'e': clos_epp = atoi(optarg); if (is_skx_based_platform()) { isst_display_error_info_message(1, "epp can't be specified on this platform", 0, 0); exit(0); } break; case 'n': clos_min = atoi(optarg); clos_min /= isst_get_disp_freq_multiplier(); break; case 'm': clos_max = atoi(optarg); clos_max /= isst_get_disp_freq_multiplier(); break; case 'p': clos_priority_type = atoi(optarg); if (is_skx_based_platform() && !clos_priority_type) { isst_display_error_info_message(1, "Invalid clos priority type: proportional for this platform", 0, 0); exit(0); } break; case 'w': clos_prop_prio = atoi(optarg); if (is_skx_based_platform()) { isst_display_error_info_message(1, "weight can't be 
specified on this platform", 0, 0); exit(0); } break; default: printf("Unknown option: ignore\n"); } } if (argv[optind]) printf("Garbage at the end of command: ignore\n"); } static void isst_help(void) { printf("perf-profile:\tAn architectural mechanism that allows multiple optimized \n\ performance profiles per system via static and/or dynamic\n\ adjustment of core count, workload, Tjmax, and\n\ TDP, etc.\n"); printf("\nCommands : For feature=perf-profile\n"); printf("\tinfo\n"); if (!is_clx_n_platform()) { printf("\tget-lock-status\n"); printf("\tget-config-levels\n"); printf("\tget-config-version\n"); printf("\tget-config-enabled\n"); printf("\tget-config-current-level\n"); printf("\tset-config-level\n"); } } static void pbf_help(void) { printf("base-freq:\tEnables users to increase guaranteed base frequency\n\ on certain cores (high priority cores) in exchange for lower\n\ base frequency on remaining cores (low priority cores).\n"); printf("\tcommand : info\n"); printf("\tcommand : enable\n"); printf("\tcommand : disable\n"); } static void fact_help(void) { printf("turbo-freq:\tEnables the ability to set different turbo ratio\n\ limits to cores based on priority.\n"); printf("\nCommand: For feature=turbo-freq\n"); printf("\tcommand : info\n"); printf("\tcommand : enable\n"); printf("\tcommand : disable\n"); } static void turbo_mode_help(void) { printf("turbo-mode:\tEnables users to enable/disable turbo mode by adjusting frequency settings. 
Also allows to get and set turbo ratio limits (TRL).\n"); printf("\tcommand : enable\n"); printf("\tcommand : disable\n"); printf("\tcommand : get-trl\n"); printf("\tcommand : set-trl\n"); } static void core_power_help(void) { printf("core-power:\tInterface that allows user to define per core/tile\n\ priority.\n"); printf("\nCommands : For feature=core-power\n"); printf("\tinfo\n"); printf("\tenable\n"); printf("\tdisable\n"); printf("\tconfig\n"); printf("\tget-config\n"); printf("\tassoc\n"); printf("\tget-assoc\n"); } struct process_cmd_help_struct { char *feature; void (*process_fn)(void); }; static struct process_cmd_help_struct isst_help_cmds[] = { { "perf-profile", isst_help }, { "base-freq", pbf_help }, { "turbo-freq", fact_help }, { "core-power", core_power_help }, { "turbo-mode", turbo_mode_help }, { NULL, NULL } }; static struct process_cmd_help_struct clx_n_help_cmds[] = { { "perf-profile", isst_help }, { "base-freq", pbf_help }, { NULL, NULL } }; void process_command(int argc, char **argv, struct process_cmd_help_struct *help_cmds, struct process_cmd_struct *cmds) { int i = 0, matched = 0; char *feature = argv[optind]; char *cmd = argv[optind + 1]; if (!feature || !cmd) return; debug_printf("feature name [%s] command [%s]\n", feature, cmd); if (!strcmp(cmd, "-h") || !strcmp(cmd, "--help")) { while (help_cmds[i].feature) { if (!strcmp(help_cmds[i].feature, feature)) { help_cmds[i].process_fn(); exit(0); } ++i; } } i = 0; while (cmds[i].feature) { if (!strcmp(cmds[i].feature, feature) && !strcmp(cmds[i].command, cmd)) { parse_cmd_args(argc, optind + 1, argv); cmds[i].process_fn(cmds[i].arg); matched = 1; break; } ++i; } if (!matched) fprintf(stderr, "Invalid command\n"); } static void usage(void) { if (is_clx_n_platform()) { fprintf(stderr, "\nThere is limited support of Intel Speed Select features on this platform.\n"); fprintf(stderr, "Everything is pre-configured using BIOS options, this tool can't enable any feature in the hardware.\n\n"); } 
printf("\nUsage:\n"); printf("intel-speed-select [OPTIONS] FEATURE COMMAND COMMAND_ARGUMENTS\n"); printf("\nUse this tool to enumerate and control the Intel Speed Select Technology features:\n"); if (is_clx_n_platform()) printf("\nFEATURE : [perf-profile|base-freq]\n"); else printf("\nFEATURE : [perf-profile|base-freq|turbo-freq|core-power|turbo-mode]\n"); printf("\nFor help on each feature, use -h|--help\n"); printf("\tFor example: intel-speed-select perf-profile -h\n"); printf("\nFor additional help on each command for a feature, use --h|--help\n"); printf("\tFor example: intel-speed-select perf-profile get-lock-status -h\n"); printf("\t\t This will print help for the command \"get-lock-status\" for the feature \"perf-profile\"\n"); printf("\nOPTIONS\n"); printf("\t[-c|--cpu] : logical cpu number\n"); printf("\t\tDefault: Die scoped for all dies in the system with multiple dies/package\n"); printf("\t\t\t Or Package scoped for all Packages when each package contains one die\n"); printf("\t[-d|--debug] : Debug mode\n"); printf("\t[-f|--format] : output format [json|text]. Default: text\n"); printf("\t[-h|--help] : Print help\n"); printf("\t[-i|--info] : Print platform information\n"); printf("\t[-a|--all-cpus-online] : Force online every CPU in the system\n"); printf("\t[-o|--out] : Output file\n"); printf("\t\t\tDefault : stderr\n"); printf("\t[-p|--pause] : Delay between two mail box commands in milliseconds\n"); printf("\t[-r|--retry] : Retry count for mail box commands on failure, default 3\n"); printf("\t[-v|--version] : Print version\n"); printf("\t[-b|--oob : Start a daemon to process HFI events for perf profile change from Out of Band agent.\n"); printf("\t[-n|--no-daemon : Don't run as daemon. 
By default --oob will turn on daemon mode\n"); printf("\t[-w|--delay : Delay for reading config level state change in OOB poll mode.\n"); printf("\t[-g|--cgroupv2 : Try to use cgroup v2 CPU isolation instead of CPU online/offline.\n"); printf("\nResult format\n"); printf("\tResult display uses a common format for each command:\n"); printf("\tResults are formatted in text/JSON with\n"); printf("\t\tPackage, Die, CPU, and command specific results.\n"); printf("\nExamples\n"); printf("\tTo get platform information:\n"); printf("\t\tintel-speed-select --info\n"); printf("\tTo get full perf-profile information dump:\n"); printf("\t\tintel-speed-select perf-profile info\n"); printf("\tTo get full base-freq information dump:\n"); printf("\t\tintel-speed-select base-freq info -l 0\n"); if (!is_clx_n_platform()) { printf("\tTo get full turbo-freq information dump:\n"); printf("\t\tintel-speed-select turbo-freq info -l 0\n"); } exit(1); } static void print_version(void) { fprintf(outf, "Version %s\n", version_str); exit(0); } static void cmdline(int argc, char **argv) { const char *pathname = "/dev/isst_interface"; char *ptr; FILE *fp; int opt, force_cpus_online = 0; int option_index = 0; int ret; int oob_mode = 0; int poll_interval = -1; int no_daemon = 0; int mbox_delay = 0, mbox_retries = 3; static struct option long_options[] = { { "all-cpus-online", no_argument, 0, 'a' }, { "cpu", required_argument, 0, 'c' }, { "debug", no_argument, 0, 'd' }, { "format", required_argument, 0, 'f' }, { "help", no_argument, 0, 'h' }, { "info", no_argument, 0, 'i' }, { "pause", required_argument, 0, 'p' }, { "out", required_argument, 0, 'o' }, { "retry", required_argument, 0, 'r' }, { "version", no_argument, 0, 'v' }, { "oob", no_argument, 0, 'b' }, { "no-daemon", no_argument, 0, 'n' }, { "poll-interval", required_argument, 0, 'w' }, { "cgroupv2", required_argument, 0, 'g' }, { 0, 0, 0, 0 } }; if (geteuid() != 0) { fprintf(stderr, "Must run as root\n"); exit(0); } ret = update_cpu_model(); 
if (ret) err(-1, "Invalid CPU model (%d)\n", cpu_model); printf("Intel(R) Speed Select Technology\n"); printf("Executing on CPU model:%d[0x%x]\n", cpu_model, cpu_model); if (!is_clx_n_platform()) { fp = fopen(pathname, "rb"); if (!fp) { fprintf(stderr, "Intel speed select drivers are not loaded on this system.\n"); fprintf(stderr, "Verify that kernel config includes CONFIG_INTEL_SPEED_SELECT_INTERFACE.\n"); fprintf(stderr, "If the config is included then this is not a supported platform.\n"); exit(0); } fclose(fp); } ret = isst_fill_platform_info(); if (ret) goto out; progname = argv[0]; while ((opt = getopt_long_only(argc, argv, "+c:df:hio:vabw:ng", long_options, &option_index)) != -1) { switch (opt) { case 'a': force_cpus_online = 1; break; case 'c': parse_cpu_command(optarg); break; case 'd': debug_flag = 1; printf("Debug Mode ON\n"); break; case 'f': if (!strncmp(optarg, "json", 4)) out_format_json = 1; break; case 'h': usage(); break; case 'i': isst_print_platform_information(); break; case 'o': if (outf) fclose(outf); outf = fopen_or_exit(optarg, "w"); break; case 'p': ret = strtol(optarg, &ptr, 10); if (!ret) fprintf(stderr, "Invalid pause interval, ignore\n"); else mbox_delay = ret; break; case 'r': ret = strtol(optarg, &ptr, 10); if (!ret) fprintf(stderr, "Invalid retry count, ignore\n"); else mbox_retries = ret; break; case 'v': print_version(); break; case 'b': oob_mode = 1; break; case 'n': no_daemon = 1; break; case 'w': ret = strtol(optarg, &ptr, 10); if (!ret) { fprintf(stderr, "Invalid poll interval count\n"); exit(0); } poll_interval = ret; break; case 'g': cgroupv2 = 1; break; default: usage(); } } if (optind > (argc - 2) && !oob_mode) { usage(); exit(0); } isst_update_platform_param(ISST_PARAM_MBOX_DELAY, mbox_delay); isst_update_platform_param(ISST_PARAM_MBOX_RETRIES, mbox_retries); set_max_cpu_num(); if (force_cpus_online) force_all_cpus_online(); store_cpu_topology(); create_cpu_map(); if (oob_mode) { if (debug_flag) fprintf(stderr, "OOB mode 
is enabled in debug mode\n"); ret = isst_daemon(debug_flag, poll_interval, no_daemon); if (ret) fprintf(stderr, "OOB mode enable failed\n"); goto out; } if (!is_clx_n_platform()) { process_command(argc, argv, isst_help_cmds, isst_cmds); } else { process_command(argc, argv, clx_n_help_cmds, clx_n_cmds); } out: free_cpu_set(present_cpumask); free_cpu_set(target_cpumask); } int main(int argc, char **argv) { outf = stderr; cmdline(argc, argv); return 0; }
/* dataset artifact: repo "linux-master" — end of tools/power/x86/intel-speed-select/isst-config.c */
// SPDX-License-Identifier: GPL-2.0 /* * Intel Speed Select -- Allow speed select to daemonize * Copyright (c) 2022 Intel Corporation. */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/file.h> #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include <getopt.h> #include <signal.h> #include <time.h> #include "isst.h" static int per_package_levels_info[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE][MAX_PUNIT_PER_DIE]; static time_t per_package_levels_tm[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE][MAX_PUNIT_PER_DIE]; static void init_levels(void) { int i, j, k; for (i = 0; i < MAX_PACKAGE_COUNT; ++i) for (j = 0; j < MAX_DIE_PER_PACKAGE; ++j) for (k = 0; k < MAX_PUNIT_PER_DIE; ++k) per_package_levels_info[i][j][k] = -1; } void process_level_change(struct isst_id *id) { struct isst_pkg_ctdp_level_info ctdp_level; struct isst_pkg_ctdp pkg_dev; time_t tm; int ret; if (id->pkg < 0 || id->die < 0 || id->punit < 0) { debug_printf("Invalid package/die info for cpu:%d\n", id->cpu); return; } tm = time(NULL); if (tm - per_package_levels_tm[id->pkg][id->die][id->punit] < 2) return; per_package_levels_tm[id->pkg][id->die][id->punit] = tm; ret = isst_get_ctdp_levels(id, &pkg_dev); if (ret) { debug_printf("Can't get tdp levels for cpu:%d\n", id->cpu); return; } debug_printf("Get Config level %d pkg:%d die:%d current_level:%d\n", id->cpu, id->pkg, id->die, pkg_dev.current_level); if (pkg_dev.locked) { debug_printf("config TDP s locked \n"); return; } if (per_package_levels_info[id->pkg][id->die][id->punit] == pkg_dev.current_level) return; debug_printf("**Config level change for cpu:%d pkg:%d die:%d from %d to %d\n", id->cpu, id->pkg, id->die, per_package_levels_info[id->pkg][id->die][id->punit], pkg_dev.current_level); per_package_levels_info[id->pkg][id->die][id->punit] = pkg_dev.current_level; ctdp_level.core_cpumask_size = alloc_cpu_set(&ctdp_level.core_cpumask); ret = 
isst_get_coremask_info(id, pkg_dev.current_level, &ctdp_level); if (ret) { free_cpu_set(ctdp_level.core_cpumask); debug_printf("Can't get core_mask:%d\n", id->cpu); return; } if (use_cgroupv2()) { int ret; ret = enable_cpuset_controller(); if (ret) goto use_offline; isolate_cpus(id, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask, pkg_dev.current_level); goto free_mask; } use_offline: if (ctdp_level.cpu_count) { int i, max_cpus = get_topo_max_cpus(); for (i = 0; i < max_cpus; ++i) { if (!is_cpu_in_power_domain(i, id)) continue; if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) { fprintf(stderr, "online cpu %d\n", i); set_cpu_online_offline(i, 1); } else { fprintf(stderr, "offline cpu %d\n", i); set_cpu_online_offline(i, 0); } } } free_mask: free_cpu_set(ctdp_level.core_cpumask); } static void _poll_for_config_change(struct isst_id *id, void *arg1, void *arg2, void *arg3, void *arg4) { process_level_change(id); } static void poll_for_config_change(void) { for_each_online_power_domain_in_set(_poll_for_config_change, NULL, NULL, NULL, NULL); } static int done = 0; static int pid_file_handle; static void signal_handler(int sig) { switch (sig) { case SIGINT: case SIGTERM: done = 1; hfi_exit(); exit(0); break; default: break; } } static void daemonize(char *rundir, char *pidfile) { int pid, sid, i; char str[10]; struct sigaction sig_actions; sigset_t sig_set; int ret; if (getppid() == 1) return; sigemptyset(&sig_set); sigaddset(&sig_set, SIGCHLD); sigaddset(&sig_set, SIGTSTP); sigaddset(&sig_set, SIGTTOU); sigaddset(&sig_set, SIGTTIN); sigprocmask(SIG_BLOCK, &sig_set, NULL); sig_actions.sa_handler = signal_handler; sigemptyset(&sig_actions.sa_mask); sig_actions.sa_flags = 0; sigaction(SIGHUP, &sig_actions, NULL); sigaction(SIGTERM, &sig_actions, NULL); sigaction(SIGINT, &sig_actions, NULL); pid = fork(); if (pid < 0) { /* Could not fork */ exit(EXIT_FAILURE); } if (pid > 0) exit(EXIT_SUCCESS); umask(027); sid = setsid(); if (sid < 0) 
exit(EXIT_FAILURE); /* close all descriptors */ for (i = getdtablesize(); i >= 0; --i) close(i); i = open("/dev/null", O_RDWR); if (i < 0) exit(EXIT_FAILURE); ret = dup(i); if (ret == -1) exit(EXIT_FAILURE); ret = chdir(rundir); if (ret == -1) exit(EXIT_FAILURE); pid_file_handle = open(pidfile, O_RDWR | O_CREAT, 0600); if (pid_file_handle == -1) { /* Couldn't open lock file */ exit(1); } /* Try to lock file */ #ifdef LOCKF_SUPPORT if (lockf(pid_file_handle, F_TLOCK, 0) == -1) { #else if (flock(pid_file_handle, LOCK_EX|LOCK_NB) < 0) { #endif /* Couldn't get lock on lock file */ fprintf(stderr, "Couldn't get lock file %d\n", getpid()); exit(1); } snprintf(str, sizeof(str), "%d\n", getpid()); ret = write(pid_file_handle, str, strlen(str)); if (ret == -1) exit(EXIT_FAILURE); close(i); } int isst_daemon(int debug_mode, int poll_interval, int no_daemon) { int ret; if (!no_daemon && poll_interval < 0 && !debug_mode) { fprintf(stderr, "OOB mode is enabled and will run as daemon\n"); daemonize((char *) "/tmp/", (char *)"/tmp/hfi-events.pid"); } else { signal(SIGINT, signal_handler); } init_levels(); if (poll_interval < 0) { ret = hfi_main(); if (ret) { fprintf(stderr, "HFI initialization failed\n"); } fprintf(stderr, "Must specify poll-interval\n"); return ret; } debug_printf("Starting loop\n"); while (!done) { sleep(poll_interval); poll_for_config_change(); } return 0; }
/* dataset artifact: repo "linux-master" — end of tools/power/x86/intel-speed-select/isst-daemon.c */
// SPDX-License-Identifier: GPL-2.0 /* * Intel dynamic_speed_select -- Enumerate and control features * Copyright (c) 2019 Intel Corporation. */ #include "isst.h" static void printcpulist(int str_len, char *str, int mask_size, cpu_set_t *cpu_mask) { int i, first, curr_index, index; if (!CPU_COUNT_S(mask_size, cpu_mask)) { snprintf(str, str_len, "none"); return; } curr_index = 0; first = 1; for (i = 0; i < get_topo_max_cpus(); ++i) { if (!CPU_ISSET_S(i, mask_size, cpu_mask)) continue; if (!first) { index = snprintf(&str[curr_index], str_len - curr_index, ","); curr_index += index; if (curr_index >= str_len) break; } index = snprintf(&str[curr_index], str_len - curr_index, "%d", i); curr_index += index; if (curr_index >= str_len) break; first = 0; } } static void printcpumask(int str_len, char *str, int mask_size, cpu_set_t *cpu_mask) { int i, max_cpus = get_topo_max_cpus(); unsigned int *mask; int size, index, curr_index; size = max_cpus / (sizeof(unsigned int) * 8); if (max_cpus % (sizeof(unsigned int) * 8)) size++; mask = calloc(size, sizeof(unsigned int)); if (!mask) return; for (i = 0; i < max_cpus; ++i) { int mask_index, bit_index; if (!CPU_ISSET_S(i, mask_size, cpu_mask)) continue; mask_index = i / (sizeof(unsigned int) * 8); bit_index = i % (sizeof(unsigned int) * 8); mask[mask_index] |= BIT(bit_index); } curr_index = 0; for (i = size - 1; i >= 0; --i) { index = snprintf(&str[curr_index], str_len - curr_index, "%08x", mask[i]); curr_index += index; if (curr_index >= str_len) break; if (i) { strncat(&str[curr_index], ",", str_len - curr_index); curr_index++; } if (curr_index >= str_len) break; } free(mask); } static void format_and_print_txt(FILE *outf, int level, char *header, char *value) { char *spaces = " "; static char delimiters[256]; int i, j = 0; if (!level) return; if (level == 1) { strcpy(delimiters, " "); } else { for (i = 0; i < level - 1; ++i) j += snprintf(&delimiters[j], sizeof(delimiters) - j, "%s", spaces); } if (header && value) { 
fprintf(outf, "%s", delimiters); fprintf(outf, "%s:%s\n", header, value); } else if (header) { fprintf(outf, "%s", delimiters); fprintf(outf, "%s\n", header); } } static int last_level; static void format_and_print(FILE *outf, int level, char *header, char *value) { char *spaces = " "; static char delimiters[256]; int i; if (!out_format_is_json()) { format_and_print_txt(outf, level, header, value); return; } if (level == 0) { if (header) fprintf(outf, "{"); else fprintf(outf, "\n}\n"); } else { int j = 0; for (i = 0; i < level; ++i) j += snprintf(&delimiters[j], sizeof(delimiters) - j, "%s", spaces); if (last_level == level) fprintf(outf, ",\n"); if (value) { if (last_level != level) fprintf(outf, "\n"); fprintf(outf, "%s\"%s\": ", delimiters, header); fprintf(outf, "\"%s\"", value); } else { for (i = last_level - 1; i >= level; --i) { int k = 0; for (j = i; j > 0; --j) k += snprintf(&delimiters[k], sizeof(delimiters) - k, "%s", spaces); if (i == level && header) fprintf(outf, "\n%s},", delimiters); else fprintf(outf, "\n%s}", delimiters); } if (abs(last_level - level) < 3) fprintf(outf, "\n"); if (header) fprintf(outf, "%s\"%s\": {", delimiters, header); } } last_level = level; } static int print_package_info(struct isst_id *id, FILE *outf) { char header[256]; int level = 1; if (out_format_is_json()) { if (api_version() > 1) snprintf(header, sizeof(header), "package-%d:die-%d:powerdomain-%d:cpu-%d", id->pkg, id->die, id->punit, id->cpu); else snprintf(header, sizeof(header), "package-%d:die-%d:cpu-%d", id->pkg, id->die, id->cpu); format_and_print(outf, level, header, NULL); return 1; } snprintf(header, sizeof(header), "package-%d", id->pkg); format_and_print(outf, level++, header, NULL); snprintf(header, sizeof(header), "die-%d", id->die); format_and_print(outf, level++, header, NULL); if (api_version() > 1) { snprintf(header, sizeof(header), "powerdomain-%d", id->punit); format_and_print(outf, level++, header, NULL); } snprintf(header, sizeof(header), "cpu-%d", 
id->cpu); format_and_print(outf, level, header, NULL); return level; } static void _isst_pbf_display_information(struct isst_id *id, FILE *outf, int level, struct isst_pbf_info *pbf_info, int disp_level) { char header[256]; char value[512]; snprintf(header, sizeof(header), "speed-select-base-freq-properties"); format_and_print(outf, disp_level, header, NULL); snprintf(header, sizeof(header), "high-priority-base-frequency(MHz)"); snprintf(value, sizeof(value), "%d", pbf_info->p1_high * isst_get_disp_freq_multiplier()); format_and_print(outf, disp_level + 1, header, value); snprintf(header, sizeof(header), "high-priority-cpu-mask"); printcpumask(sizeof(value), value, pbf_info->core_cpumask_size, pbf_info->core_cpumask); format_and_print(outf, disp_level + 1, header, value); snprintf(header, sizeof(header), "high-priority-cpu-list"); printcpulist(sizeof(value), value, pbf_info->core_cpumask_size, pbf_info->core_cpumask); format_and_print(outf, disp_level + 1, header, value); snprintf(header, sizeof(header), "low-priority-base-frequency(MHz)"); snprintf(value, sizeof(value), "%d", pbf_info->p1_low * isst_get_disp_freq_multiplier()); format_and_print(outf, disp_level + 1, header, value); if (is_clx_n_platform()) return; snprintf(header, sizeof(header), "tjunction-temperature(C)"); snprintf(value, sizeof(value), "%d", pbf_info->t_prochot); format_and_print(outf, disp_level + 1, header, value); snprintf(header, sizeof(header), "thermal-design-power(W)"); snprintf(value, sizeof(value), "%d", pbf_info->tdp); format_and_print(outf, disp_level + 1, header, value); } static void _isst_fact_display_information(struct isst_id *id, FILE *outf, int level, int fact_bucket, int fact_avx, struct isst_fact_info *fact_info, int base_level) { struct isst_fact_bucket_info *bucket_info = fact_info->bucket_info; int trl_max_levels = isst_get_trl_max_levels(); char header[256]; char value[256]; int print = 0, j; for (j = 0; j < ISST_FACT_MAX_BUCKETS; ++j) { if (fact_bucket != 0xff && 
fact_bucket != j) continue; /* core count must be valid for CPU power domain */ if (!bucket_info[j].hp_cores && id->cpu >= 0) break; print = 1; } if (!print) { fprintf(stderr, "Invalid bucket\n"); return; } snprintf(header, sizeof(header), "speed-select-turbo-freq-properties"); format_and_print(outf, base_level, header, NULL); for (j = 0; j < ISST_FACT_MAX_BUCKETS; ++j) { int i; if (fact_bucket != 0xff && fact_bucket != j) continue; if (!bucket_info[j].hp_cores) break; snprintf(header, sizeof(header), "bucket-%d", j); format_and_print(outf, base_level + 1, header, NULL); snprintf(header, sizeof(header), "high-priority-cores-count"); snprintf(value, sizeof(value), "%d", bucket_info[j].hp_cores); format_and_print(outf, base_level + 2, header, value); for (i = 0; i < trl_max_levels; i++) { if (!bucket_info[j].hp_ratios[i] || (fact_avx != 0xFF && !(fact_avx & (1 << i)))) continue; if (i == 0 && api_version() == 1 && !is_emr_platform()) snprintf(header, sizeof(header), "high-priority-max-frequency(MHz)"); else snprintf(header, sizeof(header), "high-priority-max-%s-frequency(MHz)", isst_get_trl_level_name(i)); snprintf(value, sizeof(value), "%d", bucket_info[j].hp_ratios[i] * isst_get_disp_freq_multiplier()); format_and_print(outf, base_level + 2, header, value); } } snprintf(header, sizeof(header), "speed-select-turbo-freq-clip-frequencies"); format_and_print(outf, base_level + 1, header, NULL); for (j = 0; j < trl_max_levels; j++) { if (!fact_info->lp_ratios[j]) continue; /* No AVX level name for SSE to be consistent with previous formatting */ if (j == 0 && api_version() == 1 && !is_emr_platform()) snprintf(header, sizeof(header), "low-priority-max-frequency(MHz)"); else snprintf(header, sizeof(header), "low-priority-max-%s-frequency(MHz)", isst_get_trl_level_name(j)); snprintf(value, sizeof(value), "%d", fact_info->lp_ratios[j] * isst_get_disp_freq_multiplier()); format_and_print(outf, base_level + 2, header, value); } } void isst_ctdp_display_core_info(struct 
isst_id *id, FILE *outf, char *prefix, unsigned int val, char *str0, char *str1) { char value[256]; int level = print_package_info(id, outf); level++; if (str0 && !val) snprintf(value, sizeof(value), "%s", str0); else if (str1 && val) snprintf(value, sizeof(value), "%s", str1); else snprintf(value, sizeof(value), "%u", val); format_and_print(outf, level, prefix, value); format_and_print(outf, 1, NULL, NULL); } void isst_ctdp_display_information(struct isst_id *id, FILE *outf, int tdp_level, struct isst_pkg_ctdp *pkg_dev) { char header[256]; char value[512]; static int level; int trl_max_levels = isst_get_trl_max_levels(); int i; if (pkg_dev->processed) level = print_package_info(id, outf); for (i = 0; i <= pkg_dev->levels; ++i) { struct isst_pkg_ctdp_level_info *ctdp_level; int j, k; ctdp_level = &pkg_dev->ctdp_level[i]; if (!ctdp_level->processed) continue; snprintf(header, sizeof(header), "perf-profile-level-%d", ctdp_level->level); format_and_print(outf, level + 1, header, NULL); if (id->cpu >= 0) { snprintf(header, sizeof(header), "cpu-count"); j = get_cpu_count(id); snprintf(value, sizeof(value), "%d", j); format_and_print(outf, level + 2, header, value); j = CPU_COUNT_S(ctdp_level->core_cpumask_size, ctdp_level->core_cpumask); if (j) { snprintf(header, sizeof(header), "enable-cpu-count"); snprintf(value, sizeof(value), "%d", j); format_and_print(outf, level + 2, header, value); } if (ctdp_level->core_cpumask_size) { snprintf(header, sizeof(header), "enable-cpu-mask"); printcpumask(sizeof(value), value, ctdp_level->core_cpumask_size, ctdp_level->core_cpumask); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "enable-cpu-list"); printcpulist(sizeof(value), value, ctdp_level->core_cpumask_size, ctdp_level->core_cpumask); format_and_print(outf, level + 2, header, value); } } snprintf(header, sizeof(header), "thermal-design-power-ratio"); snprintf(value, sizeof(value), "%d", ctdp_level->tdp_ratio); format_and_print(outf, level + 
2, header, value); snprintf(header, sizeof(header), "base-frequency(MHz)"); if (!ctdp_level->sse_p1) ctdp_level->sse_p1 = ctdp_level->tdp_ratio; snprintf(value, sizeof(value), "%d", ctdp_level->sse_p1 * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); if (ctdp_level->avx2_p1) { snprintf(header, sizeof(header), "base-frequency-avx2(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->avx2_p1 * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); } if (ctdp_level->avx512_p1) { snprintf(header, sizeof(header), "base-frequency-avx512(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->avx512_p1 * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); } if (ctdp_level->uncore_pm) { snprintf(header, sizeof(header), "uncore-frequency-min(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->uncore_pm * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); } if (ctdp_level->uncore_p0) { snprintf(header, sizeof(header), "uncore-frequency-max(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->uncore_p0 * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); } if (ctdp_level->amx_p1) { snprintf(header, sizeof(header), "base-frequency-amx(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->amx_p1 * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); } if (ctdp_level->uncore_p1) { snprintf(header, sizeof(header), "uncore-frequency-base(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->uncore_p1 * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); } if (ctdp_level->mem_freq) { snprintf(header, sizeof(header), "max-mem-frequency(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->mem_freq); format_and_print(outf, level + 2, header, value); } if (api_version() > 1) { snprintf(header, sizeof(header), "cooling_type"); 
snprintf(value, sizeof(value), "%d", ctdp_level->cooling_type); format_and_print(outf, level + 2, header, value); } snprintf(header, sizeof(header), "speed-select-turbo-freq"); if (ctdp_level->fact_support) { if (ctdp_level->fact_enabled) snprintf(value, sizeof(value), "enabled"); else snprintf(value, sizeof(value), "disabled"); } else snprintf(value, sizeof(value), "unsupported"); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "speed-select-base-freq"); if (ctdp_level->pbf_support) { if (ctdp_level->pbf_enabled) snprintf(value, sizeof(value), "enabled"); else snprintf(value, sizeof(value), "disabled"); } else snprintf(value, sizeof(value), "unsupported"); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "speed-select-core-power"); if (ctdp_level->sst_cp_support) { if (ctdp_level->sst_cp_enabled) snprintf(value, sizeof(value), "enabled"); else snprintf(value, sizeof(value), "disabled"); } else snprintf(value, sizeof(value), "unsupported"); format_and_print(outf, level + 2, header, value); if (is_clx_n_platform()) { if (ctdp_level->pbf_support) _isst_pbf_display_information(id, outf, tdp_level, &ctdp_level->pbf_info, level + 2); continue; } if (ctdp_level->pkg_tdp) { snprintf(header, sizeof(header), "thermal-design-power(W)"); snprintf(value, sizeof(value), "%d", ctdp_level->pkg_tdp); format_and_print(outf, level + 2, header, value); } if (ctdp_level->t_proc_hot) { snprintf(header, sizeof(header), "tjunction-max(C)"); snprintf(value, sizeof(value), "%d", ctdp_level->t_proc_hot); format_and_print(outf, level + 2, header, value); } for (k = 0; k < trl_max_levels; k++) { if (!ctdp_level->trl_ratios[k][0]) continue; snprintf(header, sizeof(header), "turbo-ratio-limits-%s", isst_get_trl_level_name(k)); format_and_print(outf, level + 2, header, NULL); for (j = 0; j < 8; ++j) { snprintf(header, sizeof(header), "bucket-%d", j); format_and_print(outf, level + 3, header, NULL); snprintf(header, 
sizeof(header), "core-count"); snprintf(value, sizeof(value), "%llu", (ctdp_level->trl_cores >> (j * 8)) & 0xff); format_and_print(outf, level + 4, header, value); snprintf(header, sizeof(header), "max-turbo-frequency(MHz)"); snprintf(value, sizeof(value), "%d", ctdp_level->trl_ratios[k][j] * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 4, header, value); } } if (ctdp_level->pbf_support) _isst_pbf_display_information(id, outf, i, &ctdp_level->pbf_info, level + 2); if (ctdp_level->fact_support) _isst_fact_display_information(id, outf, i, 0xff, 0xff, &ctdp_level->fact_info, level + 2); } format_and_print(outf, 1, NULL, NULL); } static int start; void isst_ctdp_display_information_start(FILE *outf) { last_level = 0; format_and_print(outf, 0, "start", NULL); start = 1; } void isst_ctdp_display_information_end(FILE *outf) { format_and_print(outf, 0, NULL, NULL); start = 0; } void isst_pbf_display_information(struct isst_id *id, FILE *outf, int level, struct isst_pbf_info *pbf_info) { int _level; _level = print_package_info(id, outf); _isst_pbf_display_information(id, outf, level, pbf_info, _level + 1); format_and_print(outf, 1, NULL, NULL); } void isst_fact_display_information(struct isst_id *id, FILE *outf, int level, int fact_bucket, int fact_avx, struct isst_fact_info *fact_info) { int _level; _level = print_package_info(id, outf); _isst_fact_display_information(id, outf, level, fact_bucket, fact_avx, fact_info, _level + 1); format_and_print(outf, 1, NULL, NULL); } void isst_clos_display_information(struct isst_id *id, FILE *outf, int clos, struct isst_clos_config *clos_config) { char header[256]; char value[256]; int level; level = print_package_info(id, outf); snprintf(header, sizeof(header), "core-power"); format_and_print(outf, level + 1, header, NULL); snprintf(header, sizeof(header), "clos"); snprintf(value, sizeof(value), "%d", clos); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "epp"); snprintf(value, 
sizeof(value), "%d", clos_config->epp); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "clos-proportional-priority"); snprintf(value, sizeof(value), "%d", clos_config->clos_prop_prio); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "clos-min"); snprintf(value, sizeof(value), "%d MHz", clos_config->clos_min * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "clos-max"); if ((clos_config->clos_max * isst_get_disp_freq_multiplier()) == 25500) snprintf(value, sizeof(value), "Max Turbo frequency"); else snprintf(value, sizeof(value), "%d MHz", clos_config->clos_max * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "clos-desired"); snprintf(value, sizeof(value), "%d MHz", clos_config->clos_desired * isst_get_disp_freq_multiplier()); format_and_print(outf, level + 2, header, value); format_and_print(outf, level, NULL, NULL); } void isst_clos_display_clos_information(struct isst_id *id, FILE *outf, int clos_enable, int type, int state, int cap) { char header[256]; char value[256]; int level; level = print_package_info(id, outf); snprintf(header, sizeof(header), "core-power"); format_and_print(outf, level + 1, header, NULL); snprintf(header, sizeof(header), "support-status"); if (cap) snprintf(value, sizeof(value), "supported"); else snprintf(value, sizeof(value), "unsupported"); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "enable-status"); if (state) snprintf(value, sizeof(value), "enabled"); else snprintf(value, sizeof(value), "disabled"); format_and_print(outf, level + 2, header, value); snprintf(header, sizeof(header), "clos-enable-status"); if (clos_enable) snprintf(value, sizeof(value), "enabled"); else snprintf(value, sizeof(value), "disabled"); format_and_print(outf, level + 2, header, value); snprintf(header, 
sizeof(header), "priority-type"); if (type) snprintf(value, sizeof(value), "ordered"); else snprintf(value, sizeof(value), "proportional"); format_and_print(outf, level + 2, header, value); format_and_print(outf, level, NULL, NULL); } void isst_clos_display_assoc_information(struct isst_id *id, FILE *outf, int clos) { char header[256]; char value[256]; int level; level = print_package_info(id, outf); snprintf(header, sizeof(header), "get-assoc"); format_and_print(outf, level + 1, header, NULL); snprintf(header, sizeof(header), "clos"); snprintf(value, sizeof(value), "%d", clos); format_and_print(outf, level + 2, header, value); format_and_print(outf, level, NULL, NULL); } void isst_display_result(struct isst_id *id, FILE *outf, char *feature, char *cmd, int result) { char header[256]; char value[256]; int level = 3; level = print_package_info(id, outf); snprintf(header, sizeof(header), "%s", feature); format_and_print(outf, level + 1, header, NULL); snprintf(header, sizeof(header), "%s", cmd); if (!result) snprintf(value, sizeof(value), "success"); else snprintf(value, sizeof(value), "failed(error %d)", result); format_and_print(outf, level + 2, header, value); format_and_print(outf, level, NULL, NULL); } void isst_display_error_info_message(int error, char *msg, int arg_valid, int arg) { FILE *outf = get_output_file(); static int error_index; char header[256]; char value[256]; if (!out_format_is_json()) { if (arg_valid) snprintf(value, sizeof(value), "%s %d", msg, arg); else snprintf(value, sizeof(value), "%s", msg); if (error) fprintf(outf, "Error: %s\n", value); else fprintf(outf, "Information: %s\n", value); return; } if (!start) format_and_print(outf, 0, "start", NULL); if (error) snprintf(header, sizeof(header), "Error%d", error_index++); else snprintf(header, sizeof(header), "Information:%d", error_index++); format_and_print(outf, 1, header, NULL); snprintf(header, sizeof(header), "message"); if (arg_valid) snprintf(value, sizeof(value), "%s %d", msg, arg); 
else snprintf(value, sizeof(value), "%s", msg); format_and_print(outf, 2, header, value); format_and_print(outf, 1, NULL, NULL); if (!start) format_and_print(outf, 0, NULL, NULL); } void isst_trl_display_information(struct isst_id *id, FILE *outf, unsigned long long trl) { char header[256]; char value[256]; int level; level = print_package_info(id, outf); snprintf(header, sizeof(header), "get-trl"); format_and_print(outf, level + 1, header, NULL); snprintf(header, sizeof(header), "trl"); snprintf(value, sizeof(value), "0x%llx", trl); format_and_print(outf, level + 2, header, value); format_and_print(outf, level, NULL, NULL); }
linux-master
tools/power/x86/intel-speed-select/isst-display.c
// SPDX-License-Identifier: GPL-2.0 /* * Intel Speed Select -- Read HFI events for OOB * Copyright (c) 2022 Intel Corporation. */ /* * This file incorporates work covered by the following copyright and * permission notice: * WPA Supplicant - driver interaction with Linux nl80211/cfg80211 * Copyright (c) 2003-2008, Jouni Malinen <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of * BSD license. * * Requires * libnl-genl-3-dev * * For Fedora/CenOS * dnf install libnl3-devel * For Ubuntu * apt install libnl-3-dev libnl-genl-3-dev */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/file.h> #include <sys/types.h> #include <sys/stat.h> #include <errno.h> #include <getopt.h> #include <signal.h> #include <netlink/genl/genl.h> #include <netlink/genl/family.h> #include <netlink/genl/ctrl.h> #include <linux/thermal.h> #include "isst.h" struct hfi_event_data { struct nl_sock *nl_handle; struct nl_cb *nl_cb; }; struct hfi_event_data drv; static int ack_handler(struct nl_msg *msg, void *arg) { int *err = arg; *err = 0; return NL_STOP; } static int finish_handler(struct nl_msg *msg, void *arg) { int *ret = arg; *ret = 0; return NL_SKIP; } static int error_handler(struct sockaddr_nl *nla, struct nlmsgerr *err, void *arg) { int *ret = arg; *ret = err->error; return NL_SKIP; } static int seq_check_handler(struct nl_msg *msg, void *arg) { return NL_OK; } static int send_and_recv_msgs(struct hfi_event_data *drv, struct nl_msg *msg, int (*valid_handler)(struct nl_msg *, void *), void *valid_data) { struct nl_cb *cb; int err = -ENOMEM; cb = nl_cb_clone(drv->nl_cb); if (!cb) goto out; err = nl_send_auto_complete(drv->nl_handle, msg); if (err < 0) goto out; err = 1; nl_cb_err(cb, 
NL_CB_CUSTOM, error_handler, &err); nl_cb_set(cb, NL_CB_FINISH, NL_CB_CUSTOM, finish_handler, &err); nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, ack_handler, &err); if (valid_handler) nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, valid_handler, valid_data); while (err > 0) nl_recvmsgs(drv->nl_handle, cb); out: nl_cb_put(cb); nlmsg_free(msg); return err; } struct family_data { const char *group; int id; }; static int family_handler(struct nl_msg *msg, void *arg) { struct family_data *res = arg; struct nlattr *tb[CTRL_ATTR_MAX + 1]; struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); struct nlattr *mcgrp; int i; nla_parse(tb, CTRL_ATTR_MAX, genlmsg_attrdata(gnlh, 0), genlmsg_attrlen(gnlh, 0), NULL); if (!tb[CTRL_ATTR_MCAST_GROUPS]) return NL_SKIP; nla_for_each_nested(mcgrp, tb[CTRL_ATTR_MCAST_GROUPS], i) { struct nlattr *tb2[CTRL_ATTR_MCAST_GRP_MAX + 1]; nla_parse(tb2, CTRL_ATTR_MCAST_GRP_MAX, nla_data(mcgrp), nla_len(mcgrp), NULL); if (!tb2[CTRL_ATTR_MCAST_GRP_NAME] || !tb2[CTRL_ATTR_MCAST_GRP_ID] || strncmp(nla_data(tb2[CTRL_ATTR_MCAST_GRP_NAME]), res->group, nla_len(tb2[CTRL_ATTR_MCAST_GRP_NAME])) != 0) continue; res->id = nla_get_u32(tb2[CTRL_ATTR_MCAST_GRP_ID]); break; } return 0; } static int nl_get_multicast_id(struct hfi_event_data *drv, const char *family, const char *group) { struct nl_msg *msg; int ret = -1; struct family_data res = { group, -ENOENT }; msg = nlmsg_alloc(); if (!msg) return -ENOMEM; genlmsg_put(msg, 0, 0, genl_ctrl_resolve(drv->nl_handle, "nlctrl"), 0, 0, CTRL_CMD_GETFAMILY, 0); NLA_PUT_STRING(msg, CTRL_ATTR_FAMILY_NAME, family); ret = send_and_recv_msgs(drv, msg, family_handler, &res); msg = NULL; if (ret == 0) ret = res.id; nla_put_failure: nlmsg_free(msg); return ret; } struct perf_cap { int cpu; int perf; int eff; }; static void process_hfi_event(struct perf_cap *perf_cap) { struct isst_id id; set_isst_id(&id, perf_cap->cpu); process_level_change(&id); } static int handle_event(struct nl_msg *n, void *arg) { struct nlmsghdr *nlh = nlmsg_hdr(n); 
struct genlmsghdr *genlhdr = genlmsg_hdr(nlh); struct nlattr *attrs[THERMAL_GENL_ATTR_MAX + 1]; int ret; struct perf_cap perf_cap = {0}; ret = genlmsg_parse(nlh, 0, attrs, THERMAL_GENL_ATTR_MAX, NULL); debug_printf("Received event %d parse_rer:%d\n", genlhdr->cmd, ret); if (genlhdr->cmd == THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE) { struct nlattr *cap; int j, index = 0; debug_printf("THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE\n"); nla_for_each_nested(cap, attrs[THERMAL_GENL_ATTR_CPU_CAPABILITY], j) { switch (index) { case 0: perf_cap.cpu = nla_get_u32(cap); break; case 1: perf_cap.perf = nla_get_u32(cap); break; case 2: perf_cap.eff = nla_get_u32(cap); break; default: break; } ++index; if (index == 3) { index = 0; process_hfi_event(&perf_cap); } } } return 0; } static int _hfi_exit; static int check_hf_suport(void) { unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; __cpuid(6, eax, ebx, ecx, edx); if (eax & BIT(19)) return 1; return 0; } int hfi_main(void) { struct nl_sock *sock; struct nl_cb *cb; int err = 0; int mcast_id; if (!check_hf_suport()) { fprintf(stderr, "CPU Doesn't support HFI\n"); return -1; } sock = nl_socket_alloc(); if (!sock) { fprintf(stderr, "nl_socket_alloc failed\n"); return -1; } if (genl_connect(sock)) { fprintf(stderr, "genl_connect(sk_event) failed\n"); goto free_sock; } drv.nl_handle = sock; drv.nl_cb = cb = nl_cb_alloc(NL_CB_DEFAULT); if (drv.nl_cb == NULL) { printf("Failed to allocate netlink callbacks"); goto free_sock; } mcast_id = nl_get_multicast_id(&drv, THERMAL_GENL_FAMILY_NAME, THERMAL_GENL_EVENT_GROUP_NAME); if (mcast_id < 0) { fprintf(stderr, "nl_get_multicast_id failed\n"); goto free_sock; } if (nl_socket_add_membership(sock, mcast_id)) { fprintf(stderr, "nl_socket_add_membership failed"); goto free_sock; } nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, seq_check_handler, 0); nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, handle_event, NULL); debug_printf("hfi is initialized\n"); while (!_hfi_exit && !err) { err = nl_recvmsgs(sock, cb); 
debug_printf("nl_recv_message err:%d\n", err); } return 0; /* Netlink library doesn't have calls to dealloc cb or disconnect */ free_sock: nl_socket_free(sock); return -1; } void hfi_exit(void) { _hfi_exit = 1; }
linux-master
tools/power/x86/intel-speed-select/hfi-events.c
// SPDX-License-Identifier: GPL-2.0 /* * Intel Speed Select -- Enumerate and control features for Mailbox Interface * Copyright (c) 2023 Intel Corporation. */ #include "isst.h" static int mbox_delay; static int mbox_retries = 3; #define MAX_TRL_LEVELS_EMR 5 static int mbox_get_disp_freq_multiplier(void) { return DISP_FREQ_MULTIPLIER; } static int mbox_get_trl_max_levels(void) { if (is_emr_platform()) return MAX_TRL_LEVELS_EMR; return 3; } static char *mbox_get_trl_level_name(int level) { if (is_emr_platform()) { static char level_str[18]; if (level >= MAX_TRL_LEVELS_EMR) return NULL; snprintf(level_str, sizeof(level_str), "level-%d", level); return level_str; } switch (level) { case 0: return "sse"; case 1: return "avx2"; case 2: return "avx512"; default: return NULL; } } static void mbox_update_platform_param(enum isst_platform_param param, int value) { switch (param) { case ISST_PARAM_MBOX_DELAY: mbox_delay = value; break; case ISST_PARAM_MBOX_RETRIES: mbox_retries = value; break; default: break; } } static int mbox_is_punit_valid(struct isst_id *id) { if (id->cpu < 0) return 0; if (id->pkg < 0 || id->die < 0 || id->punit) return 0; return 1; } static int _send_mmio_command(unsigned int cpu, unsigned int reg, int write, unsigned int *value) { struct isst_if_io_regs io_regs; const char *pathname = "/dev/isst_interface"; int cmd; FILE *outf = get_output_file(); int fd; debug_printf("mmio_cmd cpu:%d reg:%d write:%d\n", cpu, reg, write); fd = open(pathname, O_RDWR); if (fd < 0) err(-1, "%s open failed", pathname); io_regs.req_count = 1; io_regs.io_reg[0].logical_cpu = cpu; io_regs.io_reg[0].reg = reg; cmd = ISST_IF_IO_CMD; if (write) { io_regs.io_reg[0].read_write = 1; io_regs.io_reg[0].value = *value; } else { io_regs.io_reg[0].read_write = 0; } if (ioctl(fd, cmd, &io_regs) == -1) { if (errno == ENOTTY) { perror("ISST_IF_IO_COMMAND\n"); fprintf(stderr, "Check presence of kernel modules: isst_if_mmio\n"); exit(0); } fprintf(outf, "Error: mmio_cmd cpu:%d reg:%x 
read_write:%x\n", cpu, reg, write); } else { if (!write) *value = io_regs.io_reg[0].value; debug_printf( "mmio_cmd response: cpu:%d reg:%x rd_write:%x resp:%x\n", cpu, reg, write, *value); } close(fd); return 0; } int _send_mbox_command(unsigned int cpu, unsigned char command, unsigned char sub_command, unsigned int parameter, unsigned int req_data, unsigned int *resp) { const char *pathname = "/dev/isst_interface"; int fd, retry; struct isst_if_mbox_cmds mbox_cmds = { 0 }; debug_printf( "mbox_send: cpu:%d command:%x sub_command:%x parameter:%x req_data:%x\n", cpu, command, sub_command, parameter, req_data); if (!is_skx_based_platform() && command == CONFIG_CLOS && sub_command != CLOS_PM_QOS_CONFIG) { unsigned int value; int write = 0; int clos_id, core_id, ret = 0; debug_printf("CPU %d\n", cpu); if (parameter & BIT(MBOX_CMD_WRITE_BIT)) { value = req_data; write = 1; } switch (sub_command) { case CLOS_PQR_ASSOC: core_id = parameter & 0xff; ret = _send_mmio_command( cpu, PQR_ASSOC_OFFSET + core_id * 4, write, &value); if (!ret && !write) *resp = value; break; case CLOS_PM_CLOS: clos_id = parameter & 0x03; ret = _send_mmio_command( cpu, PM_CLOS_OFFSET + clos_id * 4, write, &value); if (!ret && !write) *resp = value; break; case CLOS_STATUS: break; default: break; } return ret; } mbox_cmds.cmd_count = 1; mbox_cmds.mbox_cmd[0].logical_cpu = cpu; mbox_cmds.mbox_cmd[0].command = command; mbox_cmds.mbox_cmd[0].sub_command = sub_command; mbox_cmds.mbox_cmd[0].parameter = parameter; mbox_cmds.mbox_cmd[0].req_data = req_data; if (mbox_delay) usleep(mbox_delay * 1000); fd = open(pathname, O_RDWR); if (fd < 0) err(-1, "%s open failed", pathname); retry = mbox_retries; do { if (ioctl(fd, ISST_IF_MBOX_COMMAND, &mbox_cmds) == -1) { if (errno == ENOTTY) { perror("ISST_IF_MBOX_COMMAND\n"); fprintf(stderr, "Check presence of kernel modules: isst_if_mbox_pci or isst_if_mbox_msr\n"); exit(0); } debug_printf( "Error: mbox_cmd cpu:%d command:%x sub_command:%x parameter:%x req_data:%x 
errorno:%d\n", cpu, command, sub_command, parameter, req_data, errno); --retry; } else { *resp = mbox_cmds.mbox_cmd[0].resp_data; debug_printf( "mbox_cmd response: cpu:%d command:%x sub_command:%x parameter:%x req_data:%x resp:%x\n", cpu, command, sub_command, parameter, req_data, *resp); break; } } while (retry); close(fd); if (!retry) { debug_printf("Failed mbox command even after retries\n"); return -1; } return 0; } static int mbox_read_pm_config(struct isst_id *id, int *cp_state, int *cp_cap) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, READ_PM_CONFIG, PM_FEATURE, 0, 0, &resp); if (ret) return ret; debug_printf("cpu:%d READ_PM_CONFIG resp:%x\n", id->cpu, resp); *cp_state = resp & BIT(16); *cp_cap = resp & BIT(0) ? 1 : 0; return 0; } static int mbox_get_config_levels(struct isst_id *id, struct isst_pkg_ctdp *pkg_dev) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_LEVELS_INFO, 0, 0, &resp); if (ret) { pkg_dev->levels = 0; pkg_dev->locked = 1; pkg_dev->current_level = 0; pkg_dev->version = 0; pkg_dev->enabled = 0; return 0; } debug_printf("cpu:%d CONFIG_TDP_GET_LEVELS_INFO resp:%x\n", id->cpu, resp); pkg_dev->version = resp & 0xff; pkg_dev->levels = (resp >> 8) & 0xff; pkg_dev->current_level = (resp >> 16) & 0xff; pkg_dev->locked = !!(resp & BIT(24)); pkg_dev->enabled = !!(resp & BIT(31)); return 0; } static int mbox_get_ctdp_control(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { int cp_state, cp_cap; unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TDP_CONTROL, 0, config_index, &resp); if (ret) return ret; ctdp_level->fact_support = resp & BIT(0); ctdp_level->pbf_support = !!(resp & BIT(1)); ctdp_level->fact_enabled = !!(resp & BIT(16)); ctdp_level->pbf_enabled = !!(resp & BIT(17)); ret = isst_read_pm_config(id, &cp_state, &cp_cap); if (ret) { debug_printf("cpu:%d pm_config is not supported\n", id->cpu); } else { 
debug_printf("cpu:%d pm_config SST-CP state:%d cap:%d\n", id->cpu, cp_state, cp_cap); ctdp_level->sst_cp_support = cp_cap; ctdp_level->sst_cp_enabled = cp_state; } debug_printf( "cpu:%d CONFIG_TDP_GET_TDP_CONTROL resp:%x fact_support:%d pbf_support: %d fact_enabled:%d pbf_enabled:%d\n", id->cpu, resp, ctdp_level->fact_support, ctdp_level->pbf_support, ctdp_level->fact_enabled, ctdp_level->pbf_enabled); return 0; } static void _get_uncore_p0_p1_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { unsigned int resp; int ret; ctdp_level->uncore_pm = 0; ctdp_level->uncore_p0 = 0; ctdp_level->uncore_p1 = 0; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_RATIO_INFO, 0, (BIT(16) | config_index) , &resp); if (ret) { goto try_uncore_mbox; } ctdp_level->uncore_p0 = resp & GENMASK(7, 0); ctdp_level->uncore_p1 = (resp & GENMASK(15, 8)) >> 8; ctdp_level->uncore_pm = (resp & GENMASK(31, 24)) >> 24; debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_RATIO_INFO resp:%x uncore p0:%d uncore p1:%d uncore pm:%d\n", id->cpu, config_index, resp, ctdp_level->uncore_p0, ctdp_level->uncore_p1, ctdp_level->uncore_pm); return; try_uncore_mbox: ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_UNCORE_P0_P1_INFO, 0, config_index, &resp); if (ret) { ctdp_level->uncore_p0 = 0; ctdp_level->uncore_p1 = 0; return; } ctdp_level->uncore_p0 = resp & GENMASK(7, 0); ctdp_level->uncore_p1 = (resp & GENMASK(15, 8)) >> 8; debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_UNCORE_P0_P1_INFO resp:%x uncore p0:%d uncore p1:%d\n", id->cpu, config_index, resp, ctdp_level->uncore_p0, ctdp_level->uncore_p1); } static int _set_uncore_min_max(struct isst_id *id, int max, int freq) { char buffer[128], freq_str[16]; int fd, ret, len; if (max) snprintf(buffer, sizeof(buffer), "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/max_freq_khz", id->pkg, id->die); else snprintf(buffer, sizeof(buffer), 
"/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/min_freq_khz", id->pkg, id->die); fd = open(buffer, O_WRONLY); if (fd < 0) return fd; snprintf(freq_str, sizeof(freq_str), "%d", freq); len = strlen(freq_str); ret = write(fd, freq_str, len); if (ret == -1) { close(fd); return ret; } close(fd); return 0; } static void mbox_adjust_uncore_freq(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { _get_uncore_p0_p1_info(id, config_index, ctdp_level); if (ctdp_level->uncore_pm) _set_uncore_min_max(id, 0, ctdp_level->uncore_pm * 100000); if (ctdp_level->uncore_p0) _set_uncore_min_max(id, 1, ctdp_level->uncore_p0 * 100000); } static void _get_p1_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_P1_INFO, 0, config_index, &resp); if (ret) { ctdp_level->sse_p1 = 0; ctdp_level->avx2_p1 = 0; ctdp_level->avx512_p1 = 0; return; } ctdp_level->sse_p1 = resp & GENMASK(7, 0); ctdp_level->avx2_p1 = (resp & GENMASK(15, 8)) >> 8; ctdp_level->avx512_p1 = (resp & GENMASK(23, 16)) >> 16; ctdp_level->amx_p1 = (resp & GENMASK(31, 24)) >> 24; debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_P1_INFO resp:%x sse_p1:%d avx2_p1:%d avx512_p1:%d amx_p1:%d\n", id->cpu, config_index, resp, ctdp_level->sse_p1, ctdp_level->avx2_p1, ctdp_level->avx512_p1, ctdp_level->amx_p1); } static void _get_uncore_mem_freq(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_MEM_FREQ, 0, config_index, &resp); if (ret) { ctdp_level->mem_freq = 0; return; } ctdp_level->mem_freq = resp & GENMASK(7, 0); if (is_spr_platform() || is_emr_platform()) { ctdp_level->mem_freq *= 200; } else if (is_icx_platform()) { if (ctdp_level->mem_freq < 7) { ctdp_level->mem_freq = (12 - ctdp_level->mem_freq) * 133.33 * 2 * 10; 
ctdp_level->mem_freq /= 10; if (ctdp_level->mem_freq % 10 > 5) ctdp_level->mem_freq++; } else { ctdp_level->mem_freq = 0; } } else { ctdp_level->mem_freq = 0; } debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_MEM_FREQ resp:%x uncore mem_freq:%d\n", id->cpu, config_index, resp, ctdp_level->mem_freq); } static int mbox_get_tdp_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TDP_INFO, 0, config_index, &resp); if (ret) { isst_display_error_info_message(1, "Invalid level, Can't get TDP information at level", 1, config_index); return ret; } ctdp_level->pkg_tdp = resp & GENMASK(14, 0); ctdp_level->tdp_ratio = (resp & GENMASK(23, 16)) >> 16; debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_TDP_INFO resp:%x tdp_ratio:%d pkg_tdp:%d\n", id->cpu, config_index, resp, ctdp_level->tdp_ratio, ctdp_level->pkg_tdp); ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TJMAX_INFO, 0, config_index, &resp); if (ret) return ret; ctdp_level->t_proc_hot = resp & GENMASK(7, 0); _get_uncore_p0_p1_info(id, config_index, ctdp_level); _get_p1_info(id, config_index, ctdp_level); _get_uncore_mem_freq(id, config_index, ctdp_level); debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_TJMAX_INFO resp:%x t_proc_hot:%d\n", id->cpu, config_index, resp, ctdp_level->t_proc_hot); return 0; } static int mbox_get_pwr_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_PWR_INFO, 0, config_index, &resp); if (ret) return ret; ctdp_level->pkg_max_power = resp & GENMASK(14, 0); ctdp_level->pkg_min_power = (resp & GENMASK(30, 16)) >> 16; debug_printf( "cpu:%d ctdp:%d CONFIG_TDP_GET_PWR_INFO resp:%x pkg_max_power:%d pkg_min_power:%d\n", id->cpu, config_index, resp, ctdp_level->pkg_max_power, ctdp_level->pkg_min_power); return 0; } static int 
mbox_get_coremask_info(struct isst_id *id, int config_index, struct isst_pkg_ctdp_level_info *ctdp_level) { unsigned int resp; int i, ret; ctdp_level->cpu_count = 0; for (i = 0; i < 2; ++i) { unsigned long long mask; int cpu_count = 0; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_CORE_MASK, 0, (i << 8) | config_index, &resp); if (ret) return ret; debug_printf( "cpu:%d ctdp:%d mask:%d CONFIG_TDP_GET_CORE_MASK resp:%x\n", id->cpu, config_index, i, resp); mask = (unsigned long long)resp << (32 * i); set_cpu_mask_from_punit_coremask(id, mask, ctdp_level->core_cpumask_size, ctdp_level->core_cpumask, &cpu_count); ctdp_level->cpu_count += cpu_count; debug_printf("cpu:%d ctdp:%d mask:%d cpu count:%d\n", id->cpu, config_index, i, ctdp_level->cpu_count); } return 0; } static int mbox_get_get_trl(struct isst_id *id, int level, int avx_level, int *trl) { unsigned int req, resp; int ret; req = level | (avx_level << 16); ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TURBO_LIMIT_RATIOS, 0, req, &resp); if (ret) return ret; debug_printf( "cpu:%d CONFIG_TDP_GET_TURBO_LIMIT_RATIOS req:%x resp:%x\n", id->cpu, req, resp); trl[0] = resp & GENMASK(7, 0); trl[1] = (resp & GENMASK(15, 8)) >> 8; trl[2] = (resp & GENMASK(23, 16)) >> 16; trl[3] = (resp & GENMASK(31, 24)) >> 24; req = level | BIT(8) | (avx_level << 16); ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_TURBO_LIMIT_RATIOS, 0, req, &resp); if (ret) return ret; debug_printf("cpu:%d CONFIG_TDP_GET_TURBO_LIMIT req:%x resp:%x\n", id->cpu, req, resp); trl[4] = resp & GENMASK(7, 0); trl[5] = (resp & GENMASK(15, 8)) >> 8; trl[6] = (resp & GENMASK(23, 16)) >> 16; trl[7] = (resp & GENMASK(31, 24)) >> 24; return 0; } static int mbox_get_get_trls(struct isst_id *id, int level, struct isst_pkg_ctdp_level_info *ctdp_level) { int trl_max_levels = isst_get_trl_max_levels(); int i, ret; for (i = 0; i < trl_max_levels; i++) { ret = mbox_get_get_trl(id, level, i, ctdp_level->trl_ratios[i]); if (ret) 
return ret; } return 0; } static int mbox_get_trl_bucket_info(struct isst_id *id, int level, unsigned long long *buckets_info) { int ret; debug_printf("cpu:%d bucket info via MSR\n", id->cpu); *buckets_info = 0; ret = isst_send_msr_command(id->cpu, 0x1ae, 0, buckets_info); if (ret) return ret; debug_printf("cpu:%d bucket info via MSR successful 0x%llx\n", id->cpu, *buckets_info); return 0; } static int mbox_set_tdp_level(struct isst_id *id, int tdp_level) { unsigned int resp; int ret; if (isst_get_config_tdp_lock_status(id)) { isst_display_error_info_message(1, "TDP is locked", 0, 0); return -1; } ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_SET_LEVEL, 0, tdp_level, &resp); if (ret) { isst_display_error_info_message(1, "Set TDP level failed for level", 1, tdp_level); return ret; } return 0; } static int mbox_get_pbf_info(struct isst_id *id, int level, struct isst_pbf_info *pbf_info) { int max_punit_core, max_mask_index; unsigned int req, resp; int i, ret; max_punit_core = get_max_punit_core_id(id); max_mask_index = max_punit_core > 32 ? 
2 : 1; for (i = 0; i < max_mask_index; ++i) { unsigned long long mask; int count; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_CORE_MASK_INFO, 0, (i << 8) | level, &resp); if (ret) break; debug_printf( "cpu:%d CONFIG_TDP_PBF_GET_CORE_MASK_INFO resp:%x\n", id->cpu, resp); mask = (unsigned long long)resp << (32 * i); set_cpu_mask_from_punit_coremask(id, mask, pbf_info->core_cpumask_size, pbf_info->core_cpumask, &count); } req = level; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO, 0, req, &resp); if (ret) return ret; debug_printf("cpu:%d CONFIG_TDP_PBF_GET_P1HI_P1LO_INFO resp:%x\n", id->cpu, resp); pbf_info->p1_low = resp & 0xff; pbf_info->p1_high = (resp & GENMASK(15, 8)) >> 8; req = level; ret = _send_mbox_command( id->cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TDP_INFO, 0, req, &resp); if (ret) return ret; debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TDP_INFO resp:%x\n", id->cpu, resp); pbf_info->tdp = resp & 0xffff; req = level; ret = _send_mbox_command( id->cpu, CONFIG_TDP, CONFIG_TDP_PBF_GET_TJ_MAX_INFO, 0, req, &resp); if (ret) return ret; debug_printf("cpu:%d CONFIG_TDP_PBF_GET_TJ_MAX_INFO resp:%x\n", id->cpu, resp); pbf_info->t_control = (resp >> 8) & 0xff; pbf_info->t_prochot = resp & 0xff; return 0; } static int mbox_set_pbf_fact_status(struct isst_id *id, int pbf, int enable) { struct isst_pkg_ctdp pkg_dev; struct isst_pkg_ctdp_level_info ctdp_level; int current_level; unsigned int req = 0, resp; int ret; ret = isst_get_ctdp_levels(id, &pkg_dev); if (ret) debug_printf("cpu:%d No support for dynamic ISST\n", id->cpu); current_level = pkg_dev.current_level; ret = isst_get_ctdp_control(id, current_level, &ctdp_level); if (ret) return ret; if (pbf) { if (ctdp_level.fact_enabled) req = BIT(16); if (enable) req |= BIT(17); else req &= ~BIT(17); } else { if (enable && !ctdp_level.sst_cp_enabled) isst_display_error_info_message(0, "Make sure to execute before: core-power enable", 0, 0); if (ctdp_level.pbf_enabled) req = 
BIT(17); if (enable) req |= BIT(16); else req &= ~BIT(16); } ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_SET_TDP_CONTROL, 0, req, &resp); if (ret) return ret; debug_printf("cpu:%d CONFIG_TDP_SET_TDP_CONTROL pbf/fact:%d req:%x\n", id->cpu, pbf, req); return 0; } static int _get_fact_bucket_info(struct isst_id *id, int level, struct isst_fact_bucket_info *bucket_info) { unsigned int resp; int i, k, ret; for (i = 0; i < 2; ++i) { int j; ret = _send_mbox_command( id->cpu, CONFIG_TDP, CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_NUMCORES, 0, (i << 8) | level, &resp); if (ret) return ret; debug_printf( "cpu:%d CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_NUMCORES index:%d level:%d resp:%x\n", id->cpu, i, level, resp); for (j = 0; j < 4; ++j) { bucket_info[j + (i * 4)].hp_cores = (resp >> (j * 8)) & 0xff; } } for (k = 0; k < 3; ++k) { for (i = 0; i < 2; ++i) { int j; ret = _send_mbox_command( id->cpu, CONFIG_TDP, CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_RATIOS, 0, (k << 16) | (i << 8) | level, &resp); if (ret) return ret; debug_printf( "cpu:%d CONFIG_TDP_GET_FACT_HP_TURBO_LIMIT_RATIOS index:%d level:%d avx:%d resp:%x\n", id->cpu, i, level, k, resp); for (j = 0; j < 4; ++j) { bucket_info[j + (i * 4)].hp_ratios[k] = (resp >> (j * 8)) & 0xff; } } } return 0; } static int mbox_get_fact_info(struct isst_id *id, int level, int fact_bucket, struct isst_fact_info *fact_info) { unsigned int resp; int j, ret, print; ret = _send_mbox_command(id->cpu, CONFIG_TDP, CONFIG_TDP_GET_FACT_LP_CLIPPING_RATIO, 0, level, &resp); if (ret) return ret; debug_printf("cpu:%d CONFIG_TDP_GET_FACT_LP_CLIPPING_RATIO resp:%x\n", id->cpu, resp); fact_info->lp_ratios[0] = resp & 0xff; fact_info->lp_ratios[1] = (resp >> 8) & 0xff; fact_info->lp_ratios[2] = (resp >> 16) & 0xff; ret = _get_fact_bucket_info(id, level, fact_info->bucket_info); if (ret) return ret; print = 0; for (j = 0; j < ISST_FACT_MAX_BUCKETS; ++j) { if (fact_bucket != 0xff && fact_bucket != j) continue; if (!fact_info->bucket_info[j].hp_cores) break; 
print = 1; } if (!print) { isst_display_error_info_message(1, "Invalid bucket", 0, 0); return -1; } return 0; } static int mbox_get_clos_information(struct isst_id *id, int *enable, int *type) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0, &resp); if (ret) return ret; debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", id->cpu, resp); if (resp & BIT(1)) *enable = 1; else *enable = 0; if (resp & BIT(2)) *type = 1; else *type = 0; return 0; } static int _write_pm_config(struct isst_id *id, int cp_state) { unsigned int req, resp; int ret; if (cp_state) req = BIT(16); else req = 0; ret = _send_mbox_command(id->cpu, WRITE_PM_CONFIG, PM_FEATURE, 0, req, &resp); if (ret) return ret; debug_printf("cpu:%d WRITE_PM_CONFIG resp:%x\n", id->cpu, resp); return 0; } static int mbox_pm_qos_config(struct isst_id *id, int enable_clos, int priority_type) { unsigned int req, resp; int ret; if (!enable_clos) { struct isst_pkg_ctdp pkg_dev; struct isst_pkg_ctdp_level_info ctdp_level; ret = isst_get_ctdp_levels(id, &pkg_dev); if (ret) { debug_printf("isst_get_ctdp_levels\n"); return ret; } ret = isst_get_ctdp_control(id, pkg_dev.current_level, &ctdp_level); if (ret) return ret; if (ctdp_level.fact_enabled) { isst_display_error_info_message(1, "Ignoring request, turbo-freq feature is still enabled", 0, 0); return -EINVAL; } ret = _write_pm_config(id, 0); if (ret) isst_display_error_info_message(0, "WRITE_PM_CONFIG command failed, ignoring error", 0, 0); } else { ret = _write_pm_config(id, 1); if (ret) isst_display_error_info_message(0, "WRITE_PM_CONFIG command failed, ignoring error", 0, 0); } ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0, &resp); if (ret) { isst_display_error_info_message(1, "CLOS_PM_QOS_CONFIG command failed", 0, 0); return ret; } debug_printf("cpu:%d CLOS_PM_QOS_CONFIG resp:%x\n", id->cpu, resp); req = resp; if (enable_clos) req = req | BIT(1); else req = req & ~BIT(1); if (priority_type 
> 1) isst_display_error_info_message(1, "Invalid priority type: Changing type to ordered", 0, 0); if (priority_type) req = req | BIT(2); else req = req & ~BIT(2); ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, BIT(MBOX_CMD_WRITE_BIT), req, &resp); if (ret) return ret; debug_printf("cpu:%d CLOS_PM_QOS_CONFIG priority type:%d req:%x\n", id->cpu, priority_type, req); return 0; } static int mbox_pm_get_clos(struct isst_id *id, int clos, struct isst_clos_config *clos_config) { unsigned int resp; int ret; ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_CLOS, clos, 0, &resp); if (ret) return ret; clos_config->epp = resp & 0x0f; clos_config->clos_prop_prio = (resp >> 4) & 0x0f; clos_config->clos_min = (resp >> 8) & 0xff; clos_config->clos_max = (resp >> 16) & 0xff; clos_config->clos_desired = (resp >> 24) & 0xff; return 0; } static int mbox_set_clos(struct isst_id *id, int clos, struct isst_clos_config *clos_config) { unsigned int req, resp; unsigned int param; int ret; req = clos_config->epp & 0x0f; req |= (clos_config->clos_prop_prio & 0x0f) << 4; req |= (clos_config->clos_min & 0xff) << 8; req |= (clos_config->clos_max & 0xff) << 16; req |= (clos_config->clos_desired & 0xff) << 24; param = BIT(MBOX_CMD_WRITE_BIT) | clos; ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PM_CLOS, param, req, &resp); if (ret) return ret; debug_printf("cpu:%d CLOS_PM_CLOS param:%x req:%x\n", id->cpu, param, req); return 0; } static int mbox_clos_get_assoc_status(struct isst_id *id, int *clos_id) { unsigned int resp; unsigned int param; int core_id, ret; core_id = find_phy_core_num(id->cpu); param = core_id; ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param, 0, &resp); if (ret) return ret; debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x resp:%x\n", id->cpu, param, resp); *clos_id = (resp >> 16) & 0x03; return 0; } static int mbox_clos_associate(struct isst_id *id, int clos_id) { unsigned int req, resp; unsigned int param; int core_id, ret; req = 
(clos_id & 0x03) << 16; core_id = find_phy_core_num(id->cpu); param = BIT(MBOX_CMD_WRITE_BIT) | core_id; ret = _send_mbox_command(id->cpu, CONFIG_CLOS, CLOS_PQR_ASSOC, param, req, &resp); if (ret) return ret; debug_printf("cpu:%d CLOS_PQR_ASSOC param:%x req:%x\n", id->cpu, param, req); return 0; } static struct isst_platform_ops mbox_ops = { .get_disp_freq_multiplier = mbox_get_disp_freq_multiplier, .get_trl_max_levels = mbox_get_trl_max_levels, .get_trl_level_name = mbox_get_trl_level_name, .update_platform_param = mbox_update_platform_param, .is_punit_valid = mbox_is_punit_valid, .read_pm_config = mbox_read_pm_config, .get_config_levels = mbox_get_config_levels, .get_ctdp_control = mbox_get_ctdp_control, .get_tdp_info = mbox_get_tdp_info, .get_pwr_info = mbox_get_pwr_info, .get_coremask_info = mbox_get_coremask_info, .get_get_trl = mbox_get_get_trl, .get_get_trls = mbox_get_get_trls, .get_trl_bucket_info = mbox_get_trl_bucket_info, .set_tdp_level = mbox_set_tdp_level, .get_pbf_info = mbox_get_pbf_info, .set_pbf_fact_status = mbox_set_pbf_fact_status, .get_fact_info = mbox_get_fact_info, .adjust_uncore_freq = mbox_adjust_uncore_freq, .get_clos_information = mbox_get_clos_information, .pm_qos_config = mbox_pm_qos_config, .pm_get_clos = mbox_pm_get_clos, .set_clos = mbox_set_clos, .clos_get_assoc_status = mbox_clos_get_assoc_status, .clos_associate = mbox_clos_associate, }; struct isst_platform_ops *mbox_get_platform_ops(void) { return &mbox_ops; }
/* ==== end of concatenated file: tools/power/x86/intel-speed-select/isst-core-mbox.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017, 2018, 2019, 2021 BMW Car IT GmbH * Author: Viktor Rosendahl ([email protected]) */ #define _GNU_SOURCE #define _POSIX_C_SOURCE 200809L #include <ctype.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <err.h> #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <sched.h> #include <linux/unistd.h> #include <signal.h> #include <sys/inotify.h> #include <unistd.h> #include <pthread.h> #include <tracefs.h> static const char *prg_name; static const char *prg_unknown = "unknown program name"; static int fd_stdout; static int sched_policy; static bool sched_policy_set; static int sched_pri; static bool sched_pri_set; static bool trace_enable = true; static bool setup_ftrace = true; static bool use_random_sleep; #define TRACE_OPTS \ C(FUNC_TR, "function-trace"), \ C(DISP_GR, "display-graph"), \ C(NR, NULL) #undef C #define C(a, b) OPTIDX_##a enum traceopt { TRACE_OPTS }; #undef C #define C(a, b) b static const char *const optstr[] = { TRACE_OPTS }; enum errhandling { ERR_EXIT = 0, ERR_WARN, ERR_CLEANUP, }; static bool use_options[OPTIDX_NR]; static char inotify_buffer[655360]; #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define bool2str(x) (x ? 
"true":"false") #define DEFAULT_NR_PRINTER_THREADS (3) static unsigned int nr_threads = DEFAULT_NR_PRINTER_THREADS; #define DEFAULT_TABLE_SIZE (2) static unsigned int table_startsize = DEFAULT_TABLE_SIZE; static int verbosity; #define verbose_sizechange() (verbosity >= 1) #define verbose_lostevent() (verbosity >= 2) #define verbose_ftrace() (verbosity >= 1) #define was_changed(ORIG, CUR) (strcmp(ORIG, CUR) != 0) #define needs_change(CUR, WANTED) (strcmp(CUR, WANTED) != 0) static const char *debug_tracefile; static const char *debug_tracefile_dflt; static const char *debug_maxlat; static const char *debug_maxlat_dflt; static const char * const DEBUG_NOFILE = "[file not found]"; static const char * const TR_MAXLAT = "tracing_max_latency"; static const char * const TR_THRESH = "tracing_thresh"; static const char * const TR_CURRENT = "current_tracer"; static const char * const TR_OPTIONS = "trace_options"; static const char * const NOP_TRACER = "nop"; static const char * const OPT_NO_PREFIX = "no"; #define DFLT_THRESHOLD_US "0" static const char *threshold = DFLT_THRESHOLD_US; #define DEV_URANDOM "/dev/urandom" #define RT_DEFAULT_PRI (99) #define DEFAULT_PRI (0) #define USEC_PER_MSEC (1000L) #define NSEC_PER_USEC (1000L) #define NSEC_PER_MSEC (USEC_PER_MSEC * NSEC_PER_USEC) #define MSEC_PER_SEC (1000L) #define USEC_PER_SEC (USEC_PER_MSEC * MSEC_PER_SEC) #define NSEC_PER_SEC (NSEC_PER_MSEC * MSEC_PER_SEC) #define SLEEP_TIME_MS_DEFAULT (1000L) #define TRY_PRINTMUTEX_MS (1000) static long sleep_time = (USEC_PER_MSEC * SLEEP_TIME_MS_DEFAULT); static const char * const queue_full_warning = "Could not queue trace for printing. It is likely that events happen faster\n" "than what they can be printed. Probably partly because of random sleeping\n"; static const char * const no_tracer_msg = "Could not find any tracers! 
Running this program as root may help!\n"; static const char * const no_latency_tr_msg = "No latency tracers are supported by your kernel!\n"; struct policy { const char *name; int policy; int default_pri; }; static const struct policy policies[] = { { "other", SCHED_OTHER, DEFAULT_PRI }, { "batch", SCHED_BATCH, DEFAULT_PRI }, { "idle", SCHED_IDLE, DEFAULT_PRI }, { "rr", SCHED_RR, RT_DEFAULT_PRI }, { "fifo", SCHED_FIFO, RT_DEFAULT_PRI }, { NULL, 0, DEFAULT_PRI } }; /* * The default tracer will be the first on this list that is supported by the * currently running Linux kernel. */ static const char * const relevant_tracers[] = { "preemptirqsoff", "preemptoff", "irqsoff", "wakeup", "wakeup_rt", "wakeup_dl", NULL }; /* This is the list of tracers for which random sleep makes sense */ static const char * const random_tracers[] = { "preemptirqsoff", "preemptoff", "irqsoff", NULL }; static const char *current_tracer; static bool force_tracer; struct ftrace_state { char *tracer; char *thresh; bool opt[OPTIDX_NR]; bool opt_valid[OPTIDX_NR]; pthread_mutex_t mutex; }; struct entry { int ticket; int ticket_completed_ref; }; struct print_state { int ticket_counter; int ticket_completed; pthread_mutex_t mutex; pthread_cond_t cond; int cnt; pthread_mutex_t cnt_mutex; }; struct short_msg { char buf[160]; int len; }; static struct print_state printstate; static struct ftrace_state save_state; volatile sig_atomic_t signal_flag; #define PROB_TABLE_MAX_SIZE (1000) int probabilities[PROB_TABLE_MAX_SIZE]; struct sleep_table { int *table; int size; pthread_mutex_t mutex; }; static struct sleep_table sleeptable; #define QUEUE_SIZE (10) struct queue { struct entry entries[QUEUE_SIZE]; int next_prod_idx; int next_cons_idx; pthread_mutex_t mutex; pthread_cond_t cond; }; #define MAX_THREADS (40) struct queue printqueue; pthread_t printthread[MAX_THREADS]; pthread_mutex_t print_mtx; #define PRINT_BUFFER_SIZE (16 * 1024 * 1024) static void cleanup_exit(int status); static int 
set_trace_opt(const char *opt, bool value); static __always_inline void *malloc_or_die(size_t size) { void *ptr = malloc(size); if (unlikely(ptr == NULL)) { warn("malloc() failed"); cleanup_exit(EXIT_FAILURE); } return ptr; } static __always_inline void *malloc_or_die_nocleanup(size_t size) { void *ptr = malloc(size); if (unlikely(ptr == NULL)) err(0, "malloc() failed"); return ptr; } static __always_inline void write_or_die(int fd, const char *buf, size_t count) { ssize_t r; do { r = write(fd, buf, count); if (unlikely(r < 0)) { if (errno == EINTR) continue; warn("write() failed"); cleanup_exit(EXIT_FAILURE); } count -= r; buf += r; } while (count > 0); } static __always_inline void clock_gettime_or_die(clockid_t clk_id, struct timespec *tp) { int r = clock_gettime(clk_id, tp); if (unlikely(r != 0)) err(EXIT_FAILURE, "clock_gettime() failed"); } static __always_inline void sigemptyset_or_die(sigset_t *s) { if (unlikely(sigemptyset(s) != 0)) { warn("sigemptyset() failed"); cleanup_exit(EXIT_FAILURE); } } static __always_inline void sigaddset_or_die(sigset_t *s, int signum) { if (unlikely(sigaddset(s, signum) != 0)) { warn("sigemptyset() failed"); cleanup_exit(EXIT_FAILURE); } } static __always_inline void sigaction_or_die(int signum, const struct sigaction *act, struct sigaction *oldact) { if (unlikely(sigaction(signum, act, oldact) != 0)) { warn("sigaction() failed"); cleanup_exit(EXIT_FAILURE); } } static void open_stdout(void) { if (setvbuf(stdout, NULL, _IONBF, 0) != 0) err(EXIT_FAILURE, "setvbuf() failed"); fd_stdout = fileno(stdout); if (fd_stdout < 0) err(EXIT_FAILURE, "fileno() failed"); } /* * It's not worth it to call cleanup_exit() from mutex functions because * cleanup_exit() uses mutexes. 
*/ static __always_inline void mutex_lock(pthread_mutex_t *mtx) { errno = pthread_mutex_lock(mtx); if (unlikely(errno)) err(EXIT_FAILURE, "pthread_mutex_lock() failed"); } static __always_inline void mutex_unlock(pthread_mutex_t *mtx) { errno = pthread_mutex_unlock(mtx); if (unlikely(errno)) err(EXIT_FAILURE, "pthread_mutex_unlock() failed"); } static __always_inline void cond_signal(pthread_cond_t *cond) { errno = pthread_cond_signal(cond); if (unlikely(errno)) err(EXIT_FAILURE, "pthread_cond_signal() failed"); } static __always_inline void cond_wait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex) { errno = pthread_cond_wait(cond, mutex); if (unlikely(errno)) err(EXIT_FAILURE, "pthread_cond_wait() failed"); } static __always_inline void cond_broadcast(pthread_cond_t *cond) { errno = pthread_cond_broadcast(cond); if (unlikely(errno)) err(EXIT_FAILURE, "pthread_cond_broadcast() failed"); } static __always_inline void mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr) { errno = pthread_mutex_init(mutex, attr); if (errno) err(EXIT_FAILURE, "pthread_mutex_init() failed"); } static __always_inline void mutexattr_init(pthread_mutexattr_t *attr) { errno = pthread_mutexattr_init(attr); if (errno) err(EXIT_FAILURE, "pthread_mutexattr_init() failed"); } static __always_inline void mutexattr_destroy(pthread_mutexattr_t *attr) { errno = pthread_mutexattr_destroy(attr); if (errno) err(EXIT_FAILURE, "pthread_mutexattr_destroy() failed"); } static __always_inline void mutexattr_settype(pthread_mutexattr_t *attr, int type) { errno = pthread_mutexattr_settype(attr, type); if (errno) err(EXIT_FAILURE, "pthread_mutexattr_settype() failed"); } static __always_inline void condattr_init(pthread_condattr_t *attr) { errno = pthread_condattr_init(attr); if (errno) err(EXIT_FAILURE, "pthread_condattr_init() failed"); } static __always_inline void condattr_destroy(pthread_condattr_t *attr) { errno = pthread_condattr_destroy(attr); if (errno) err(EXIT_FAILURE, 
"pthread_condattr_destroy() failed"); }

/* Select the clock (CLOCK_MONOTONIC here) used by condvar timed waits, or die. */
static __always_inline void condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id) { errno = pthread_condattr_setclock(attr, clock_id); if (unlikely(errno)) err(EXIT_FAILURE, "pthread_condattr_setclock() failed"); }

/* Initialize @cond with optional @attr, or die. */
static __always_inline void cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) { errno = pthread_cond_init(cond, attr); if (errno) err(EXIT_FAILURE, "pthread_cond_init() failed"); }

/*
 * Timed wait on @cond. Returns 0 or ETIMEDOUT; any other pthread error
 * aborts the program. @abstime is absolute, on the clock chosen via
 * condattr_setclock().
 */
static __always_inline int cond_timedwait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex, const struct timespec *restrict abstime) { errno = pthread_cond_timedwait(cond, mutex, abstime); if (errno && errno != ETIMEDOUT) err(EXIT_FAILURE, "pthread_cond_timedwait() failed"); return errno; }

/*
 * Initialize the global printstate: zero the ticket counters and the
 * in-flight print count, and create its condvar on CLOCK_MONOTONIC so
 * timed waits are immune to wall-clock jumps.
 */
static void init_printstate(void) { pthread_condattr_t cattr; printstate.ticket_counter = 0; printstate.ticket_completed = 0; printstate.cnt = 0; mutex_init(&printstate.mutex, NULL); condattr_init(&cattr); condattr_setclock(&cattr, CLOCK_MONOTONIC); cond_init(&printstate.cond, &cattr); condattr_destroy(&cattr); }

/* Create the global output mutex as recursive, so nested locking in one thread is safe. */
static void init_print_mtx(void) { pthread_mutexattr_t mattr; mutexattr_init(&mattr); mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE); mutex_init(&print_mtx, &mattr); mutexattr_destroy(&mattr); }

/* Block or unblock (per @how, e.g. SIG_UNBLOCK) SIGHUP/SIGTERM/SIGINT for this thread. */
static void signal_blocking(int how) { sigset_t s; sigemptyset_or_die(&s); sigaddset_or_die(&s, SIGHUP); sigaddset_or_die(&s, SIGTERM); sigaddset_or_die(&s, SIGINT); errno = pthread_sigmask(how, &s, NULL); if (unlikely(errno)) { warn("pthread_sigmask() failed"); cleanup_exit(EXIT_FAILURE); } }

/* Async-signal handler: just record which signal arrived; processed later in check_signals(). */
static void signal_handler(int num) { signal_flag = num; }

/* Install signal_handler() for the three termination signals. */
static void setup_sig_handler(void) { struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = signal_handler; sigaction_or_die(SIGHUP, &sa, NULL); sigaction_or_die(SIGTERM, &sa, NULL); sigaction_or_die(SIGINT, &sa, NULL); }

/* Report the received signal by number (and name if strsignal() knows it), then exit cleanly. */
static void process_signal(int signal) { char *name; name = strsignal(signal); if (name == NULL) printf("Received signal %d\n",
signal); else printf("Received signal %d (%s)\n", signal, name); cleanup_exit(EXIT_SUCCESS); }

/* Poll the flag set by signal_handler() and handle any pending signal. */
static __always_inline void check_signals(void) { int signal = signal_flag; if (unlikely(signal)) process_signal(signal); }

/*
 * Fill *future with the CLOCK_MONOTONIC time @time_us microseconds from
 * now, normalizing tv_nsec into [0, NSEC_PER_SEC).
 * NOTE(review): time_us * NSEC_PER_USEC could overflow a 32-bit long for
 * very large sleep times — presumably time_us stays small here; confirm.
 */
static __always_inline void get_time_in_future(struct timespec *future, long time_us) { long nsec; clock_gettime_or_die(CLOCK_MONOTONIC, future); future->tv_sec += time_us / USEC_PER_SEC; nsec = future->tv_nsec + (time_us * NSEC_PER_USEC) % NSEC_PER_SEC; if (nsec >= NSEC_PER_SEC) { future->tv_nsec = nsec % NSEC_PER_SEC; future->tv_sec += 1; } }

/* Return true if the CLOCK_MONOTONIC "now" is at or past @time. */
static __always_inline bool time_has_passed(const struct timespec *time) { struct timespec now; clock_gettime_or_die(CLOCK_MONOTONIC, &now); if (now.tv_sec > time->tv_sec) return true; if (now.tv_sec < time->tv_sec) return false; return (now.tv_nsec >= time->tv_nsec); }

/*
 * Busy-poll pthread_mutex_trylock() for up to @time_ms milliseconds.
 * Returns true if the mutex was acquired. Used by cleanup_exit() so exit
 * output is not garbled, without risking a hang.
 */
static bool mutex_trylock_limit(pthread_mutex_t *mutex, int time_ms) { long time_us = time_ms * USEC_PER_MSEC; struct timespec limit; get_time_in_future(&limit, time_us); do { errno = pthread_mutex_trylock(mutex); if (errno && errno != EBUSY) err(EXIT_FAILURE, "pthread_mutex_trylock() failed"); } while (errno && !time_has_passed(&limit)); return errno == 0; }

/* Write back any trace_options entries that differ from the saved state; warn on failure. */
static void restore_trace_opts(const struct ftrace_state *state, const bool *cur) { int i; int r; for (i = 0; i < OPTIDX_NR; i++) if (state->opt_valid[i] && state->opt[i] != cur[i]) { r = set_trace_opt(optstr[i], state->opt[i]); if (r < 0) warnx("Failed to restore the %s option to %s", optstr[i], bool2str(state->opt[i])); else if (verbose_ftrace()) printf("Restored the %s option in %s to %s\n", optstr[i], TR_OPTIONS, bool2str(state->opt[i])); } }

/*
 * Read a tracefs file into a malloc'ed string (caller frees), stripping a
 * trailing newline. On read failure: warn (and optionally cleanup-exit)
 * when @h is nonzero, else exit hard.
 * NOTE(review): if tracefs ever reports psize == 0, r[psize - 1] reads
 * one byte before the buffer — TODO confirm psize >= 1 is guaranteed.
 */
static char *read_file(const char *file, enum errhandling h) { int psize; char *r; static const char *emsg = "Failed to read the %s file"; r = tracefs_instance_file_read(NULL, file, &psize); if (!r) { if (h) { warn(emsg, file); if (h == ERR_CLEANUP) cleanup_exit(EXIT_FAILURE); } else errx(EXIT_FAILURE, emsg, file); } if (r && r[psize - 1] == '\n')
r[psize - 1] = '\0'; return r; }

/* Restore @file to *saved if it was changed, then free and NULL the saved copy. */
static void restore_file(const char *file, char **saved, const char *cur) { if (*saved && was_changed(*saved, cur)) { if (tracefs_instance_file_write(NULL, file, *saved) < 0) warnx("Failed to restore %s to %s!", file, *saved); else if (verbose_ftrace()) printf("Restored %s to %s\n", file, *saved); free(*saved); *saved = NULL; } }

/* Put the tracer, threshold and trace options back the way we found them. */
static void restore_ftrace(void) { mutex_lock(&save_state.mutex); restore_file(TR_CURRENT, &save_state.tracer, current_tracer); restore_file(TR_THRESH, &save_state.thresh, threshold); restore_trace_opts(&save_state, use_options); mutex_unlock(&save_state.mutex); }

/*
 * Exit path: report the maximum detected latency, restore the original
 * ftrace configuration (if we changed it), and exit with @status.
 */
static void cleanup_exit(int status) { char *maxlat; if (!setup_ftrace) exit(status); /* * We try the print_mtx for 1 sec in order to avoid garbled * output if possible, but if it cannot be obtained we proceed anyway. */ mutex_trylock_limit(&print_mtx, TRY_PRINTMUTEX_MS); maxlat = read_file(TR_MAXLAT, ERR_WARN); if (maxlat) { printf("The maximum detected latency was: %sus\n", maxlat); free(maxlat); } restore_ftrace(); /* * We do not need to unlock the print_mtx here because we will exit at * the end of this function. Unlocking print_mtx causes problems if a * print thread happens to be waiting for the mutex because we have * just changed the ftrace settings to the original and thus the * print thread would output incorrect data from ftrace. */ exit(status); }

/* Initialize save_state: recursive mutex, no saved tracer/threshold, no valid options yet. */
static void init_save_state(void) { pthread_mutexattr_t mattr; mutexattr_init(&mattr); mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE); mutex_init(&save_state.mutex, &mattr); mutexattr_destroy(&mattr); save_state.tracer = NULL; save_state.thresh = NULL; save_state.opt_valid[OPTIDX_FUNC_TR] = false; save_state.opt_valid[OPTIDX_DISP_GR] = false; }

/*
 * Hand out the next print ticket to @req, record the completed-ticket
 * snapshot, and wake all waiters. Caller must hold printstate.mutex.
 */
static int printstate_next_ticket(struct entry *req) { int r; r = ++(printstate.ticket_counter); req->ticket = r; req->ticket_completed_ref = printstate.ticket_completed; cond_broadcast(&printstate.cond); return r; }

/* Advance ticket_completed to @req's ticket if it is newer. Caller holds printstate.mutex. */
static __always_inline void printstate_mark_req_completed(const struct entry *req) { if (req->ticket > printstate.ticket_completed) printstate.ticket_completed = req->ticket; }

/* True if a newer request got a ticket after @req. */
static __always_inline bool printstate_has_new_req_arrived(const struct entry *req) { return (printstate.ticket_counter != req->ticket); }

/* Atomically increment the count of in-flight prints; returns the new value. */
static __always_inline int printstate_cnt_inc(void) { int value; mutex_lock(&printstate.cnt_mutex); value = ++printstate.cnt; mutex_unlock(&printstate.cnt_mutex); return value; }

/* Atomically decrement the in-flight print count; returns the new value. */
static __always_inline int printstate_cnt_dec(void) { int value; mutex_lock(&printstate.cnt_mutex); value = --printstate.cnt; mutex_unlock(&printstate.cnt_mutex); return value; }

/* Atomically read the in-flight print count. */
static __always_inline int printstate_cnt_read(void) { int value; mutex_lock(&printstate.cnt_mutex); value = printstate.cnt; mutex_unlock(&printstate.cnt_mutex); return value; }

/* True if some other request completed since @req took its snapshot (it lost the race). */
static __always_inline bool prev_req_won_race(const struct entry *req) { return (printstate.ticket_completed != req->ticket_completed_ref); }

/*
 * Grow/shrink the view into the probabilities[] array to @size entries
 * (the table is the LAST @size elements). Optionally format a message
 * about the change into @msg. (Continues on the next source line.)
 */
static void sleeptable_resize(int size, bool printout, struct short_msg *msg) { int bytes; if (printout) { msg->len = 0; if (unlikely(size > PROB_TABLE_MAX_SIZE)) bytes = snprintf(msg->buf, sizeof(msg->buf), "Cannot increase probability table to %d (maximum size reached)\n", size); else bytes = snprintf(msg->buf, sizeof(msg->buf), "Increasing probability table to %d\n", size); if (bytes < 0) warn("snprintf()
failed"); else msg->len = bytes; } if (unlikely(size < 0)) { /* Should never happen */ warnx("Bad program state at %s:%d", __FILE__, __LINE__); cleanup_exit(EXIT_FAILURE); return; } sleeptable.size = size; sleeptable.table = &probabilities[PROB_TABLE_MAX_SIZE - size]; }

/*
 * Precompute the per-mille sleep probabilities. probabilities[i] holds
 * 1000 / (1000 - i); sleeptable later exposes the tail of this array so
 * the no-sleep chance rises as more clustered latencies are seen.
 */
static void init_probabilities(void) { int i; int j = 1000; for (i = 0; i < PROB_TABLE_MAX_SIZE; i++) { probabilities[i] = 1000 / j; j--; } mutex_init(&sleeptable.mutex, NULL); }

/*
 * Look up the sleep probability (per mille) for @req based on how many
 * requests were outstanding when it got its ticket; auto-grows the table
 * when the last slot is hit, recording a message in @msg.
 */
static int table_get_probability(const struct entry *req, struct short_msg *msg) { int diff = req->ticket - req->ticket_completed_ref; int rval = 0; msg->len = 0; diff--; /* Should never happen...*/ if (unlikely(diff < 0)) { warnx("Programmer assumption error at %s:%d\n", __FILE__, __LINE__); cleanup_exit(EXIT_FAILURE); } mutex_lock(&sleeptable.mutex); if (diff >= (sleeptable.size - 1)) { rval = sleeptable.table[sleeptable.size - 1]; sleeptable_resize(sleeptable.size + 1, verbose_sizechange(), msg); } else { rval = sleeptable.table[diff]; } mutex_unlock(&sleeptable.mutex); return rval; }

/* Initialize an empty ring-buffer queue with its mutex and condvar. */
static void init_queue(struct queue *q) { q->next_prod_idx = 0; q->next_cons_idx = 0; mutex_init(&q->mutex, NULL); errno = pthread_cond_init(&q->cond, NULL); if (errno) err(EXIT_FAILURE, "pthread_cond_init() failed"); }

/* Number of entries currently in the ring buffer (indices may have wrapped). */
static __always_inline int queue_len(const struct queue *q) { if (q->next_prod_idx >= q->next_cons_idx) return q->next_prod_idx - q->next_cons_idx; else return QUEUE_SIZE - q->next_cons_idx + q->next_prod_idx; }

/* Free slots in the queue; one slot is always sacrificed to distinguish full from empty. */
static __always_inline int queue_nr_free(const struct queue *q) { int nr_free = QUEUE_SIZE - queue_len(q); /* * If there is only one slot left we will anyway lie and claim that the * queue is full because adding an element will make it appear empty */ if (nr_free == 1) nr_free = 0; return nr_free; }

/* Advance a ring index with wrap-around. */
static __always_inline void queue_idx_inc(int *idx) { *idx = (*idx + 1) % QUEUE_SIZE; }

/* Copy *e into the producer slot and advance. Caller must hold q->mutex. */
static __always_inline void queue_push_to_back(struct queue *q, const struct entry *e) { q->entries[q->next_prod_idx] = *e; queue_idx_inc(&q->next_prod_idx); }

/* Copy out the front entry and advance the consumer index. Caller must hold q->mutex. */
static __always_inline struct entry queue_pop_from_front(struct queue *q) { struct entry e = q->entries[q->next_cons_idx]; queue_idx_inc(&q->next_cons_idx); return e; }

/* Wake one consumer waiting on the queue. */
static __always_inline void queue_cond_signal(struct queue *q) { cond_signal(&q->cond); }

/* Sleep until the queue is signalled. Caller must hold q->mutex. */
static __always_inline void queue_cond_wait(struct queue *q) { cond_wait(&q->cond, &q->mutex); }

/* Try to enqueue *e; returns 0 on success, -1 if the queue is full. */
static __always_inline int queue_try_to_add_entry(struct queue *q, const struct entry *e) { int r = 0; mutex_lock(&q->mutex); if (queue_nr_free(q) > 0) { queue_push_to_back(q, e); cond_signal(&q->cond); } else r = -1; mutex_unlock(&q->mutex); return r; }

/*
 * Block until an entry is available and pop it.
 * NOTE(review): the length check uses the global &printqueue rather than
 * the parameter q — correct only because every caller passes &printqueue;
 * presumably this should read queue_len(q). Confirm before reusing with
 * another queue.
 */
static struct entry queue_wait_for_entry(struct queue *q) { struct entry e; mutex_lock(&q->mutex); while (true) { if (queue_len(&printqueue) > 0) { e = queue_pop_from_front(q); break; } queue_cond_wait(q); } mutex_unlock(&q->mutex); return e; }

/* Look up a scheduling policy descriptor by name; NULL if unknown. */
static const struct policy *policy_from_name(const char *name) { const struct policy *p = &policies[0]; while (p->name != NULL) { if (!strcmp(name, p->name)) return p; p++; } return NULL; }

/* Map a numeric scheduling policy back to its name; "unknown" as fallback. */
static const char *policy_name(int policy) { const struct policy *p = &policies[0]; static const char *rval = "unknown"; while (p->name != NULL) { if (p->policy == policy) return p->name; p++; } return rval; }

/* True if @name is one of the latency tracers this tool supports. */
static bool is_relevant_tracer(const char *name) { unsigned int i; for (i = 0; relevant_tracers[i]; i++) if (!strcmp(name, relevant_tracers[i])) return true; return false; }

/* True if random sleeping is meaningful for tracer @name. */
static bool random_makes_sense(const char *name) { unsigned int i; for (i = 0; random_tracers[i]; i++) if (!strcmp(name, random_tracers[i])) return true; return false; }

/* Print the kernel's available latency tracers; warn if there are none. */
static void show_available(void) { char **tracers; int found = 0; int i; tracers = tracefs_tracers(NULL); for (i = 0; tracers && tracers[i]; i++) { if (is_relevant_tracer(tracers[i])) found++; } if (!tracers) { warnx(no_tracer_msg); return; } if (!found) { warnx(no_latency_tr_msg); tracefs_list_free(tracers); return; } printf("The following latency tracers are
available on your system:\n"); for (i = 0; tracers[i]; i++) { if (is_relevant_tracer(tracers[i])) printf("%s\n", tracers[i]); } tracefs_list_free(tracers); }

/*
 * Check whether tracer @name is supported by the running kernel.
 * Sets *notracer when the tracer list itself cannot be read.
 */
static bool tracer_valid(const char *name, bool *notracer) { char **tracers; int i; bool rval = false; *notracer = false; tracers = tracefs_tracers(NULL); if (!tracers) { *notracer = true; return false; } for (i = 0; tracers[i]; i++) if (!strcmp(tracers[i], name)) { rval = true; break; } tracefs_list_free(tracers); return rval; }

/* Return the first supported tracer from relevant_tracers[], or NULL. */
static const char *find_default_tracer(void) { int i; bool notracer; bool valid; for (i = 0; relevant_tracers[i]; i++) { valid = tracer_valid(relevant_tracers[i], &notracer); if (notracer) errx(EXIT_FAILURE, no_tracer_msg); if (valid) return relevant_tracers[i]; } return NULL; }

/* Return true with probability @prob/1000, using the thread-local PRNG state. */
static bool toss_coin(struct drand48_data *buffer, unsigned int prob) { long r; if (unlikely(lrand48_r(buffer, &r))) { warnx("lrand48_r() failed"); cleanup_exit(EXIT_FAILURE); } r = r % 1000L; if (r < prob) return true; else return false; }

/*
 * Sleep up to sleep_time microseconds, waking early if a newer request
 * arrives. Returns the planned delay, or -1 if interrupted by a new
 * request (in which case the caller skips printing).
 */
static long go_to_sleep(const struct entry *req) { struct timespec future; long delay = sleep_time; get_time_in_future(&future, delay); mutex_lock(&printstate.mutex); while (!printstate_has_new_req_arrived(req)) { cond_timedwait(&printstate.cond, &printstate.mutex, &future); if (time_has_passed(&future)) break; } if (printstate_has_new_req_arrived(req)) delay = -1; mutex_unlock(&printstate.mutex); return delay; }

/* Apply the selected scheduling policy/priority to the whole process, or die. */
static void set_priority(void) { int r; pid_t pid; struct sched_param param; memset(&param, 0, sizeof(param)); param.sched_priority = sched_pri; pid = getpid(); r = sched_setscheduler(pid, sched_policy, &param); if (r != 0) err(EXIT_FAILURE, "sched_setscheduler() failed"); }

/* gettid(2) via syscall for portability with older libc. */
pid_t latency_collector_gettid(void) { return (pid_t) syscall(__NR_gettid); }

/* Announce the calling thread's tid, scheduling policy and priority. */
static void print_priority(void) { pid_t tid; int policy; int r; struct sched_param param; tid = latency_collector_gettid(); r = pthread_getschedparam(pthread_self(), &policy, &param); if (r != 0) { warn("pthread_getschedparam() failed"); cleanup_exit(EXIT_FAILURE); } mutex_lock(&print_mtx); printf("Thread %d runs with scheduling policy %s and priority %d\n", tid, policy_name(policy), param.sched_priority); mutex_unlock(&print_mtx); }

/*
 * Format a one-line "latency detected/skipped" message (optionally
 * prefixed by a table-resize notice) into @buffer and write it to stdout
 * under print_mtx.
 */
static __always_inline void __print_skipmessage(const struct short_msg *resize_msg, const struct timespec *timestamp, char *buffer, size_t bufspace, const struct entry *req, bool excuse, const char *str) { ssize_t bytes = 0; char *p = &buffer[0]; long us, sec; int r; sec = timestamp->tv_sec; us = timestamp->tv_nsec / 1000; if (resize_msg != NULL && resize_msg->len > 0) { strncpy(p, resize_msg->buf, resize_msg->len); bytes += resize_msg->len; p += resize_msg->len; bufspace -= resize_msg->len; } if (excuse) r = snprintf(p, bufspace, "%ld.%06ld Latency %d printout skipped due to %s\n", sec, us, req->ticket, str); else r = snprintf(p, bufspace, "%ld.%06ld Latency %d detected\n", sec, us, req->ticket); if (r < 0) warn("snprintf() failed"); else bytes += r; /* These prints could happen concurrently */ mutex_lock(&print_mtx); write_or_die(fd_stdout, buffer, bytes); mutex_unlock(&print_mtx); }

/* Report a latency whose printout was skipped because of the random delay. */
static void print_skipmessage(const struct short_msg *resize_msg, const struct timespec *timestamp, char *buffer, size_t bufspace, const struct entry *req, bool excuse) { __print_skipmessage(resize_msg, timestamp, buffer, bufspace, req, excuse, "random delay"); }

/* Report a latency that was lost (superseded) for @reason. */
static void print_lostmessage(const struct timespec *timestamp, char *buffer, size_t bufspace, const struct entry *req, const char *reason) { __print_skipmessage(NULL, timestamp, buffer, bufspace, req, true, reason); }

/*
 * Read the ftrace trace file and write it to stdout framed by BEGIN/END
 * markers, with a header describing whether we slept first.
 * Also decrements the in-flight print count.
 */
static void print_tracefile(const struct short_msg *resize_msg, const struct timespec *timestamp, char *buffer, size_t bufspace, long slept, const struct entry *req) { static const int reserve = 256; char *p = &buffer[0]; ssize_t bytes = 0; ssize_t bytes_tot = 0; long us, sec; long slept_ms; int trace_fd; /* Save some space for the final string and final null char */
bufspace = bufspace - reserve - 1; if (resize_msg != NULL && resize_msg->len > 0) { bytes = resize_msg->len; strncpy(p, resize_msg->buf, bytes); bytes_tot += bytes; p += bytes; bufspace -= bytes; } trace_fd = open(debug_tracefile, O_RDONLY); if (trace_fd < 0) { warn("open() failed on %s", debug_tracefile); return; } sec = timestamp->tv_sec; us = timestamp->tv_nsec / 1000; if (slept != 0) { slept_ms = slept / 1000; bytes = snprintf(p, bufspace, "%ld.%06ld Latency %d randomly sleep for %ld ms before print\n", sec, us, req->ticket, slept_ms); } else { bytes = snprintf(p, bufspace, "%ld.%06ld Latency %d immediate print\n", sec, us, req->ticket); } if (bytes < 0) { warn("snprintf() failed"); return; } p += bytes; bufspace -= bytes; bytes_tot += bytes; bytes = snprintf(p, bufspace, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> BEGIN <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n" ); if (bytes < 0) { warn("snprintf() failed"); return; } p += bytes; bufspace -= bytes; bytes_tot += bytes;
/* Slurp the whole trace file into the buffer, retrying on EINTR. */
do { bytes = read(trace_fd, p, bufspace); if (bytes < 0) { if (errno == EINTR) continue; warn("read() failed on %s", debug_tracefile); if (unlikely(close(trace_fd) != 0)) warn("close() failed on %s", debug_tracefile); return; } if (bytes == 0) break; p += bytes; bufspace -= bytes; bytes_tot += bytes; } while (true); if (unlikely(close(trace_fd) != 0)) warn("close() failed on %s", debug_tracefile); printstate_cnt_dec(); /* Add the reserve space back to the budget for the final string */ bufspace += reserve; bytes = snprintf(p, bufspace, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> END <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n"); if (bytes < 0) { warn("snprintf() failed"); return; } bytes_tot += bytes; /* These prints could happen concurrently */ mutex_lock(&print_mtx); write_or_die(fd_stdout, buffer, bytes_tot); mutex_unlock(&print_mtx); }

/* Build "no" + @opt in a malloc'ed string (caller frees). */
static char *get_no_opt(const char *opt) { char *no_opt; int s; s = strlen(opt) + strlen(OPT_NO_PREFIX) + 1; /* We may be called from cleanup_exit() via set_trace_opt() */ no_opt = malloc_or_die_nocleanup(s); strcpy(no_opt, OPT_NO_PREFIX); strcat(no_opt, opt); return no_opt; }

/*
 * Extract the next whitespace-separated token from @allopt into a
 * malloc'ed string; *next is advanced past it. NULL at end of input.
 */
static char *find_next_optstr(const char *allopt, const char **next) { const char *begin; const char *end; char *r; int s = 0; if (allopt == NULL) return NULL; for (begin = allopt; *begin != '\0'; begin++) { if (isgraph(*begin)) break; } if (*begin == '\0') return NULL; for (end = begin; *end != '\0' && isgraph(*end); end++) s++; r = malloc_or_die_nocleanup(s + 1); strncpy(r, begin, s); r[s] = '\0'; *next = begin + s; return r; }

/*
 * Scan the trace_options text @allopt for @opt or its "no"-prefixed
 * negation. *found tells whether the option exists; the return value is
 * its current on/off state.
 */
static bool get_trace_opt(const char *allopt, const char *opt, bool *found) { *found = false; char *no_opt; char *str; const char *next = allopt; bool rval = false; no_opt = get_no_opt(opt); do { str = find_next_optstr(next, &next); if (str == NULL) break; if (!strcmp(str, opt)) { *found = true; rval = true; free(str); break; } if (!strcmp(str, no_opt)) { *found = true; rval = false; free(str); break; } free(str); } while (true); free(no_opt); return rval; }

/* Write @opt (or its "no" form when @value is false) to trace_options. */
static int set_trace_opt(const char *opt, bool value) { char *str; int r; if (value) str = strdup(opt); else str = get_no_opt(opt); r = tracefs_instance_file_write(NULL, TR_OPTIONS, str); free(str); return r; }

/* Snapshot the current state of the options we care about into @state. */
void save_trace_opts(struct ftrace_state *state) { char *allopt; int psize; int i; allopt = tracefs_instance_file_read(NULL, TR_OPTIONS, &psize); if (!allopt) errx(EXIT_FAILURE, "Failed to read the %s file\n", TR_OPTIONS); for (i = 0; i < OPTIDX_NR; i++) state->opt[i] = get_trace_opt(allopt, optstr[i], &state->opt_valid[i]); free(allopt); }

/*
 * Write @new to the tracefs @file unless it already holds that value.
 * Error handling follows @h (warn, warn+cleanup, or hard exit).
 */
static void write_file(const char *file, const char *cur, const char *new, enum errhandling h) { int r; static const char *emsg = "Failed to write to the %s file!"; /* Do nothing if we know that the current and new value are equal */ if (cur && !needs_change(cur, new)) return; r = tracefs_instance_file_write(NULL, file, new); if (r < 0) { if (h) { warnx(emsg, file); if (h == ERR_CLEANUP) cleanup_exit(EXIT_FAILURE); } else
errx(EXIT_FAILURE, emsg, file); } if (verbose_ftrace()) { mutex_lock(&print_mtx); printf("%s was set to %s\n", file, new); mutex_unlock(&print_mtx); } }

/* Zero tracing_max_latency so only new maxima trigger inotify events. */
static void reset_max_latency(void) { write_file(TR_MAXLAT, NULL, "0", ERR_CLEANUP); }

/*
 * Save the current tracer, threshold and options into save_state, and
 * switch to the nop tracer. Refuses to proceed (exit(0)) when another
 * tracer is active, unless -F/--force was given.
 */
static void save_and_disable_tracer(void) { char *orig_th; char *tracer; bool need_nop = false; mutex_lock(&save_state.mutex); save_trace_opts(&save_state); tracer = read_file(TR_CURRENT, ERR_EXIT); orig_th = read_file(TR_THRESH, ERR_EXIT); if (needs_change(tracer, NOP_TRACER)) { mutex_lock(&print_mtx); if (force_tracer) { printf( "The %s tracer is already in use but proceeding anyway!\n", tracer); } else { printf( "The %s tracer is already in use, cowardly bailing out!\n" "This could indicate that another program or instance is tracing.\n" "Use the -F [--force] option to disregard the current tracer.\n", tracer); exit(0); } mutex_unlock(&print_mtx); need_nop = true; } save_state.tracer = tracer; save_state.thresh = orig_th; if (need_nop) write_file(TR_CURRENT, NULL, NOP_TRACER, ERR_EXIT); mutex_unlock(&save_state.mutex); }

/* Apply the wanted trace options, but only those that exist and actually differ. */
void set_trace_opts(struct ftrace_state *state, bool *new) { int i; int r; /* * We only set options if we earlier detected that the option exists in * the trace_options file and that the wanted setting is different from * the one we saw in save_and_disable_tracer() */ for (i = 0; i < OPTIDX_NR; i++) if (state->opt_valid[i] && state->opt[i] != new[i]) { r = set_trace_opt(optstr[i], new[i]); if (r < 0) { warnx("Failed to set the %s option to %s", optstr[i], bool2str(new[i])); cleanup_exit(EXIT_FAILURE); } if (verbose_ftrace()) { mutex_lock(&print_mtx); printf("%s in %s was set to %s\n", optstr[i], TR_OPTIONS, bool2str(new[i])); mutex_unlock(&print_mtx); } } }

/* Configure options + threshold, then switch from nop to the chosen tracer. */
static void enable_tracer(void) { mutex_lock(&save_state.mutex); set_trace_opts(&save_state, use_options); write_file(TR_THRESH, save_state.thresh, threshold, ERR_CLEANUP); write_file(TR_CURRENT, NOP_TRACER, current_tracer, ERR_CLEANUP); mutex_unlock(&save_state.mutex); }

/*
 * Main producer loop: watch tracing_max_latency with inotify; for every
 * modification, issue a ticket and hand the request to the print queue,
 * unless a print is already in flight (then the event is "lost").
 */
static void tracing_loop(void) { int ifd = inotify_init(); int wd; const ssize_t bufsize = sizeof(inotify_buffer); const ssize_t istructsize = sizeof(struct inotify_event); char *buf = &inotify_buffer[0]; ssize_t nr_read; char *p; int modified; struct inotify_event *event; struct entry req; char *buffer; const size_t bufspace = PRINT_BUFFER_SIZE; struct timespec timestamp; print_priority(); buffer = malloc_or_die(bufspace); if (ifd < 0) err(EXIT_FAILURE, "inotify_init() failed!"); if (setup_ftrace) { /* * We must disable the tracer before resetting the max_latency */ save_and_disable_tracer(); /* * We must reset the max_latency before the inotify_add_watch() * call. */ reset_max_latency(); } wd = inotify_add_watch(ifd, debug_maxlat, IN_MODIFY); if (wd < 0) err(EXIT_FAILURE, "inotify_add_watch() failed!"); if (setup_ftrace) enable_tracer(); signal_blocking(SIG_UNBLOCK); while (true) { modified = 0; check_signals(); nr_read = read(ifd, buf, bufsize); check_signals(); if (nr_read < 0) { if (errno == EINTR) continue; warn("read() failed on inotify fd!"); cleanup_exit(EXIT_FAILURE); } if (nr_read == bufsize) warnx("inotify() buffer filled, skipping events"); if (nr_read < istructsize) { warnx("read() returned too few bytes on inotify fd"); cleanup_exit(EXIT_FAILURE); } for (p = buf; p < buf + nr_read;) { event = (struct inotify_event *) p; if ((event->mask & IN_MODIFY) != 0) modified++; p += istructsize + event->len; } while (modified > 0) { check_signals(); mutex_lock(&printstate.mutex); check_signals(); printstate_next_ticket(&req); if (printstate_cnt_read() > 0) { printstate_mark_req_completed(&req); mutex_unlock(&printstate.mutex); if (verbose_lostevent()) { clock_gettime_or_die(CLOCK_MONOTONIC, &timestamp); print_lostmessage(&timestamp, buffer, bufspace, &req, "inotify loop"); } break; } mutex_unlock(&printstate.mutex); if (queue_try_to_add_entry(&printqueue, &req) != 0) { /* These prints could happen concurrently */
check_signals(); mutex_lock(&print_mtx); check_signals(); write_or_die(fd_stdout, queue_full_warning, strlen(queue_full_warning)); mutex_unlock(&print_mtx); } modified--; } } }

/*
 * Print-thread body: wait for requests on the print queue, possibly
 * sleep a randomized interval, then dump the trace file (or a skip/lost
 * message). @arg is a malloc'ed PRNG seed owned by this thread.
 */
static void *do_printloop(void *arg) { const size_t bufspace = PRINT_BUFFER_SIZE; char *buffer; long *rseed = (long *) arg; struct drand48_data drandbuf; long slept = 0; struct entry req; int prob = 0; struct timespec timestamp; struct short_msg resize_msg; print_priority(); if (srand48_r(*rseed, &drandbuf) != 0) { warn("srand48_r() failed!\n"); cleanup_exit(EXIT_FAILURE); } buffer = malloc_or_die(bufspace); while (true) { req = queue_wait_for_entry(&printqueue); clock_gettime_or_die(CLOCK_MONOTONIC, &timestamp); mutex_lock(&printstate.mutex); if (prev_req_won_race(&req)) { printstate_mark_req_completed(&req); mutex_unlock(&printstate.mutex); if (verbose_lostevent()) print_lostmessage(&timestamp, buffer, bufspace, &req, "print loop"); continue; } mutex_unlock(&printstate.mutex); /* * Toss a coin to decide if we want to sleep before printing * out the backtrace. The reason for this is that opening * /sys/kernel/tracing/trace will cause a blackout of * hundreds of ms, where no latencies will be noted by the * latency tracer. Thus by randomly sleeping we try to avoid * missing traces systematically due to this. With this option * we will sometimes get the first latency, some other times * some of the later ones, in case of closely spaced traces. */ if (trace_enable && use_random_sleep) { slept = 0; prob = table_get_probability(&req, &resize_msg); if (!toss_coin(&drandbuf, prob)) slept = go_to_sleep(&req); if (slept >= 0) { /* A print is ongoing */ printstate_cnt_inc(); /* * We will do the printout below so we have to * mark it as completed while we still have the * mutex. */ mutex_lock(&printstate.mutex); printstate_mark_req_completed(&req); mutex_unlock(&printstate.mutex); } } if (trace_enable) { /* * slept < 0 means that we detected another * notification in go_to_sleep() above */ if (slept >= 0) /* * N.B. printstate_cnt_dec(); will be called * inside print_tracefile() */ print_tracefile(&resize_msg, &timestamp, buffer, bufspace, slept, &req); else print_skipmessage(&resize_msg, &timestamp, buffer, bufspace, &req, true); } else { print_skipmessage(&resize_msg, &timestamp, buffer, bufspace, &req, false); } } return NULL; }

/*
 * Spawn the print threads, seeding each PRNG from /dev/urandom when
 * available (falling back to the loop index).
 * NOTE(review): the close() guard tests ufd > 0, which would leak fd 0 —
 * presumably ufd >= 0 was intended; harmless in practice since fd 0 is
 * normally stdin.
 */
static void start_printthread(void) { unsigned int i; long *seed; int ufd; ufd = open(DEV_URANDOM, O_RDONLY); if (nr_threads > MAX_THREADS) { warnx( "Number of requested print threads was %d, max number is %d\n", nr_threads, MAX_THREADS); nr_threads = MAX_THREADS; } for (i = 0; i < nr_threads; i++) { seed = malloc_or_die(sizeof(*seed)); if (ufd < 0 || read(ufd, seed, sizeof(*seed)) != sizeof(*seed)) { printf( "Warning! Using trivial random number seed, since %s not available\n", DEV_URANDOM); fflush(stdout); *seed = i; } errno = pthread_create(&printthread[i], NULL, do_printloop, seed); if (errno) err(EXIT_FAILURE, "pthread_create()"); } if (ufd > 0 && close(ufd) != 0) warn("close() failed"); }

/* Print the complete command-line help text. (String continues on the next source line.) */
static void show_usage(void) { printf( "Usage: %s [OPTION]...\n\n" "Collect closely occurring latencies from %s\n" "with any of the following tracers: preemptirqsoff, preemptoff, irqsoff, " "wakeup,\nwakeup_dl, or wakeup_rt.\n\n" "The occurrence of a latency is detected by monitoring the file\n" "%s with inotify.\n\n" "The following options are supported:\n\n" "-l, --list\t\tList the latency tracers that are supported by the\n" "\t\t\tcurrently running Linux kernel. If you don't see the\n" "\t\t\ttracer that you want, you will probably need to\n" "\t\t\tchange your kernel config and build a new kernel.\n\n" "-t, --tracer TR\t\tUse the tracer TR. 
The default is to use the first\n" "\t\t\ttracer that is supported by the kernel in the following\n" "\t\t\torder of precedence:\n\n" "\t\t\tpreemptirqsoff\n" "\t\t\tpreemptoff\n" "\t\t\tirqsoff\n" "\t\t\twakeup\n" "\t\t\twakeup_rt\n" "\t\t\twakeup_dl\n" "\n" "\t\t\tIf TR is not on the list above, then a warning will be\n" "\t\t\tprinted.\n\n" "-F, --force\t\tProceed even if another ftrace tracer is active. Without\n" "\t\t\tthis option, the program will refuse to start tracing if\n" "\t\t\tany other tracer than the nop tracer is active.\n\n" "-s, --threshold TH\tConfigure ftrace to use a threshold of TH microseconds\n" "\t\t\tfor the tracer. The default is 0, which means that\n" "\t\t\ttracing_max_latency will be used. tracing_max_latency is\n" "\t\t\tset to 0 when the program is started and contains the\n" "\t\t\tmaximum of the latencies that have been encountered.\n\n" "-f, --function\t\tEnable the function-trace option in trace_options. With\n" "\t\t\tthis option, ftrace will trace the functions that are\n" "\t\t\texecuted during a latency, without it we only get the\n" "\t\t\tbeginning, end, and backtrace.\n\n" "-g, --graph\t\tEnable the display-graph option in trace_option. This\n" "\t\t\toption causes ftrace to show the graph of how functions\n" "\t\t\tare calling other functions.\n\n" "-c, --policy POL\tRun the program with scheduling policy POL. POL can be\n" "\t\t\tother, batch, idle, rr or fifo. The default is rr. When\n" "\t\t\tusing rr or fifo, remember that these policies may cause\n" "\t\t\tother tasks to experience latencies.\n\n" "-p, --priority PRI\tRun the program with priority PRI. The acceptable range\n" "\t\t\tof PRI depends on the scheduling policy.\n\n" "-n, --notrace\t\tIf latency is detected, do not print out the content of\n" "\t\t\tthe trace file to standard output\n\n"
/* NOTE(review): scan_arguments() binds --threads to -e, not -t; the "-t" below looks like a typo — confirm against the getopt string. */
"-t, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n" "-r, --random\t\tArbitrarily sleep a certain amount of time, default\n" "\t\t\t%ld ms, before reading the trace file. The\n" "\t\t\tprobabilities for sleep are chosen so that the\n" "\t\t\tprobability of obtaining any of a cluster of closely\n" "\t\t\toccurring latencies are equal, i.e. we will randomly\n" "\t\t\tchoose which one we collect from the trace file.\n\n" "\t\t\tThis option is probably only useful with the irqsoff,\n" "\t\t\tpreemptoff, and preemptirqsoff tracers.\n\n" "-a, --nrlat NRLAT\tFor the purpose of arbitrary delay, assume that there\n" "\t\t\tare no more than NRLAT clustered latencies. If NRLAT\n" "\t\t\tlatencies are detected during a run, this value will\n" "\t\t\tautomatically be increased to NRLAT + 1 and then to\n" "\t\t\tNRLAT + 2 and so on. The default is %d. This option\n" "\t\t\timplies -r. We need to know this number in order to\n" "\t\t\tbe able to calculate the probabilities of sleeping.\n" "\t\t\tSpecifically, the probabilities of not sleeping, i.e. to\n" "\t\t\tdo an immediate printout will be:\n\n" "\t\t\t1/NRLAT 1/(NRLAT - 1) ... 1/3 1/2 1\n\n" "\t\t\tThe probability of sleeping will be:\n\n" "\t\t\t1 - P, where P is from the series above\n\n" "\t\t\tThis descending probability will cause us to choose\n" "\t\t\tan occurrence at random. Observe that the final\n" "\t\t\tprobability is 0, it is when we reach this probability\n" "\t\t\tthat we increase NRLAT automatically. As an example,\n" "\t\t\twith the default value of 2, the probabilities will be:\n\n" "\t\t\t1/2 0\n\n" "\t\t\tThis means, when a latency is detected we will sleep\n" "\t\t\twith 50%% probability. If we ever detect another latency\n" "\t\t\tduring the sleep period, then the probability of sleep\n" "\t\t\twill be 0%% and the table will be expanded to:\n\n" "\t\t\t1/3 1/2 0\n\n" "-v, --verbose\t\tIncrease the verbosity. If this option is given once,\n" "\t\t\tthen print a message every time that the NRLAT value\n" "\t\t\tis automatically increased. It also causes a message to\n" "\t\t\tbe printed when the ftrace settings are changed. If this\n" "\t\t\toption is given at least twice, then also print a\n" "\t\t\twarning for lost events.\n\n" "-u, --time TIME\t\tArbitrarily sleep for a specified time TIME ms before\n" "\t\t\tprinting out the trace from the trace file. The default\n" "\t\t\tis %ld ms. This option implies -r.\n\n" "-x, --no-ftrace\t\tDo not configure ftrace. This assume that the user\n" "\t\t\tconfigures the ftrace files in sysfs such as\n" "\t\t\t/sys/kernel/tracing/current_tracer or equivalent.\n\n" "-i, --tracefile FILE\tUse FILE as trace file. The default is\n" "\t\t\t%s.\n" "\t\t\tThis options implies -x\n\n" "-m, --max-lat FILE\tUse FILE as tracing_max_latency file. The default is\n" "\t\t\t%s.\n" "\t\t\tThis options implies -x\n\n" , prg_name, debug_tracefile_dflt, debug_maxlat_dflt, DEFAULT_NR_PRINTER_THREADS, SLEEP_TIME_MS_DEFAULT, DEFAULT_TABLE_SIZE, SLEEP_TIME_MS_DEFAULT, debug_tracefile_dflt, debug_maxlat_dflt); }

/* Resolve the trace and tracing_max_latency paths via tracefs, with placeholders for show_usage(). */
static void find_tracefiles(void) { debug_tracefile_dflt = tracefs_get_tracing_file("trace"); if (debug_tracefile_dflt == NULL) { /* This is needed in show_usage() */ debug_tracefile_dflt = DEBUG_NOFILE; } debug_maxlat_dflt = tracefs_get_tracing_file("tracing_max_latency"); if (debug_maxlat_dflt == NULL) { /* This is needed in show_usage() */ debug_maxlat_dflt = DEBUG_NOFILE; } debug_tracefile = debug_tracefile_dflt; debug_maxlat = debug_maxlat_dflt; }

/* True if @s consists solely of decimal digits (empty string returns true). */
bool alldigits(const char *s) { for (; *s != '\0'; s++) if (!isdigit(*s)) return false; return true; }

/* Exit with an error naming @argname unless @optarg is all digits. */
void check_alldigits(const char *optarg, const char *argname) { if (!alldigits(optarg)) errx(EXIT_FAILURE, "The %s parameter expects a decimal argument\n", argname); }

/* Parse the command line and set the global configuration. (Option table continues on the next source line.) */
static void scan_arguments(int argc, char *argv[]) { int c; int i; int option_idx = 0; static struct option long_options[] = { {
"list", no_argument, 0, 'l' }, { "tracer", required_argument, 0, 't' }, { "force", no_argument, 0, 'F' }, { "threshold", required_argument, 0, 's' }, { "function", no_argument, 0, 'f' }, { "graph", no_argument, 0, 'g' }, { "policy", required_argument, 0, 'c' }, { "priority", required_argument, 0, 'p' }, { "help", no_argument, 0, 'h' }, { "notrace", no_argument, 0, 'n' }, { "random", no_argument, 0, 'r' }, { "nrlat", required_argument, 0, 'a' }, { "threads", required_argument, 0, 'e' }, { "time", required_argument, 0, 'u' }, { "verbose", no_argument, 0, 'v' }, { "no-ftrace", no_argument, 0, 'x' }, { "tracefile", required_argument, 0, 'i' }, { "max-lat", required_argument, 0, 'm' }, { 0, 0, 0, 0 } }; const struct policy *p; int max, min; int value; bool notracer, valid; /* * We must do this before parsing the arguments because show_usage() * needs to display these. */ find_tracefiles(); while (true) { c = getopt_long(argc, argv, "lt:Fs:fgc:p:hnra:e:u:vxi:m:", long_options, &option_idx); if (c == -1) break; switch (c) { case 'l': show_available(); exit(0); break; case 't': current_tracer = strdup(optarg); if (!is_relevant_tracer(current_tracer)) { warnx("%s is not a known latency tracer!\n", current_tracer); } valid = tracer_valid(current_tracer, &notracer); if (notracer) errx(EXIT_FAILURE, no_tracer_msg); if (!valid) errx(EXIT_FAILURE, "The tracer %s is not supported by your kernel!\n", current_tracer); break; case 'F': force_tracer = true; break; case 's': check_alldigits(optarg, "-s [--threshold]"); threshold = strdup(optarg); break; case 'f': use_options[OPTIDX_FUNC_TR] = true; break; case 'g': use_options[OPTIDX_DISP_GR] = true; break; case 'c': p = policy_from_name(optarg); if (p != NULL) { sched_policy = p->policy; sched_policy_set = true; if (!sched_pri_set) { sched_pri = p->default_pri; sched_pri_set = true; } } else { warnx("Unknown scheduling %s\n", optarg); show_usage(); exit(0); } break; case 'p': check_alldigits(optarg, "-p [--priority]"); sched_pri = 
atoi(optarg); sched_pri_set = true; break; case 'h': show_usage(); exit(0); break; case 'n': trace_enable = false; use_random_sleep = false; break; case 'e': check_alldigits(optarg, "-e [--threads]"); value = atoi(optarg); if (value > 0) nr_threads = value; else { warnx("NRTHR must be > 0\n"); show_usage(); exit(0); } break; case 'u': check_alldigits(optarg, "-u [--time]"); value = atoi(optarg); if (value < 0) { warnx("TIME must be >= 0\n"); show_usage(); exit(0); } trace_enable = true; use_random_sleep = true; sleep_time = value * USEC_PER_MSEC; break; case 'v': verbosity++; break; case 'r': trace_enable = true; use_random_sleep = true; break; case 'a': check_alldigits(optarg, "-a [--nrlat]"); value = atoi(optarg); if (value <= 0) { warnx("NRLAT must be > 0\n"); show_usage(); exit(0); } trace_enable = true; use_random_sleep = true; table_startsize = value; break; case 'x': setup_ftrace = false; break; case 'i': setup_ftrace = false; debug_tracefile = strdup(optarg); break; case 'm': setup_ftrace = false; debug_maxlat = strdup(optarg); break; default: show_usage(); exit(0); break; } } if (setup_ftrace) { if (!current_tracer) { current_tracer = find_default_tracer(); if (!current_tracer) errx(EXIT_FAILURE, "No default tracer found and tracer not specified\n"); } if (use_random_sleep && !random_makes_sense(current_tracer)) { warnx("WARNING: The tracer is %s and random sleep has", current_tracer); fprintf(stderr, "been enabled. Random sleep is intended for the following tracers:\n"); for (i = 0; random_tracers[i]; i++) fprintf(stderr, "%s\n", random_tracers[i]); fprintf(stderr, "\n"); } } if (debug_tracefile == DEBUG_NOFILE || debug_maxlat == DEBUG_NOFILE) errx(EXIT_FAILURE, "Could not find tracing directory e.g. 
/sys/kernel/tracing\n"); if (!sched_policy_set) { sched_policy = SCHED_RR; sched_policy_set = true; if (!sched_pri_set) { sched_pri = RT_DEFAULT_PRI; sched_pri_set = true; } } max = sched_get_priority_max(sched_policy); min = sched_get_priority_min(sched_policy); if (sched_pri < min) { printf( "ATTENTION: Increasing priority to minimum, which is %d\n", min); sched_pri = min; } if (sched_pri > max) { printf( "ATTENTION: Reducing priority to maximum, which is %d\n", max); sched_pri = max; } } static void show_params(void) { printf( "\n" "Running with scheduling policy %s and priority %d. Using %d print threads.\n", policy_name(sched_policy), sched_pri, nr_threads); if (trace_enable) { if (use_random_sleep) { printf( "%s will be printed with random delay\n" "Start size of the probability table:\t\t\t%d\n" "Print a message when the prob. table changes size:\t%s\n" "Print a warning when an event has been lost:\t\t%s\n" "Sleep time is:\t\t\t\t\t\t%ld ms\n", debug_tracefile, table_startsize, bool2str(verbose_sizechange()), bool2str(verbose_lostevent()), sleep_time / USEC_PER_MSEC); } else { printf("%s will be printed immediately\n", debug_tracefile); } } else { printf("%s will not be printed\n", debug_tracefile); } if (setup_ftrace) { printf("Tracer:\t\t\t\t\t\t\t%s\n" "%s option:\t\t\t\t\t%s\n" "%s option:\t\t\t\t\t%s\n", current_tracer, optstr[OPTIDX_FUNC_TR], bool2str(use_options[OPTIDX_FUNC_TR]), optstr[OPTIDX_DISP_GR], bool2str(use_options[OPTIDX_DISP_GR])); if (!strcmp(threshold, "0")) printf("Threshold:\t\t\t\t\t\ttracing_max_latency\n"); else printf("Threshold:\t\t\t\t\t\t%s\n", threshold); } printf("\n"); } int main(int argc, char *argv[]) { init_save_state(); signal_blocking(SIG_BLOCK); setup_sig_handler(); open_stdout(); if (argc >= 1) prg_name = argv[0]; else prg_name = prg_unknown; scan_arguments(argc, argv); show_params(); init_printstate(); init_print_mtx(); if (use_random_sleep) { init_probabilities(); if (verbose_sizechange()) printf("Initializing 
probability table to %d\n", table_startsize); sleeptable_resize(table_startsize, false, NULL); } set_priority(); init_queue(&printqueue); start_printthread(); tracing_loop(); return 0; }
linux-master
tools/tracing/latency/latency-collector.c
// SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE #include <sys/sendfile.h> #include <tracefs.h> #include <signal.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include "trace.h" #include "utils.h" /* * enable_tracer_by_name - enable a tracer on the given instance */ int enable_tracer_by_name(struct tracefs_instance *inst, const char *tracer_name) { enum tracefs_tracers tracer; int retval; tracer = TRACEFS_TRACER_CUSTOM; debug_msg("Enabling %s tracer\n", tracer_name); retval = tracefs_tracer_set(inst, tracer, tracer_name); if (retval < 0) { if (errno == ENODEV) err_msg("Tracer %s not found!\n", tracer_name); err_msg("Failed to enable the %s tracer\n", tracer_name); return -1; } return 0; } /* * disable_tracer - set nop tracer to the insta */ void disable_tracer(struct tracefs_instance *inst) { enum tracefs_tracers t = TRACEFS_TRACER_NOP; int retval; retval = tracefs_tracer_set(inst, t); if (retval < 0) err_msg("Oops, error disabling tracer\n"); } /* * create_instance - create a trace instance with *instance_name */ struct tracefs_instance *create_instance(char *instance_name) { return tracefs_instance_create(instance_name); } /* * destroy_instance - remove a trace instance and free the data */ void destroy_instance(struct tracefs_instance *inst) { tracefs_instance_destroy(inst); tracefs_instance_free(inst); } /* * save_trace_to_file - save the trace output of the instance to the file */ int save_trace_to_file(struct tracefs_instance *inst, const char *filename) { const char *file = "trace"; mode_t mode = 0644; char buffer[4096]; int out_fd, in_fd; int retval = -1; in_fd = tracefs_instance_file_open(inst, file, O_RDONLY); if (in_fd < 0) { err_msg("Failed to open trace file\n"); return -1; } out_fd = creat(filename, mode); if (out_fd < 0) { err_msg("Failed to create output file %s\n", filename); goto out_close_in; } do { retval = read(in_fd, buffer, sizeof(buffer)); if (retval <= 0) goto out_close; retval = write(out_fd, buffer, retval); if 
(retval < 0) goto out_close; } while (retval > 0); retval = 0; out_close: close(out_fd); out_close_in: close(in_fd); return retval; } /* * collect_registered_events - call the existing callback function for the event * * If an event has a registered callback function, call it. * Otherwise, ignore the event. */ int collect_registered_events(struct tep_event *event, struct tep_record *record, int cpu, void *context) { struct trace_instance *trace = context; struct trace_seq *s = trace->seq; if (!event->handler) return 0; event->handler(s, record, event, context); return 0; } /* * trace_instance_destroy - destroy and free a rtla trace instance */ void trace_instance_destroy(struct trace_instance *trace) { if (trace->inst) { disable_tracer(trace->inst); destroy_instance(trace->inst); trace->inst = NULL; } if (trace->seq) { free(trace->seq); trace->seq = NULL; } if (trace->tep) { tep_free(trace->tep); trace->tep = NULL; } } /* * trace_instance_init - create an rtla trace instance * * It is more than the tracefs instance, as it contains other * things required for the tracing, such as the local events and * a seq file. * * Note that the trace instance is returned disabled. This allows * the tool to apply some other configs, like setting priority * to the kernel threads, before starting generating trace entries. */ int trace_instance_init(struct trace_instance *trace, char *tool_name) { trace->seq = calloc(1, sizeof(*trace->seq)); if (!trace->seq) goto out_err; trace_seq_init(trace->seq); trace->inst = create_instance(tool_name); if (!trace->inst) goto out_err; trace->tep = tracefs_local_events(NULL); if (!trace->tep) goto out_err; /* * Let the main enable the record after setting some other * things such as the priority of the tracer's threads. 
*/ tracefs_trace_off(trace->inst); return 0; out_err: trace_instance_destroy(trace); return 1; } /* * trace_instance_start - start tracing a given rtla instance */ int trace_instance_start(struct trace_instance *trace) { return tracefs_trace_on(trace->inst); } /* * trace_events_free - free a list of trace events */ static void trace_events_free(struct trace_events *events) { struct trace_events *tevent = events; struct trace_events *free_event; while (tevent) { free_event = tevent; tevent = tevent->next; if (free_event->filter) free(free_event->filter); if (free_event->trigger) free(free_event->trigger); free(free_event->system); free(free_event); } } /* * trace_event_alloc - alloc and parse a single trace event */ struct trace_events *trace_event_alloc(const char *event_string) { struct trace_events *tevent; tevent = calloc(1, sizeof(*tevent)); if (!tevent) return NULL; tevent->system = strdup(event_string); if (!tevent->system) { free(tevent); return NULL; } tevent->event = strstr(tevent->system, ":"); if (tevent->event) { *tevent->event = '\0'; tevent->event = &tevent->event[1]; } return tevent; } /* * trace_event_add_filter - record an event filter */ int trace_event_add_filter(struct trace_events *event, char *filter) { if (event->filter) free(event->filter); event->filter = strdup(filter); if (!event->filter) return 1; return 0; } /* * trace_event_add_trigger - record an event trigger action */ int trace_event_add_trigger(struct trace_events *event, char *trigger) { if (event->trigger) free(event->trigger); event->trigger = strdup(trigger); if (!event->trigger) return 1; return 0; } /* * trace_event_disable_filter - disable an event filter */ static void trace_event_disable_filter(struct trace_instance *instance, struct trace_events *tevent) { char filter[1024]; int retval; if (!tevent->filter) return; if (!tevent->filter_enabled) return; debug_msg("Disabling %s:%s filter %s\n", tevent->system, tevent->event ? 
: "*", tevent->filter); snprintf(filter, 1024, "!%s\n", tevent->filter); retval = tracefs_event_file_write(instance->inst, tevent->system, tevent->event, "filter", filter); if (retval < 0) err_msg("Error disabling %s:%s filter %s\n", tevent->system, tevent->event ? : "*", tevent->filter); } /* * trace_event_save_hist - save the content of an event hist * * If the trigger is a hist: one, save the content of the hist file. */ static void trace_event_save_hist(struct trace_instance *instance, struct trace_events *tevent) { int retval, index, out_fd; mode_t mode = 0644; char path[1024]; char *hist; if (!tevent) return; /* trigger enables hist */ if (!tevent->trigger) return; /* is this a hist: trigger? */ retval = strncmp(tevent->trigger, "hist:", strlen("hist:")); if (retval) return; snprintf(path, 1024, "%s_%s_hist.txt", tevent->system, tevent->event); printf(" Saving event %s:%s hist to %s\n", tevent->system, tevent->event, path); out_fd = creat(path, mode); if (out_fd < 0) { err_msg(" Failed to create %s output file\n", path); return; } hist = tracefs_event_file_read(instance->inst, tevent->system, tevent->event, "hist", 0); if (!hist) { err_msg(" Failed to read %s:%s hist file\n", tevent->system, tevent->event); goto out_close; } index = 0; do { index += write(out_fd, &hist[index], strlen(hist) - index); } while (index < strlen(hist)); free(hist); out_close: close(out_fd); } /* * trace_event_disable_trigger - disable an event trigger */ static void trace_event_disable_trigger(struct trace_instance *instance, struct trace_events *tevent) { char trigger[1024]; int retval; if (!tevent->trigger) return; if (!tevent->trigger_enabled) return; debug_msg("Disabling %s:%s trigger %s\n", tevent->system, tevent->event ? 
: "*", tevent->trigger); trace_event_save_hist(instance, tevent); snprintf(trigger, 1024, "!%s\n", tevent->trigger); retval = tracefs_event_file_write(instance->inst, tevent->system, tevent->event, "trigger", trigger); if (retval < 0) err_msg("Error disabling %s:%s trigger %s\n", tevent->system, tevent->event ? : "*", tevent->trigger); } /* * trace_events_disable - disable all trace events */ void trace_events_disable(struct trace_instance *instance, struct trace_events *events) { struct trace_events *tevent = events; if (!events) return; while (tevent) { debug_msg("Disabling event %s:%s\n", tevent->system, tevent->event ? : "*"); if (tevent->enabled) { trace_event_disable_filter(instance, tevent); trace_event_disable_trigger(instance, tevent); tracefs_event_disable(instance->inst, tevent->system, tevent->event); } tevent->enabled = 0; tevent = tevent->next; } } /* * trace_event_enable_filter - enable an event filter associated with an event */ static int trace_event_enable_filter(struct trace_instance *instance, struct trace_events *tevent) { char filter[1024]; int retval; if (!tevent->filter) return 0; if (!tevent->event) { err_msg("Filter %s applies only for single events, not for all %s:* events\n", tevent->filter, tevent->system); return 1; } snprintf(filter, 1024, "%s\n", tevent->filter); debug_msg("Enabling %s:%s filter %s\n", tevent->system, tevent->event ? : "*", tevent->filter); retval = tracefs_event_file_write(instance->inst, tevent->system, tevent->event, "filter", filter); if (retval < 0) { err_msg("Error enabling %s:%s filter %s\n", tevent->system, tevent->event ? 
: "*", tevent->filter); return 1; } tevent->filter_enabled = 1; return 0; } /* * trace_event_enable_trigger - enable an event trigger associated with an event */ static int trace_event_enable_trigger(struct trace_instance *instance, struct trace_events *tevent) { char trigger[1024]; int retval; if (!tevent->trigger) return 0; if (!tevent->event) { err_msg("Trigger %s applies only for single events, not for all %s:* events\n", tevent->trigger, tevent->system); return 1; } snprintf(trigger, 1024, "%s\n", tevent->trigger); debug_msg("Enabling %s:%s trigger %s\n", tevent->system, tevent->event ? : "*", tevent->trigger); retval = tracefs_event_file_write(instance->inst, tevent->system, tevent->event, "trigger", trigger); if (retval < 0) { err_msg("Error enabling %s:%s trigger %s\n", tevent->system, tevent->event ? : "*", tevent->trigger); return 1; } tevent->trigger_enabled = 1; return 0; } /* * trace_events_enable - enable all events */ int trace_events_enable(struct trace_instance *instance, struct trace_events *events) { struct trace_events *tevent = events; int retval; while (tevent) { debug_msg("Enabling event %s:%s\n", tevent->system, tevent->event ? : "*"); retval = tracefs_event_enable(instance->inst, tevent->system, tevent->event); if (retval < 0) { err_msg("Error enabling event %s:%s\n", tevent->system, tevent->event ? : "*"); return 1; } retval = trace_event_enable_filter(instance, tevent); if (retval) return 1; retval = trace_event_enable_trigger(instance, tevent); if (retval) return 1; tevent->enabled = 1; tevent = tevent->next; } return 0; } /* * trace_events_destroy - disable and free all trace events */ void trace_events_destroy(struct trace_instance *instance, struct trace_events *events) { if (!events) return; trace_events_disable(instance, events); trace_events_free(events); } int trace_is_off(struct trace_instance *tool, struct trace_instance *trace) { /* * The tool instance is always present, it is the one used to collect * data. 
*/ if (!tracefs_trace_is_on(tool->inst)) return 1; /* * The trace instance is only enabled when -t is set. IOW, when the system * is tracing. */ if (trace && !tracefs_trace_is_on(trace->inst)) return 1; return 0; }
linux-master
tools/tracing/rtla/src/trace.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #include <sys/types.h> #include <sys/stat.h> #include <pthread.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <stdio.h> #include "timerlat.h" static void timerlat_usage(int err) { int i; static const char * const msg[] = { "", "timerlat version " VERSION, "", " usage: [rtla] timerlat [MODE] ...", "", " modes:", " top - prints the summary from timerlat tracer", " hist - prints a histogram of timer latencies", "", "if no MODE is given, the top mode is called, passing the arguments", NULL, }; for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(err); } int timerlat_main(int argc, char *argv[]) { if (argc == 0) goto usage; /* * if timerlat was called without any argument, run the * default cmdline. */ if (argc == 1) { timerlat_top_main(argc, argv); exit(0); } if ((strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "--help") == 0)) { timerlat_usage(0); } else if (strncmp(argv[1], "-", 1) == 0) { /* the user skipped the tool, call the default one */ timerlat_top_main(argc, argv); exit(0); } else if (strcmp(argv[1], "top") == 0) { timerlat_top_main(argc-1, &argv[1]); exit(0); } else if (strcmp(argv[1], "hist") == 0) { timerlat_hist_main(argc-1, &argv[1]); exit(0); } usage: timerlat_usage(1); exit(1); }
linux-master
tools/tracing/rtla/src/timerlat.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #include <getopt.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include "osnoise.h" #include "timerlat.h" /* * rtla_usage - print rtla usage */ static void rtla_usage(int err) { int i; static const char *msg[] = { "", "rtla version " VERSION, "", " usage: rtla COMMAND ...", "", " commands:", " osnoise - gives information about the operating system noise (osnoise)", " hwnoise - gives information about hardware-related noise", " timerlat - measures the timer irq and thread latency", "", NULL, }; for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(err); } /* * run_command - try to run a rtla tool command * * It returns 0 if it fails. The tool's main will generally not * return as they should call exit(). */ int run_command(int argc, char **argv, int start_position) { if (strcmp(argv[start_position], "osnoise") == 0) { osnoise_main(argc-start_position, &argv[start_position]); goto ran; } else if (strcmp(argv[start_position], "hwnoise") == 0) { hwnoise_main(argc-start_position, &argv[start_position]); goto ran; } else if (strcmp(argv[start_position], "timerlat") == 0) { timerlat_main(argc-start_position, &argv[start_position]); goto ran; } return 0; ran: return 1; } int main(int argc, char *argv[]) { int retval; /* is it an alias? */ retval = run_command(argc, argv, 0); if (retval) exit(0); if (argc < 2) goto usage; if (strcmp(argv[1], "-h") == 0) { rtla_usage(0); } else if (strcmp(argv[1], "--help") == 0) { rtla_usage(0); } retval = run_command(argc, argv, 1); if (retval) exit(0); usage: rtla_usage(1); exit(1); }
linux-master
tools/tracing/rtla/src/rtla.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #define _GNU_SOURCE #include <getopt.h> #include <stdlib.h> #include <string.h> #include <signal.h> #include <unistd.h> #include <errno.h> #include <stdio.h> #include <time.h> #include <sched.h> #include "utils.h" #include "osnoise.h" struct osnoise_hist_params { char *cpus; cpu_set_t monitored_cpus; char *trace_output; char *cgroup_name; unsigned long long runtime; unsigned long long period; long long threshold; long long stop_us; long long stop_total_us; int sleep_time; int duration; int set_sched; int output_divisor; int cgroup; int hk_cpus; cpu_set_t hk_cpu_set; struct sched_attr sched_param; struct trace_events *events; char no_header; char no_summary; char no_index; char with_zeros; int bucket_size; int entries; }; struct osnoise_hist_cpu { int *samples; int count; unsigned long long min_sample; unsigned long long sum_sample; unsigned long long max_sample; }; struct osnoise_hist_data { struct tracefs_hist *trace_hist; struct osnoise_hist_cpu *hist; int entries; int bucket_size; int nr_cpus; }; /* * osnoise_free_histogram - free runtime data */ static void osnoise_free_histogram(struct osnoise_hist_data *data) { int cpu; /* one histogram for IRQ and one for thread, per CPU */ for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (data->hist[cpu].samples) free(data->hist[cpu].samples); } /* one set of histograms per CPU */ if (data->hist) free(data->hist); free(data); } /* * osnoise_alloc_histogram - alloc runtime data */ static struct osnoise_hist_data *osnoise_alloc_histogram(int nr_cpus, int entries, int bucket_size) { struct osnoise_hist_data *data; int cpu; data = calloc(1, sizeof(*data)); if (!data) return NULL; data->entries = entries; data->bucket_size = bucket_size; data->nr_cpus = nr_cpus; data->hist = calloc(1, sizeof(*data->hist) * nr_cpus); if (!data->hist) goto cleanup; for (cpu = 0; cpu < nr_cpus; cpu++) { data->hist[cpu].samples = 
calloc(1, sizeof(*data->hist->samples) * (entries + 1)); if (!data->hist[cpu].samples) goto cleanup; } /* set the min to max */ for (cpu = 0; cpu < nr_cpus; cpu++) data->hist[cpu].min_sample = ~0; return data; cleanup: osnoise_free_histogram(data); return NULL; } static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu, unsigned long long duration, int count) { struct osnoise_hist_params *params = tool->params; struct osnoise_hist_data *data = tool->data; unsigned long long total_duration; int entries = data->entries; int bucket; int *hist; if (params->output_divisor) duration = duration / params->output_divisor; if (data->bucket_size) bucket = duration / data->bucket_size; total_duration = duration * count; hist = data->hist[cpu].samples; data->hist[cpu].count += count; update_min(&data->hist[cpu].min_sample, &duration); update_sum(&data->hist[cpu].sum_sample, &total_duration); update_max(&data->hist[cpu].max_sample, &duration); if (bucket < entries) hist[bucket] += count; else hist[entries] += count; } /* * osnoise_destroy_trace_hist - disable events used to collect histogram */ static void osnoise_destroy_trace_hist(struct osnoise_tool *tool) { struct osnoise_hist_data *data = tool->data; tracefs_hist_pause(tool->trace.inst, data->trace_hist); tracefs_hist_destroy(tool->trace.inst, data->trace_hist); } /* * osnoise_init_trace_hist - enable events used to collect histogram */ static int osnoise_init_trace_hist(struct osnoise_tool *tool) { struct osnoise_hist_params *params = tool->params; struct osnoise_hist_data *data = tool->data; int bucket_size; char buff[128]; int retval = 0; /* * Set the size of the bucket. 
*/ bucket_size = params->output_divisor * params->bucket_size; snprintf(buff, sizeof(buff), "duration.buckets=%d", bucket_size); data->trace_hist = tracefs_hist_alloc(tool->trace.tep, "osnoise", "sample_threshold", buff, TRACEFS_HIST_KEY_NORMAL); if (!data->trace_hist) return 1; retval = tracefs_hist_add_key(data->trace_hist, "cpu", 0); if (retval) goto out_err; retval = tracefs_hist_start(tool->trace.inst, data->trace_hist); if (retval) goto out_err; return 0; out_err: osnoise_destroy_trace_hist(tool); return 1; } /* * osnoise_read_trace_hist - parse histogram file and file osnoise histogram */ static void osnoise_read_trace_hist(struct osnoise_tool *tool) { struct osnoise_hist_data *data = tool->data; long long cpu, counter, duration; char *content, *position; tracefs_hist_pause(tool->trace.inst, data->trace_hist); content = tracefs_event_file_read(tool->trace.inst, "osnoise", "sample_threshold", "hist", NULL); if (!content) return; position = content; while (true) { position = strstr(position, "duration: ~"); if (!position) break; position += strlen("duration: ~"); duration = get_llong_from_str(position); if (duration == -1) err_msg("error reading duration from histogram\n"); position = strstr(position, "cpu:"); if (!position) break; position += strlen("cpu: "); cpu = get_llong_from_str(position); if (cpu == -1) err_msg("error reading cpu from histogram\n"); position = strstr(position, "hitcount:"); if (!position) break; position += strlen("hitcount: "); counter = get_llong_from_str(position); if (counter == -1) err_msg("error reading counter from histogram\n"); osnoise_hist_update_multiple(tool, cpu, duration, counter); } free(content); } /* * osnoise_hist_header - print the header of the tracer to the output */ static void osnoise_hist_header(struct osnoise_tool *tool) { struct osnoise_hist_params *params = tool->params; struct osnoise_hist_data *data = tool->data; struct trace_seq *s = tool->trace.seq; char duration[26]; int cpu; if (params->no_header) 
return; get_duration(tool->start_time, duration, sizeof(duration)); trace_seq_printf(s, "# RTLA osnoise histogram\n"); trace_seq_printf(s, "# Time unit is %s (%s)\n", params->output_divisor == 1 ? "nanoseconds" : "microseconds", params->output_divisor == 1 ? "ns" : "us"); trace_seq_printf(s, "# Duration: %s\n", duration); if (!params->no_index) trace_seq_printf(s, "Index"); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; trace_seq_printf(s, " CPU-%03d", cpu); } trace_seq_printf(s, "\n"); trace_seq_do_printf(s); trace_seq_reset(s); } /* * osnoise_print_summary - print the summary of the hist data to the output */ static void osnoise_print_summary(struct osnoise_hist_params *params, struct trace_instance *trace, struct osnoise_hist_data *data) { int cpu; if (params->no_summary) return; if (!params->no_index) trace_seq_printf(trace->seq, "count:"); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].count); } trace_seq_printf(trace->seq, "\n"); if (!params->no_index) trace_seq_printf(trace->seq, "min: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].min_sample); } trace_seq_printf(trace->seq, "\n"); if (!params->no_index) trace_seq_printf(trace->seq, "avg: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; if (data->hist[cpu].count) trace_seq_printf(trace->seq, "%9.2f ", ((double) data->hist[cpu].sum_sample) / data->hist[cpu].count); else trace_seq_printf(trace->seq, " - "); } trace_seq_printf(trace->seq, "\n"); if (!params->no_index) 
trace_seq_printf(trace->seq, "max: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].max_sample); } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); } /* * osnoise_print_stats - print data for all CPUs */ static void osnoise_print_stats(struct osnoise_hist_params *params, struct osnoise_tool *tool) { struct osnoise_hist_data *data = tool->data; struct trace_instance *trace = &tool->trace; int bucket, cpu; int total; osnoise_hist_header(tool); for (bucket = 0; bucket < data->entries; bucket++) { total = 0; if (!params->no_index) trace_seq_printf(trace->seq, "%-6d", bucket * data->bucket_size); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; total += data->hist[cpu].samples[bucket]; trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].samples[bucket]); } if (total == 0 && !params->with_zeros) { trace_seq_reset(trace->seq); continue; } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); } if (!params->no_index) trace_seq_printf(trace->seq, "over: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].count) continue; trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].samples[data->entries]); } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); osnoise_print_summary(params, trace, data); } /* * osnoise_hist_usage - prints osnoise hist usage message */ static void osnoise_hist_usage(char *usage) { int i; static const char * const msg[] = { "", " usage: rtla osnoise hist [-h] [-D] [-d s] [-a us] [-p us] [-r us] [-s us] [-S us] \\", " [-T us] [-t[=file]] [-e sys[:event]] [--filter 
<filter>] [--trigger <trigger>] \\", " [-c cpu-list] [-H cpu-list] [-P priority] [-b N] [-E N] [--no-header] [--no-summary] \\", " [--no-index] [--with-zeros] [-C[=cgroup_name]]", "", " -h/--help: print this menu", " -a/--auto: set automatic trace mode, stopping the session if argument in us sample is hit", " -p/--period us: osnoise period in us", " -r/--runtime us: osnoise runtime in us", " -s/--stop us: stop trace if a single sample is higher than the argument in us", " -S/--stop-total us: stop trace if the total sample is higher than the argument in us", " -T/--threshold us: the minimum delta to be considered a noise", " -c/--cpus cpu-list: list of cpus to run osnoise threads", " -H/--house-keeping cpus: run rtla control threads only on the given cpus", " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited", " -d/--duration time[s|m|h|d]: duration of the session", " -D/--debug: print debug info", " -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]", " -e/--event <sys:event>: enable the <sys:event> in the trace instance, multiple -e are allowed", " --filter <filter>: enable a trace event filter to the previous -e event", " --trigger <trigger>: enable a trace event trigger to the previous -e event", " -b/--bucket-size N: set the histogram bucket size (default 1)", " -E/--entries N: set the number of entries of the histogram (default 256)", " --no-header: do not print header", " --no-summary: do not print summary", " --no-index: do not print index", " --with-zeros: print zero only entries", " -P/--priority o:prio|r:prio|f:prio|d:runtime:period: set scheduling parameters", " o:prio - use SCHED_OTHER with prio", " r:prio - use SCHED_RR with prio", " f:prio - use SCHED_FIFO with prio", " d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE with runtime and period", " in nanoseconds", NULL, }; if (usage) fprintf(stderr, "%s\n", usage); fprintf(stderr, "rtla osnoise hist: a per-cpu histogram of 
the OS noise (version %s)\n", VERSION); for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(1); } /* * osnoise_hist_parse_args - allocs, parse and fill the cmd line parameters */ static struct osnoise_hist_params *osnoise_hist_parse_args(int argc, char *argv[]) { struct osnoise_hist_params *params; struct trace_events *tevent; int retval; int c; params = calloc(1, sizeof(*params)); if (!params) exit(1); /* display data in microseconds */ params->output_divisor = 1000; params->bucket_size = 1; params->entries = 256; while (1) { static struct option long_options[] = { {"auto", required_argument, 0, 'a'}, {"bucket-size", required_argument, 0, 'b'}, {"entries", required_argument, 0, 'E'}, {"cpus", required_argument, 0, 'c'}, {"cgroup", optional_argument, 0, 'C'}, {"debug", no_argument, 0, 'D'}, {"duration", required_argument, 0, 'd'}, {"house-keeping", required_argument, 0, 'H'}, {"help", no_argument, 0, 'h'}, {"period", required_argument, 0, 'p'}, {"priority", required_argument, 0, 'P'}, {"runtime", required_argument, 0, 'r'}, {"stop", required_argument, 0, 's'}, {"stop-total", required_argument, 0, 'S'}, {"trace", optional_argument, 0, 't'}, {"event", required_argument, 0, 'e'}, {"threshold", required_argument, 0, 'T'}, {"no-header", no_argument, 0, '0'}, {"no-summary", no_argument, 0, '1'}, {"no-index", no_argument, 0, '2'}, {"with-zeros", no_argument, 0, '3'}, {"trigger", required_argument, 0, '4'}, {"filter", required_argument, 0, '5'}, {0, 0, 0, 0} }; /* getopt_long stores the option index here. */ int option_index = 0; c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:p:P:r:s:S:t::T:01234:5:", long_options, &option_index); /* detect the end of the options. 
*/ if (c == -1) break; switch (c) { case 'a': /* set sample stop to auto_thresh */ params->stop_us = get_llong_from_str(optarg); /* set sample threshold to 1 */ params->threshold = 1; /* set trace */ params->trace_output = "osnoise_trace.txt"; break; case 'b': params->bucket_size = get_llong_from_str(optarg); if ((params->bucket_size == 0) || (params->bucket_size >= 1000000)) osnoise_hist_usage("Bucket size needs to be > 0 and <= 1000000\n"); break; case 'c': retval = parse_cpu_set(optarg, &params->monitored_cpus); if (retval) osnoise_hist_usage("\nInvalid -c cpu list\n"); params->cpus = optarg; break; case 'C': params->cgroup = 1; if (!optarg) { /* will inherit this cgroup */ params->cgroup_name = NULL; } else if (*optarg == '=') { /* skip the = */ params->cgroup_name = ++optarg; } break; case 'D': config_debug = 1; break; case 'd': params->duration = parse_seconds_duration(optarg); if (!params->duration) osnoise_hist_usage("Invalid -D duration\n"); break; case 'e': tevent = trace_event_alloc(optarg); if (!tevent) { err_msg("Error alloc trace event"); exit(EXIT_FAILURE); } if (params->events) tevent->next = params->events; params->events = tevent; break; case 'E': params->entries = get_llong_from_str(optarg); if ((params->entries < 10) || (params->entries > 9999999)) osnoise_hist_usage("Entries must be > 10 and < 9999999\n"); break; case 'h': case '?': osnoise_hist_usage(NULL); break; case 'H': params->hk_cpus = 1; retval = parse_cpu_set(optarg, &params->hk_cpu_set); if (retval) { err_msg("Error parsing house keeping CPUs\n"); exit(EXIT_FAILURE); } break; case 'p': params->period = get_llong_from_str(optarg); if (params->period > 10000000) osnoise_hist_usage("Period longer than 10 s\n"); break; case 'P': retval = parse_prio(optarg, &params->sched_param); if (retval == -1) osnoise_hist_usage("Invalid -P priority"); params->set_sched = 1; break; case 'r': params->runtime = get_llong_from_str(optarg); if (params->runtime < 100) osnoise_hist_usage("Runtime shorter 
than 100 us\n"); break; case 's': params->stop_us = get_llong_from_str(optarg); break; case 'S': params->stop_total_us = get_llong_from_str(optarg); break; case 'T': params->threshold = get_llong_from_str(optarg); break; case 't': if (optarg) /* skip = */ params->trace_output = &optarg[1]; else params->trace_output = "osnoise_trace.txt"; break; case '0': /* no header */ params->no_header = 1; break; case '1': /* no summary */ params->no_summary = 1; break; case '2': /* no index */ params->no_index = 1; break; case '3': /* with zeros */ params->with_zeros = 1; break; case '4': /* trigger */ if (params->events) { retval = trace_event_add_trigger(params->events, optarg); if (retval) { err_msg("Error adding trigger %s\n", optarg); exit(EXIT_FAILURE); } } else { osnoise_hist_usage("--trigger requires a previous -e\n"); } break; case '5': /* filter */ if (params->events) { retval = trace_event_add_filter(params->events, optarg); if (retval) { err_msg("Error adding filter %s\n", optarg); exit(EXIT_FAILURE); } } else { osnoise_hist_usage("--filter requires a previous -e\n"); } break; default: osnoise_hist_usage("Invalid option"); } } if (geteuid()) { err_msg("rtla needs root permission\n"); exit(EXIT_FAILURE); } if (params->no_index && !params->with_zeros) osnoise_hist_usage("no-index set and with-zeros not set - it does not make sense"); return params; } /* * osnoise_hist_apply_config - apply the hist configs to the initialized tool */ static int osnoise_hist_apply_config(struct osnoise_tool *tool, struct osnoise_hist_params *params) { int retval; if (!params->sleep_time) params->sleep_time = 1; if (params->cpus) { retval = osnoise_set_cpus(tool->context, params->cpus); if (retval) { err_msg("Failed to apply CPUs config\n"); goto out_err; } } if (params->runtime || params->period) { retval = osnoise_set_runtime_period(tool->context, params->runtime, params->period); if (retval) { err_msg("Failed to set runtime and/or period\n"); goto out_err; } } if (params->stop_us) { 
retval = osnoise_set_stop_us(tool->context, params->stop_us); if (retval) { err_msg("Failed to set stop us\n"); goto out_err; } } if (params->stop_total_us) { retval = osnoise_set_stop_total_us(tool->context, params->stop_total_us); if (retval) { err_msg("Failed to set stop total us\n"); goto out_err; } } if (params->threshold) { retval = osnoise_set_tracing_thresh(tool->context, params->threshold); if (retval) { err_msg("Failed to set tracing_thresh\n"); goto out_err; } } if (params->hk_cpus) { retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set), &params->hk_cpu_set); if (retval == -1) { err_msg("Failed to set rtla to the house keeping CPUs\n"); goto out_err; } } else if (params->cpus) { /* * Even if the user do not set a house-keeping CPU, try to * move rtla to a CPU set different to the one where the user * set the workload to run. * * No need to check results as this is an automatic attempt. */ auto_house_keeping(&params->monitored_cpus); } return 0; out_err: return -1; } /* * osnoise_init_hist - initialize a osnoise hist tool with parameters */ static struct osnoise_tool *osnoise_init_hist(struct osnoise_hist_params *params) { struct osnoise_tool *tool; int nr_cpus; nr_cpus = sysconf(_SC_NPROCESSORS_CONF); tool = osnoise_init_tool("osnoise_hist"); if (!tool) return NULL; tool->data = osnoise_alloc_histogram(nr_cpus, params->entries, params->bucket_size); if (!tool->data) goto out_err; tool->params = params; return tool; out_err: osnoise_destroy_tool(tool); return NULL; } static int stop_tracing; static void stop_hist(int sig) { stop_tracing = 1; } /* * osnoise_hist_set_signals - handles the signal to stop the tool */ static void osnoise_hist_set_signals(struct osnoise_hist_params *params) { signal(SIGINT, stop_hist); if (params->duration) { signal(SIGALRM, stop_hist); alarm(params->duration); } } int osnoise_hist_main(int argc, char *argv[]) { struct osnoise_hist_params *params; struct osnoise_tool *record = NULL; struct osnoise_tool *tool = NULL; 
struct trace_instance *trace; int return_value = 1; int retval; params = osnoise_hist_parse_args(argc, argv); if (!params) exit(1); tool = osnoise_init_hist(params); if (!tool) { err_msg("Could not init osnoise hist\n"); goto out_exit; } retval = osnoise_hist_apply_config(tool, params); if (retval) { err_msg("Could not apply config\n"); goto out_destroy; } trace = &tool->trace; retval = enable_osnoise(trace); if (retval) { err_msg("Failed to enable osnoise tracer\n"); goto out_destroy; } retval = osnoise_init_trace_hist(tool); if (retval) goto out_destroy; if (params->set_sched) { retval = set_comm_sched_attr("osnoise/", &params->sched_param); if (retval) { err_msg("Failed to set sched parameters\n"); goto out_free; } } if (params->cgroup) { retval = set_comm_cgroup("timerlat/", params->cgroup_name); if (!retval) { err_msg("Failed to move threads to cgroup\n"); goto out_free; } } if (params->trace_output) { record = osnoise_init_trace_tool("osnoise"); if (!record) { err_msg("Failed to enable the trace instance\n"); goto out_free; } if (params->events) { retval = trace_events_enable(&record->trace, params->events); if (retval) goto out_hist; } } /* * Start the tracer here, after having set all instances. * * Let the trace instance start first for the case of hitting a stop * tracing while enabling other instances. The trace instance is the * one with most valuable information. 
*/ if (params->trace_output) trace_instance_start(&record->trace); trace_instance_start(trace); tool->start_time = time(NULL); osnoise_hist_set_signals(params); while (!stop_tracing) { sleep(params->sleep_time); retval = tracefs_iterate_raw_events(trace->tep, trace->inst, NULL, 0, collect_registered_events, trace); if (retval < 0) { err_msg("Error iterating on events\n"); goto out_hist; } if (trace_is_off(&tool->trace, &record->trace)) break; } osnoise_read_trace_hist(tool); osnoise_print_stats(params, tool); return_value = 0; if (trace_is_off(&tool->trace, &record->trace)) { printf("rtla osnoise hit stop tracing\n"); if (params->trace_output) { printf(" Saving trace to %s\n", params->trace_output); save_trace_to_file(record->trace.inst, params->trace_output); } } out_hist: trace_events_destroy(&record->trace, params->events); params->events = NULL; out_free: osnoise_free_histogram(tool->data); out_destroy: osnoise_destroy_tool(record); osnoise_destroy_tool(tool); free(params); out_exit: exit(return_value); }
/*
 * ---- dataset-join artifact (not source code) ----
 * repo: linux-master
 * path: tools/tracing/rtla/src/osnoise_hist.c
 * The next section is a different file: tools/tracing/rtla/src/timerlat_hist.c
 */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #define _GNU_SOURCE #include <getopt.h> #include <stdlib.h> #include <string.h> #include <signal.h> #include <unistd.h> #include <stdio.h> #include <time.h> #include <sched.h> #include <pthread.h> #include "utils.h" #include "osnoise.h" #include "timerlat.h" #include "timerlat_aa.h" #include "timerlat_u.h" struct timerlat_hist_params { char *cpus; cpu_set_t monitored_cpus; char *trace_output; char *cgroup_name; unsigned long long runtime; long long stop_us; long long stop_total_us; long long timerlat_period_us; long long print_stack; int sleep_time; int output_divisor; int duration; int set_sched; int dma_latency; int cgroup; int hk_cpus; int no_aa; int dump_tasks; int user_hist; cpu_set_t hk_cpu_set; struct sched_attr sched_param; struct trace_events *events; char no_irq; char no_thread; char no_header; char no_summary; char no_index; char with_zeros; int bucket_size; int entries; }; struct timerlat_hist_cpu { int *irq; int *thread; int *user; int irq_count; int thread_count; int user_count; unsigned long long min_irq; unsigned long long sum_irq; unsigned long long max_irq; unsigned long long min_thread; unsigned long long sum_thread; unsigned long long max_thread; unsigned long long min_user; unsigned long long sum_user; unsigned long long max_user; }; struct timerlat_hist_data { struct timerlat_hist_cpu *hist; int entries; int bucket_size; int nr_cpus; }; /* * timerlat_free_histogram - free runtime data */ static void timerlat_free_histogram(struct timerlat_hist_data *data) { int cpu; /* one histogram for IRQ and one for thread, per CPU */ for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (data->hist[cpu].irq) free(data->hist[cpu].irq); if (data->hist[cpu].thread) free(data->hist[cpu].thread); if (data->hist[cpu].user) free(data->hist[cpu].user); } /* one set of histograms per CPU */ if (data->hist) free(data->hist); free(data); } /* * 
timerlat_alloc_histogram - alloc runtime data */ static struct timerlat_hist_data *timerlat_alloc_histogram(int nr_cpus, int entries, int bucket_size) { struct timerlat_hist_data *data; int cpu; data = calloc(1, sizeof(*data)); if (!data) return NULL; data->entries = entries; data->bucket_size = bucket_size; data->nr_cpus = nr_cpus; /* one set of histograms per CPU */ data->hist = calloc(1, sizeof(*data->hist) * nr_cpus); if (!data->hist) goto cleanup; /* one histogram for IRQ and one for thread, per cpu */ for (cpu = 0; cpu < nr_cpus; cpu++) { data->hist[cpu].irq = calloc(1, sizeof(*data->hist->irq) * (entries + 1)); if (!data->hist[cpu].irq) goto cleanup; data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1)); if (!data->hist[cpu].thread) goto cleanup; data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1)); if (!data->hist[cpu].user) goto cleanup; } /* set the min to max */ for (cpu = 0; cpu < nr_cpus; cpu++) { data->hist[cpu].min_irq = ~0; data->hist[cpu].min_thread = ~0; data->hist[cpu].min_user = ~0; } return data; cleanup: timerlat_free_histogram(data); return NULL; } /* * timerlat_hist_update - record a new timerlat occurent on cpu, updating data */ static void timerlat_hist_update(struct osnoise_tool *tool, int cpu, unsigned long long context, unsigned long long latency) { struct timerlat_hist_params *params = tool->params; struct timerlat_hist_data *data = tool->data; int entries = data->entries; int bucket; int *hist; if (params->output_divisor) latency = latency / params->output_divisor; if (data->bucket_size) bucket = latency / data->bucket_size; if (!context) { hist = data->hist[cpu].irq; data->hist[cpu].irq_count++; update_min(&data->hist[cpu].min_irq, &latency); update_sum(&data->hist[cpu].sum_irq, &latency); update_max(&data->hist[cpu].max_irq, &latency); } else if (context == 1) { hist = data->hist[cpu].thread; data->hist[cpu].thread_count++; update_min(&data->hist[cpu].min_thread, &latency); 
update_sum(&data->hist[cpu].sum_thread, &latency); update_max(&data->hist[cpu].max_thread, &latency); } else { /* user */ hist = data->hist[cpu].user; data->hist[cpu].user_count++; update_min(&data->hist[cpu].min_user, &latency); update_sum(&data->hist[cpu].sum_user, &latency); update_max(&data->hist[cpu].max_user, &latency); } if (bucket < entries) hist[bucket]++; else hist[entries]++; } /* * timerlat_hist_handler - this is the handler for timerlat tracer events */ static int timerlat_hist_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *data) { struct trace_instance *trace = data; unsigned long long context, latency; struct osnoise_tool *tool; int cpu = record->cpu; tool = container_of(trace, struct osnoise_tool, trace); tep_get_field_val(s, event, "context", record, &context, 1); tep_get_field_val(s, event, "timer_latency", record, &latency, 1); timerlat_hist_update(tool, cpu, context, latency); return 0; } /* * timerlat_hist_header - print the header of the tracer to the output */ static void timerlat_hist_header(struct osnoise_tool *tool) { struct timerlat_hist_params *params = tool->params; struct timerlat_hist_data *data = tool->data; struct trace_seq *s = tool->trace.seq; char duration[26]; int cpu; if (params->no_header) return; get_duration(tool->start_time, duration, sizeof(duration)); trace_seq_printf(s, "# RTLA timerlat histogram\n"); trace_seq_printf(s, "# Time unit is %s (%s)\n", params->output_divisor == 1 ? "nanoseconds" : "microseconds", params->output_divisor == 1 ? 
"ns" : "us"); trace_seq_printf(s, "# Duration: %s\n", duration); if (!params->no_index) trace_seq_printf(s, "Index"); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) trace_seq_printf(s, " IRQ-%03d", cpu); if (!params->no_thread) trace_seq_printf(s, " Thr-%03d", cpu); if (params->user_hist) trace_seq_printf(s, " Usr-%03d", cpu); } trace_seq_printf(s, "\n"); trace_seq_do_printf(s); trace_seq_reset(s); } /* * timerlat_print_summary - print the summary of the hist data to the output */ static void timerlat_print_summary(struct timerlat_hist_params *params, struct trace_instance *trace, struct timerlat_hist_data *data) { int cpu; if (params->no_summary) return; if (!params->no_index) trace_seq_printf(trace->seq, "count:"); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].irq_count); if (!params->no_thread) trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].thread_count); if (params->user_hist) trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].user_count); } trace_seq_printf(trace->seq, "\n"); if (!params->no_index) trace_seq_printf(trace->seq, "min: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].min_irq); if (!params->no_thread) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].min_thread); if (params->user_hist) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].min_user); } trace_seq_printf(trace->seq, "\n"); if (!params->no_index) trace_seq_printf(trace->seq, 
"avg: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) { if (data->hist[cpu].irq_count) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].sum_irq / data->hist[cpu].irq_count); else trace_seq_printf(trace->seq, " - "); } if (!params->no_thread) { if (data->hist[cpu].thread_count) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].sum_thread / data->hist[cpu].thread_count); else trace_seq_printf(trace->seq, " - "); } if (params->user_hist) { if (data->hist[cpu].user_count) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].sum_user / data->hist[cpu].user_count); else trace_seq_printf(trace->seq, " - "); } } trace_seq_printf(trace->seq, "\n"); if (!params->no_index) trace_seq_printf(trace->seq, "max: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].max_irq); if (!params->no_thread) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].max_thread); if (params->user_hist) trace_seq_printf(trace->seq, "%9llu ", data->hist[cpu].max_user); } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); } /* * timerlat_print_stats - print data for all CPUs */ static void timerlat_print_stats(struct timerlat_hist_params *params, struct osnoise_tool *tool) { struct timerlat_hist_data *data = tool->data; struct trace_instance *trace = &tool->trace; int bucket, cpu; int total; timerlat_hist_header(tool); for (bucket = 0; bucket < data->entries; bucket++) { total = 0; if (!params->no_index) trace_seq_printf(trace->seq, "%-6d", bucket * data->bucket_size); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, 
&params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) { total += data->hist[cpu].irq[bucket]; trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].irq[bucket]); } if (!params->no_thread) { total += data->hist[cpu].thread[bucket]; trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].thread[bucket]); } if (params->user_hist) { total += data->hist[cpu].user[bucket]; trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].user[bucket]); } } if (total == 0 && !params->with_zeros) { trace_seq_reset(trace->seq); continue; } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); } if (!params->no_index) trace_seq_printf(trace->seq, "over: "); for (cpu = 0; cpu < data->nr_cpus; cpu++) { if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus)) continue; if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count) continue; if (!params->no_irq) trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].irq[data->entries]); if (!params->no_thread) trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].thread[data->entries]); if (params->user_hist) trace_seq_printf(trace->seq, "%9d ", data->hist[cpu].user[data->entries]); } trace_seq_printf(trace->seq, "\n"); trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); timerlat_print_summary(params, trace, data); } /* * timerlat_hist_usage - prints timerlat top usage message */ static void timerlat_hist_usage(char *usage) { int i; char *msg[] = { "", " usage: [rtla] timerlat hist [-h] [-q] [-d s] [-D] [-n] [-a us] [-p us] [-i us] [-T us] [-s us] \\", " [-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] [-H cpu-list]\\", " [-P priority] [-E N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\", " [--no-index] [--with-zeros] [--dma-latency us] [-C[=cgroup_name]] [--no-aa] [--dump-task] [-u]", "", " -h/--help: print this menu", " -a/--auto: set automatic trace mode, 
stopping the session if argument in us latency is hit", " -p/--period us: timerlat period in us", " -i/--irq us: stop trace if the irq latency is higher than the argument in us", " -T/--thread us: stop trace if the thread latency is higher than the argument in us", " -s/--stack us: save the stack trace at the IRQ if a thread latency is higher than the argument in us", " -c/--cpus cpus: run the tracer only on the given cpus", " -H/--house-keeping cpus: run rtla control threads only on the given cpus", " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited", " -d/--duration time[m|h|d]: duration of the session in seconds", " --dump-tasks: prints the task running on all CPUs if stop conditions are met (depends on !--no-aa)", " -D/--debug: print debug info", " -t/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]", " -e/--event <sys:event>: enable the <sys:event> in the trace instance, multiple -e are allowed", " --filter <filter>: enable a trace event filter to the previous -e event", " --trigger <trigger>: enable a trace event trigger to the previous -e event", " -n/--nano: display data in nanoseconds", " --no-aa: disable auto-analysis, reducing rtla timerlat cpu usage", " -b/--bucket-size N: set the histogram bucket size (default 1)", " -E/--entries N: set the number of entries of the histogram (default 256)", " --no-irq: ignore IRQ latencies", " --no-thread: ignore thread latencies", " --no-header: do not print header", " --no-summary: do not print summary", " --no-index: do not print index", " --with-zeros: print zero only entries", " --dma-latency us: set /dev/cpu_dma_latency latency <us> to reduce exit from idle latency", " -P/--priority o:prio|r:prio|f:prio|d:runtime:period : set scheduling parameters", " o:prio - use SCHED_OTHER with prio", " r:prio - use SCHED_RR with prio", " f:prio - use SCHED_FIFO with prio", " d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE with runtime and 
period", " in nanoseconds", " -u/--user-threads: use rtla user-space threads instead of in-kernel timerlat threads", NULL, }; if (usage) fprintf(stderr, "%s\n", usage); fprintf(stderr, "rtla timerlat hist: a per-cpu histogram of the timer latency (version %s)\n", VERSION); for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(1); } /* * timerlat_hist_parse_args - allocs, parse and fill the cmd line parameters */ static struct timerlat_hist_params *timerlat_hist_parse_args(int argc, char *argv[]) { struct timerlat_hist_params *params; struct trace_events *tevent; int auto_thresh; int retval; int c; params = calloc(1, sizeof(*params)); if (!params) exit(1); /* disabled by default */ params->dma_latency = -1; /* display data in microseconds */ params->output_divisor = 1000; params->bucket_size = 1; params->entries = 256; while (1) { static struct option long_options[] = { {"auto", required_argument, 0, 'a'}, {"cpus", required_argument, 0, 'c'}, {"cgroup", optional_argument, 0, 'C'}, {"bucket-size", required_argument, 0, 'b'}, {"debug", no_argument, 0, 'D'}, {"entries", required_argument, 0, 'E'}, {"duration", required_argument, 0, 'd'}, {"house-keeping", required_argument, 0, 'H'}, {"help", no_argument, 0, 'h'}, {"irq", required_argument, 0, 'i'}, {"nano", no_argument, 0, 'n'}, {"period", required_argument, 0, 'p'}, {"priority", required_argument, 0, 'P'}, {"stack", required_argument, 0, 's'}, {"thread", required_argument, 0, 'T'}, {"trace", optional_argument, 0, 't'}, {"user-threads", no_argument, 0, 'u'}, {"event", required_argument, 0, 'e'}, {"no-irq", no_argument, 0, '0'}, {"no-thread", no_argument, 0, '1'}, {"no-header", no_argument, 0, '2'}, {"no-summary", no_argument, 0, '3'}, {"no-index", no_argument, 0, '4'}, {"with-zeros", no_argument, 0, '5'}, {"trigger", required_argument, 0, '6'}, {"filter", required_argument, 0, '7'}, {"dma-latency", required_argument, 0, '8'}, {"no-aa", no_argument, 0, '9'}, {"dump-task", no_argument, 0, '\1'}, {0, 0, 0, 0} }; 
/* getopt_long stores the option index here. */ int option_index = 0; c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:i:np:P:s:t::T:u0123456:7:8:9\1", long_options, &option_index); /* detect the end of the options. */ if (c == -1) break; switch (c) { case 'a': auto_thresh = get_llong_from_str(optarg); /* set thread stop to auto_thresh */ params->stop_total_us = auto_thresh; params->stop_us = auto_thresh; /* get stack trace */ params->print_stack = auto_thresh; /* set trace */ params->trace_output = "timerlat_trace.txt"; break; case 'c': retval = parse_cpu_set(optarg, &params->monitored_cpus); if (retval) timerlat_hist_usage("\nInvalid -c cpu list\n"); params->cpus = optarg; break; case 'C': params->cgroup = 1; if (!optarg) { /* will inherit this cgroup */ params->cgroup_name = NULL; } else if (*optarg == '=') { /* skip the = */ params->cgroup_name = ++optarg; } break; case 'b': params->bucket_size = get_llong_from_str(optarg); if ((params->bucket_size == 0) || (params->bucket_size >= 1000000)) timerlat_hist_usage("Bucket size needs to be > 0 and <= 1000000\n"); break; case 'D': config_debug = 1; break; case 'd': params->duration = parse_seconds_duration(optarg); if (!params->duration) timerlat_hist_usage("Invalid -D duration\n"); break; case 'e': tevent = trace_event_alloc(optarg); if (!tevent) { err_msg("Error alloc trace event"); exit(EXIT_FAILURE); } if (params->events) tevent->next = params->events; params->events = tevent; break; case 'E': params->entries = get_llong_from_str(optarg); if ((params->entries < 10) || (params->entries > 9999999)) timerlat_hist_usage("Entries must be > 10 and < 9999999\n"); break; case 'h': case '?': timerlat_hist_usage(NULL); break; case 'H': params->hk_cpus = 1; retval = parse_cpu_set(optarg, &params->hk_cpu_set); if (retval) { err_msg("Error parsing house keeping CPUs\n"); exit(EXIT_FAILURE); } break; case 'i': params->stop_us = get_llong_from_str(optarg); break; case 'n': params->output_divisor = 1; break; case 'p': 
params->timerlat_period_us = get_llong_from_str(optarg); if (params->timerlat_period_us > 1000000) timerlat_hist_usage("Period longer than 1 s\n"); break; case 'P': retval = parse_prio(optarg, &params->sched_param); if (retval == -1) timerlat_hist_usage("Invalid -P priority"); params->set_sched = 1; break; case 's': params->print_stack = get_llong_from_str(optarg); break; case 'T': params->stop_total_us = get_llong_from_str(optarg); break; case 't': if (optarg) /* skip = */ params->trace_output = &optarg[1]; else params->trace_output = "timerlat_trace.txt"; break; case 'u': params->user_hist = 1; break; case '0': /* no irq */ params->no_irq = 1; break; case '1': /* no thread */ params->no_thread = 1; break; case '2': /* no header */ params->no_header = 1; break; case '3': /* no summary */ params->no_summary = 1; break; case '4': /* no index */ params->no_index = 1; break; case '5': /* with zeros */ params->with_zeros = 1; break; case '6': /* trigger */ if (params->events) { retval = trace_event_add_trigger(params->events, optarg); if (retval) { err_msg("Error adding trigger %s\n", optarg); exit(EXIT_FAILURE); } } else { timerlat_hist_usage("--trigger requires a previous -e\n"); } break; case '7': /* filter */ if (params->events) { retval = trace_event_add_filter(params->events, optarg); if (retval) { err_msg("Error adding filter %s\n", optarg); exit(EXIT_FAILURE); } } else { timerlat_hist_usage("--filter requires a previous -e\n"); } break; case '8': params->dma_latency = get_llong_from_str(optarg); if (params->dma_latency < 0 || params->dma_latency > 10000) { err_msg("--dma-latency needs to be >= 0 and < 10000"); exit(EXIT_FAILURE); } break; case '9': params->no_aa = 1; break; case '\1': params->dump_tasks = 1; break; default: timerlat_hist_usage("Invalid option"); } } if (geteuid()) { err_msg("rtla needs root permission\n"); exit(EXIT_FAILURE); } if (params->no_irq && params->no_thread) timerlat_hist_usage("no-irq and no-thread set, there is nothing to do here"); 
if (params->no_index && !params->with_zeros) timerlat_hist_usage("no-index set with with-zeros is not set - it does not make sense"); /* * Auto analysis only happens if stop tracing, thus: */ if (!params->stop_us && !params->stop_total_us) params->no_aa = 1; return params; } /* * timerlat_hist_apply_config - apply the hist configs to the initialized tool */ static int timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_hist_params *params) { int retval, i; if (!params->sleep_time) params->sleep_time = 1; if (params->cpus) { retval = osnoise_set_cpus(tool->context, params->cpus); if (retval) { err_msg("Failed to apply CPUs config\n"); goto out_err; } } else { for (i = 0; i < sysconf(_SC_NPROCESSORS_CONF); i++) CPU_SET(i, &params->monitored_cpus); } if (params->stop_us) { retval = osnoise_set_stop_us(tool->context, params->stop_us); if (retval) { err_msg("Failed to set stop us\n"); goto out_err; } } if (params->stop_total_us) { retval = osnoise_set_stop_total_us(tool->context, params->stop_total_us); if (retval) { err_msg("Failed to set stop total us\n"); goto out_err; } } if (params->timerlat_period_us) { retval = osnoise_set_timerlat_period_us(tool->context, params->timerlat_period_us); if (retval) { err_msg("Failed to set timerlat period\n"); goto out_err; } } if (params->print_stack) { retval = osnoise_set_print_stack(tool->context, params->print_stack); if (retval) { err_msg("Failed to set print stack\n"); goto out_err; } } if (params->hk_cpus) { retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set), &params->hk_cpu_set); if (retval == -1) { err_msg("Failed to set rtla to the house keeping CPUs\n"); goto out_err; } } else if (params->cpus) { /* * Even if the user do not set a house-keeping CPU, try to * move rtla to a CPU set different to the one where the user * set the workload to run. * * No need to check results as this is an automatic attempt. 
*/ auto_house_keeping(&params->monitored_cpus); } if (params->user_hist) { retval = osnoise_set_workload(tool->context, 0); if (retval) { err_msg("Failed to set OSNOISE_WORKLOAD option\n"); goto out_err; } } return 0; out_err: return -1; } /* * timerlat_init_hist - initialize a timerlat hist tool with parameters */ static struct osnoise_tool *timerlat_init_hist(struct timerlat_hist_params *params) { struct osnoise_tool *tool; int nr_cpus; nr_cpus = sysconf(_SC_NPROCESSORS_CONF); tool = osnoise_init_tool("timerlat_hist"); if (!tool) return NULL; tool->data = timerlat_alloc_histogram(nr_cpus, params->entries, params->bucket_size); if (!tool->data) goto out_err; tool->params = params; tep_register_event_handler(tool->trace.tep, -1, "ftrace", "timerlat", timerlat_hist_handler, tool); return tool; out_err: osnoise_destroy_tool(tool); return NULL; } static int stop_tracing; static void stop_hist(int sig) { stop_tracing = 1; } /* * timerlat_hist_set_signals - handles the signal to stop the tool */ static void timerlat_hist_set_signals(struct timerlat_hist_params *params) { signal(SIGINT, stop_hist); if (params->duration) { signal(SIGALRM, stop_hist); alarm(params->duration); } } int timerlat_hist_main(int argc, char *argv[]) { struct timerlat_hist_params *params; struct osnoise_tool *record = NULL; struct timerlat_u_params params_u; struct osnoise_tool *tool = NULL; struct osnoise_tool *aa = NULL; struct trace_instance *trace; int dma_latency_fd = -1; int return_value = 1; pthread_t timerlat_u; int retval; params = timerlat_hist_parse_args(argc, argv); if (!params) exit(1); tool = timerlat_init_hist(params); if (!tool) { err_msg("Could not init osnoise hist\n"); goto out_exit; } retval = timerlat_hist_apply_config(tool, params); if (retval) { err_msg("Could not apply config\n"); goto out_free; } trace = &tool->trace; retval = enable_timerlat(trace); if (retval) { err_msg("Failed to enable timerlat tracer\n"); goto out_free; } if (params->set_sched) { retval = 
set_comm_sched_attr("timerlat/", &params->sched_param); if (retval) { err_msg("Failed to set sched parameters\n"); goto out_free; } } if (params->cgroup && !params->user_hist) { retval = set_comm_cgroup("timerlat/", params->cgroup_name); if (!retval) { err_msg("Failed to move threads to cgroup\n"); goto out_free; } } if (params->dma_latency >= 0) { dma_latency_fd = set_cpu_dma_latency(params->dma_latency); if (dma_latency_fd < 0) { err_msg("Could not set /dev/cpu_dma_latency.\n"); goto out_free; } } if (params->trace_output) { record = osnoise_init_trace_tool("timerlat"); if (!record) { err_msg("Failed to enable the trace instance\n"); goto out_free; } if (params->events) { retval = trace_events_enable(&record->trace, params->events); if (retval) goto out_hist; } } if (!params->no_aa) { aa = osnoise_init_tool("timerlat_aa"); if (!aa) goto out_hist; retval = timerlat_aa_init(aa, params->dump_tasks); if (retval) { err_msg("Failed to enable the auto analysis instance\n"); goto out_hist; } retval = enable_timerlat(&aa->trace); if (retval) { err_msg("Failed to enable timerlat tracer\n"); goto out_hist; } } /* * Start the tracers here, after having set all instances. * * Let the trace instance start first for the case of hitting a stop * tracing while enabling other instances. The trace instance is the * one with most valuable information. 
*/ if (params->trace_output) trace_instance_start(&record->trace); if (!params->no_aa) trace_instance_start(&aa->trace); trace_instance_start(trace); tool->start_time = time(NULL); timerlat_hist_set_signals(params); if (params->user_hist) { /* rtla asked to stop */ params_u.should_run = 1; /* all threads left */ params_u.stopped_running = 0; params_u.set = &params->monitored_cpus; if (params->set_sched) params_u.sched_param = &params->sched_param; else params_u.sched_param = NULL; params_u.cgroup_name = params->cgroup_name; retval = pthread_create(&timerlat_u, NULL, timerlat_u_dispatcher, &params_u); if (retval) err_msg("Error creating timerlat user-space threads\n"); } while (!stop_tracing) { sleep(params->sleep_time); retval = tracefs_iterate_raw_events(trace->tep, trace->inst, NULL, 0, collect_registered_events, trace); if (retval < 0) { err_msg("Error iterating on events\n"); goto out_hist; } if (trace_is_off(&tool->trace, &record->trace)) break; /* is there still any user-threads ? */ if (params->user_hist) { if (params_u.stopped_running) { debug_msg("timerlat user-space threads stopped!\n"); break; } } } if (params->user_hist && !params_u.stopped_running) { params_u.should_run = 0; sleep(1); } timerlat_print_stats(params, tool); return_value = 0; if (trace_is_off(&tool->trace, &record->trace)) { printf("rtla timerlat hit stop tracing\n"); if (!params->no_aa) timerlat_auto_analysis(params->stop_us, params->stop_total_us); if (params->trace_output) { printf(" Saving trace to %s\n", params->trace_output); save_trace_to_file(record->trace.inst, params->trace_output); } } out_hist: timerlat_aa_destroy(); if (dma_latency_fd >= 0) close(dma_latency_fd); trace_events_destroy(&record->trace, params->events); params->events = NULL; out_free: timerlat_free_histogram(tool->data); osnoise_destroy_tool(aa); osnoise_destroy_tool(record); osnoise_destroy_tool(tool); free(params); out_exit: exit(return_value); }
linux-master
tools/tracing/rtla/src/timerlat_hist.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2023 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #include <stdlib.h> #include <errno.h> #include "utils.h" #include "osnoise.h" #include "timerlat.h" #include <unistd.h> enum timelat_state { TIMERLAT_INIT = 0, TIMERLAT_WAITING_IRQ, TIMERLAT_WAITING_THREAD, }; #define MAX_COMM 24 /* * Per-cpu data statistics and data. */ struct timerlat_aa_data { /* Current CPU state */ int curr_state; /* timerlat IRQ latency */ unsigned long long tlat_irq_seqnum; unsigned long long tlat_irq_latency; unsigned long long tlat_irq_timstamp; /* timerlat Thread latency */ unsigned long long tlat_thread_seqnum; unsigned long long tlat_thread_latency; unsigned long long tlat_thread_timstamp; /* * Information about the thread running when the IRQ * arrived. * * This can be blocking or interference, depending on the * priority of the thread. Assuming timerlat is the highest * prio, it is blocking. If timerlat has a lower prio, it is * interference. * note: "unsigned long long" because they are fetch using tep_get_field_val(); */ unsigned long long run_thread_pid; char run_thread_comm[MAX_COMM]; unsigned long long thread_blocking_duration; unsigned long long max_exit_idle_latency; /* Information about the timerlat timer irq */ unsigned long long timer_irq_start_time; unsigned long long timer_irq_start_delay; unsigned long long timer_irq_duration; unsigned long long timer_exit_from_idle; /* * Information about the last IRQ before the timerlat irq * arrived. * * If now - timestamp is <= latency, it might have influenced * in the timerlat irq latency. Otherwise, ignore it. */ unsigned long long prev_irq_duration; unsigned long long prev_irq_timstamp; /* * Interference sum. */ unsigned long long thread_nmi_sum; unsigned long long thread_irq_sum; unsigned long long thread_softirq_sum; unsigned long long thread_thread_sum; /* * Interference task information. 
*/ struct trace_seq *prev_irqs_seq; struct trace_seq *nmi_seq; struct trace_seq *irqs_seq; struct trace_seq *softirqs_seq; struct trace_seq *threads_seq; struct trace_seq *stack_seq; /* * Current thread. */ char current_comm[MAX_COMM]; unsigned long long current_pid; /* * Is the system running a kworker? */ unsigned long long kworker; unsigned long long kworker_func; }; /* * The analysis context and system wide view */ struct timerlat_aa_context { int nr_cpus; int dump_tasks; /* per CPU data */ struct timerlat_aa_data *taa_data; /* * required to translate function names and register * events. */ struct osnoise_tool *tool; }; /* * The data is stored as a local variable, but accessed via a helper function. * * It could be stored inside the trace context. But every access would * require container_of() + a series of pointers. Do we need it? Not sure. * * For now keep it simple. If needed, store it in the tool, add the *context * as a parameter in timerlat_aa_get_ctx() and do the magic there. */ static struct timerlat_aa_context *__timerlat_aa_ctx; static struct timerlat_aa_context *timerlat_aa_get_ctx(void) { return __timerlat_aa_ctx; } /* * timerlat_aa_get_data - Get the per-cpu data from the timerlat context */ static struct timerlat_aa_data *timerlat_aa_get_data(struct timerlat_aa_context *taa_ctx, int cpu) { return &taa_ctx->taa_data[cpu]; } /* * timerlat_aa_irq_latency - Handles timerlat IRQ event */ static int timerlat_aa_irq_latency(struct timerlat_aa_data *taa_data, struct trace_seq *s, struct tep_record *record, struct tep_event *event) { /* * For interference, we start now looking for things that can delay * the thread. */ taa_data->curr_state = TIMERLAT_WAITING_THREAD; taa_data->tlat_irq_timstamp = record->ts; /* * Zero values. 
*/ taa_data->thread_nmi_sum = 0; taa_data->thread_irq_sum = 0; taa_data->thread_softirq_sum = 0; taa_data->thread_blocking_duration = 0; taa_data->timer_irq_start_time = 0; taa_data->timer_irq_duration = 0; taa_data->timer_exit_from_idle = 0; /* * Zero interference tasks. */ trace_seq_reset(taa_data->nmi_seq); trace_seq_reset(taa_data->irqs_seq); trace_seq_reset(taa_data->softirqs_seq); trace_seq_reset(taa_data->threads_seq); /* IRQ latency values */ tep_get_field_val(s, event, "timer_latency", record, &taa_data->tlat_irq_latency, 1); tep_get_field_val(s, event, "seqnum", record, &taa_data->tlat_irq_seqnum, 1); /* The thread that can cause blocking */ tep_get_common_field_val(s, event, "common_pid", record, &taa_data->run_thread_pid, 1); /* * Get exit from idle case. * * If it is not idle thread: */ if (taa_data->run_thread_pid) return 0; /* * if the latency is shorter than the known exit from idle: */ if (taa_data->tlat_irq_latency < taa_data->max_exit_idle_latency) return 0; /* * To be safe, ignore the cases in which an IRQ/NMI could have * interfered with the timerlat IRQ. */ if (taa_data->tlat_irq_timstamp - taa_data->tlat_irq_latency < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) return 0; taa_data->max_exit_idle_latency = taa_data->tlat_irq_latency; return 0; } /* * timerlat_aa_thread_latency - Handles timerlat thread event */ static int timerlat_aa_thread_latency(struct timerlat_aa_data *taa_data, struct trace_seq *s, struct tep_record *record, struct tep_event *event) { /* * For interference, we start now looking for things that can delay * the IRQ of the next cycle. 
*/ taa_data->curr_state = TIMERLAT_WAITING_IRQ; taa_data->tlat_thread_timstamp = record->ts; /* Thread latency values */ tep_get_field_val(s, event, "timer_latency", record, &taa_data->tlat_thread_latency, 1); tep_get_field_val(s, event, "seqnum", record, &taa_data->tlat_thread_seqnum, 1); return 0; } /* * timerlat_aa_handler - Handle timerlat events * * This function is called to handle timerlat events recording statistics. * * Returns 0 on success, -1 otherwise. */ static int timerlat_aa_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); unsigned long long thread; if (!taa_data) return -1; tep_get_field_val(s, event, "context", record, &thread, 1); if (!thread) return timerlat_aa_irq_latency(taa_data, s, record, event); else return timerlat_aa_thread_latency(taa_data, s, record, event); } /* * timerlat_aa_nmi_handler - Handles NMI noise * * It is used to collect information about interferences from NMI. It is * hooked to the osnoise:nmi_noise event. 
*/ static int timerlat_aa_nmi_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); unsigned long long duration; unsigned long long start; tep_get_field_val(s, event, "duration", record, &duration, 1); tep_get_field_val(s, event, "start", record, &start, 1); if (taa_data->curr_state == TIMERLAT_WAITING_IRQ) { taa_data->prev_irq_duration = duration; taa_data->prev_irq_timstamp = start; trace_seq_reset(taa_data->prev_irqs_seq); trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s \t\t\t%9.2f us\n", "nmi", ns_to_usf(duration)); return 0; } taa_data->thread_nmi_sum += duration; trace_seq_printf(taa_data->nmi_seq, " %24s \t\t\t%9.2f us\n", "nmi", ns_to_usf(duration)); return 0; } /* * timerlat_aa_irq_handler - Handles IRQ noise * * It is used to collect information about interferences from IRQ. It is * hooked to the osnoise:irq_noise event. * * It is a little bit more complex than the other because it measures: * - The IRQs that can delay the timer IRQ before it happened. * - The Timerlat IRQ handler * - The IRQs that happened between the timerlat IRQ and the timerlat thread * (IRQ interference). */ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); unsigned long long expected_start; unsigned long long duration; unsigned long long vector; unsigned long long start; char *desc; int val; tep_get_field_val(s, event, "duration", record, &duration, 1); tep_get_field_val(s, event, "start", record, &start, 1); tep_get_field_val(s, event, "vector", record, &vector, 1); desc = tep_get_field_raw(s, event, "desc", record, &val, 1); /* * Before the timerlat IRQ. 
*/ if (taa_data->curr_state == TIMERLAT_WAITING_IRQ) { taa_data->prev_irq_duration = duration; taa_data->prev_irq_timstamp = start; trace_seq_reset(taa_data->prev_irqs_seq); trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s:%-3llu \t\t%9.2f us\n", desc, vector, ns_to_usf(duration)); return 0; } /* * The timerlat IRQ: taa_data->timer_irq_start_time is zeroed at * the timerlat irq handler. */ if (!taa_data->timer_irq_start_time) { expected_start = taa_data->tlat_irq_timstamp - taa_data->tlat_irq_latency; taa_data->timer_irq_start_time = start; taa_data->timer_irq_duration = duration; taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start; /* * not exit from idle. */ if (taa_data->run_thread_pid) return 0; if (expected_start > taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) taa_data->timer_exit_from_idle = taa_data->timer_irq_start_delay; return 0; } /* * IRQ interference. */ taa_data->thread_irq_sum += duration; trace_seq_printf(taa_data->irqs_seq, " %24s:%-3llu \t %9.2f us\n", desc, vector, ns_to_usf(duration)); return 0; } static char *softirq_name[] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", "TASKLET", "SCHED", "HRTIMER", "RCU" }; /* * timerlat_aa_softirq_handler - Handles Softirq noise * * It is used to collect information about interferences from Softirq. It is * hooked to the osnoise:softirq_noise event. * * It is only printed in the non-rt kernel, as softirqs become thread on RT. 
*/ static int timerlat_aa_softirq_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); unsigned long long duration; unsigned long long vector; unsigned long long start; if (taa_data->curr_state == TIMERLAT_WAITING_IRQ) return 0; tep_get_field_val(s, event, "duration", record, &duration, 1); tep_get_field_val(s, event, "start", record, &start, 1); tep_get_field_val(s, event, "vector", record, &vector, 1); taa_data->thread_softirq_sum += duration; trace_seq_printf(taa_data->softirqs_seq, "\t%24s:%-3llu \t %9.2f us\n", softirq_name[vector], vector, ns_to_usf(duration)); return 0; } /* * timerlat_aa_softirq_handler - Handles thread noise * * It is used to collect information about interferences from threads. It is * hooked to the osnoise:thread_noise event. * * Note: if you see thread noise, your timerlat thread was not the highest prio one. 
*/ static int timerlat_aa_thread_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); unsigned long long duration; unsigned long long start; unsigned long long pid; const char *comm; int val; if (taa_data->curr_state == TIMERLAT_WAITING_IRQ) return 0; tep_get_field_val(s, event, "duration", record, &duration, 1); tep_get_field_val(s, event, "start", record, &start, 1); tep_get_common_field_val(s, event, "common_pid", record, &pid, 1); comm = tep_get_field_raw(s, event, "comm", record, &val, 1); if (pid == taa_data->run_thread_pid && !taa_data->thread_blocking_duration) { taa_data->thread_blocking_duration = duration; if (comm) strncpy(taa_data->run_thread_comm, comm, MAX_COMM); else sprintf(taa_data->run_thread_comm, "<...>"); } else { taa_data->thread_thread_sum += duration; trace_seq_printf(taa_data->threads_seq, "\t%24s:%-3llu \t\t%9.2f us\n", comm, pid, ns_to_usf(duration)); } return 0; } /* * timerlat_aa_stack_handler - Handles timerlat IRQ stack trace * * Saves and parse the stack trace generated by the timerlat IRQ. 
*/ static int timerlat_aa_stack_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); unsigned long *caller; const char *function; int val, i; trace_seq_reset(taa_data->stack_seq); trace_seq_printf(taa_data->stack_seq, " Blocking thread stack trace\n"); caller = tep_get_field_raw(s, event, "caller", record, &val, 1); if (caller) { for (i = 0; ; i++) { function = tep_find_function(taa_ctx->tool->trace.tep, caller[i]); if (!function) break; trace_seq_printf(taa_data->stack_seq, "\t\t-> %s\n", function); } } return 0; } /* * timerlat_aa_sched_switch_handler - Tracks the current thread running on the CPU * * Handles the sched:sched_switch event to trace the current thread running on the * CPU. It is used to display the threads running on the other CPUs when the trace * stops. */ static int timerlat_aa_sched_switch_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); const char *comm; int val; tep_get_field_val(s, event, "next_pid", record, &taa_data->current_pid, 1); comm = tep_get_field_raw(s, event, "next_comm", record, &val, 1); strncpy(taa_data->current_comm, comm, MAX_COMM); /* * If this was a kworker, clean the last kworkers that ran. */ taa_data->kworker = 0; taa_data->kworker_func = 0; return 0; } /* * timerlat_aa_kworker_start_handler - Tracks a kworker running on the CPU * * Handles workqueue:workqueue_execute_start event, keeping track of * the job that a kworker could be doing in the CPU. * * We already catch problems of hardware related latencies caused by work queues * running driver code that causes hardware stall. For example, with DRM drivers. 
*/ static int timerlat_aa_kworker_start_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); struct timerlat_aa_data *taa_data = timerlat_aa_get_data(taa_ctx, record->cpu); tep_get_field_val(s, event, "work", record, &taa_data->kworker, 1); tep_get_field_val(s, event, "function", record, &taa_data->kworker_func, 1); return 0; } /* * timerlat_thread_analysis - Prints the analysis of a CPU that hit a stop tracing * * This is the core of the analysis. */ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu, int irq_thresh, int thread_thresh) { unsigned long long exp_irq_ts; int total; int irq; /* * IRQ latency or Thread latency? */ if (taa_data->tlat_irq_seqnum > taa_data->tlat_thread_seqnum) { irq = 1; total = taa_data->tlat_irq_latency; } else { irq = 0; total = taa_data->tlat_thread_latency; } /* * Expected IRQ arrival time using the trace clock as the base. */ exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay; if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) printf(" Previous IRQ interference: \t\t up to %9.2f us\n", ns_to_usf(taa_data->prev_irq_duration)); /* * The delay that the IRQ suffered before starting. */ printf(" IRQ handler delay: %16s %9.2f us (%.2f %%)\n", (ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "", ns_to_usf(taa_data->timer_irq_start_delay), ns_to_per(total, taa_data->timer_irq_start_delay)); /* * Timerlat IRQ. */ printf(" IRQ latency: \t\t\t\t %9.2f us\n", ns_to_usf(taa_data->tlat_irq_latency)); if (irq) { /* * If the trace stopped due to IRQ, the other events will not happen * because... the trace stopped :-). * * That is all folks, the stack trace was printed before the stop, * so it will be displayed, it is the key. 
*/ printf(" Blocking thread:\n"); printf(" %24s:%-9llu\n", taa_data->run_thread_comm, taa_data->run_thread_pid); } else { /* * The duration of the IRQ handler that handled the timerlat IRQ. */ printf(" Timerlat IRQ duration: \t\t %9.2f us (%.2f %%)\n", ns_to_usf(taa_data->timer_irq_duration), ns_to_per(total, taa_data->timer_irq_duration)); /* * The amount of time that the current thread postponed the scheduler. * * Recalling that it is net from NMI/IRQ/Softirq interference, so there * is no need to compute values here. */ printf(" Blocking thread: \t\t\t %9.2f us (%.2f %%)\n", ns_to_usf(taa_data->thread_blocking_duration), ns_to_per(total, taa_data->thread_blocking_duration)); printf(" %24s:%-9llu %9.2f us\n", taa_data->run_thread_comm, taa_data->run_thread_pid, ns_to_usf(taa_data->thread_blocking_duration)); } /* * Print the stack trace! */ trace_seq_do_printf(taa_data->stack_seq); /* * NMIs can happen during the IRQ, so they are always possible. */ if (taa_data->thread_nmi_sum) printf(" NMI interference \t\t\t %9.2f us (%.2f %%)\n", ns_to_usf(taa_data->thread_nmi_sum), ns_to_per(total, taa_data->thread_nmi_sum)); /* * If it is an IRQ latency, the other factors can be skipped. */ if (irq) goto print_total; /* * Prints the interference caused by IRQs to the thread latency. */ if (taa_data->thread_irq_sum) { printf(" IRQ interference \t\t\t %9.2f us (%.2f %%)\n", ns_to_usf(taa_data->thread_irq_sum), ns_to_per(total, taa_data->thread_irq_sum)); trace_seq_do_printf(taa_data->irqs_seq); } /* * Prints the interference caused by Softirqs to the thread latency. */ if (taa_data->thread_softirq_sum) { printf(" Softirq interference \t\t\t %9.2f us (%.2f %%)\n", ns_to_usf(taa_data->thread_softirq_sum), ns_to_per(total, taa_data->thread_softirq_sum)); trace_seq_do_printf(taa_data->softirqs_seq); } /* * Prints the interference caused by other threads to the thread latency. * * If this happens, your timerlat is not the highest prio. OK, migration * thread can happen. 
But otherwise, you are not measuring the "scheduling * latency" only, and here is the difference from scheduling latency and * timer handling latency. */ if (taa_data->thread_thread_sum) { printf(" Thread interference \t\t\t %9.2f us (%.2f %%)\n", ns_to_usf(taa_data->thread_thread_sum), ns_to_per(total, taa_data->thread_thread_sum)); trace_seq_do_printf(taa_data->threads_seq); } /* * Done. */ print_total: printf("------------------------------------------------------------------------\n"); printf(" %s latency: \t\t\t %9.2f us (100%%)\n", irq ? "IRQ" : "Thread", ns_to_usf(total)); } static int timerlat_auto_analysis_collect_trace(struct timerlat_aa_context *taa_ctx) { struct trace_instance *trace = &taa_ctx->tool->trace; int retval; retval = tracefs_iterate_raw_events(trace->tep, trace->inst, NULL, 0, collect_registered_events, trace); if (retval < 0) { err_msg("Error iterating on events\n"); return 0; } return 1; } /** * timerlat_auto_analysis - Analyze the collected data */ void timerlat_auto_analysis(int irq_thresh, int thread_thresh) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); unsigned long long max_exit_from_idle = 0; struct timerlat_aa_data *taa_data; int max_exit_from_idle_cpu; struct tep_handle *tep; int cpu; timerlat_auto_analysis_collect_trace(taa_ctx); /* bring stop tracing to the ns scale */ irq_thresh = irq_thresh * 1000; thread_thresh = thread_thresh * 1000; for (cpu = 0; cpu < taa_ctx->nr_cpus; cpu++) { taa_data = timerlat_aa_get_data(taa_ctx, cpu); if (irq_thresh && taa_data->tlat_irq_latency >= irq_thresh) { printf("## CPU %d hit stop tracing, analyzing it ##\n", cpu); timerlat_thread_analysis(taa_data, cpu, irq_thresh, thread_thresh); } else if (thread_thresh && (taa_data->tlat_thread_latency) >= thread_thresh) { printf("## CPU %d hit stop tracing, analyzing it ##\n", cpu); timerlat_thread_analysis(taa_data, cpu, irq_thresh, thread_thresh); } if (taa_data->max_exit_idle_latency > max_exit_from_idle) { max_exit_from_idle = 
taa_data->max_exit_idle_latency; max_exit_from_idle_cpu = cpu; } } if (max_exit_from_idle) { printf("\n"); printf("Max timerlat IRQ latency from idle: %.2f us in cpu %d\n", ns_to_usf(max_exit_from_idle), max_exit_from_idle_cpu); } if (!taa_ctx->dump_tasks) return; printf("\n"); printf("Printing CPU tasks:\n"); for (cpu = 0; cpu < taa_ctx->nr_cpus; cpu++) { taa_data = timerlat_aa_get_data(taa_ctx, cpu); tep = taa_ctx->tool->trace.tep; printf(" [%.3d] %24s:%llu", cpu, taa_data->current_comm, taa_data->current_pid); if (taa_data->kworker_func) printf(" kworker:%s:%s", tep_find_function(tep, taa_data->kworker) ? : "<...>", tep_find_function(tep, taa_data->kworker_func)); printf("\n"); } } /* * timerlat_aa_destroy_seqs - Destroy seq files used to store parsed data */ static void timerlat_aa_destroy_seqs(struct timerlat_aa_context *taa_ctx) { struct timerlat_aa_data *taa_data; int i; if (!taa_ctx->taa_data) return; for (i = 0; i < taa_ctx->nr_cpus; i++) { taa_data = timerlat_aa_get_data(taa_ctx, i); if (taa_data->prev_irqs_seq) { trace_seq_destroy(taa_data->prev_irqs_seq); free(taa_data->prev_irqs_seq); } if (taa_data->nmi_seq) { trace_seq_destroy(taa_data->nmi_seq); free(taa_data->nmi_seq); } if (taa_data->irqs_seq) { trace_seq_destroy(taa_data->irqs_seq); free(taa_data->irqs_seq); } if (taa_data->softirqs_seq) { trace_seq_destroy(taa_data->softirqs_seq); free(taa_data->softirqs_seq); } if (taa_data->threads_seq) { trace_seq_destroy(taa_data->threads_seq); free(taa_data->threads_seq); } if (taa_data->stack_seq) { trace_seq_destroy(taa_data->stack_seq); free(taa_data->stack_seq); } } } /* * timerlat_aa_init_seqs - Init seq files used to store parsed information * * Instead of keeping data structures to store raw data, use seq files to * store parsed data. * * Allocates and initialize seq files. * * Returns 0 on success, -1 otherwise. 
*/ static int timerlat_aa_init_seqs(struct timerlat_aa_context *taa_ctx) { struct timerlat_aa_data *taa_data; int i; for (i = 0; i < taa_ctx->nr_cpus; i++) { taa_data = timerlat_aa_get_data(taa_ctx, i); taa_data->prev_irqs_seq = calloc(1, sizeof(*taa_data->prev_irqs_seq)); if (!taa_data->prev_irqs_seq) goto out_err; trace_seq_init(taa_data->prev_irqs_seq); taa_data->nmi_seq = calloc(1, sizeof(*taa_data->nmi_seq)); if (!taa_data->nmi_seq) goto out_err; trace_seq_init(taa_data->nmi_seq); taa_data->irqs_seq = calloc(1, sizeof(*taa_data->irqs_seq)); if (!taa_data->irqs_seq) goto out_err; trace_seq_init(taa_data->irqs_seq); taa_data->softirqs_seq = calloc(1, sizeof(*taa_data->softirqs_seq)); if (!taa_data->softirqs_seq) goto out_err; trace_seq_init(taa_data->softirqs_seq); taa_data->threads_seq = calloc(1, sizeof(*taa_data->threads_seq)); if (!taa_data->threads_seq) goto out_err; trace_seq_init(taa_data->threads_seq); taa_data->stack_seq = calloc(1, sizeof(*taa_data->stack_seq)); if (!taa_data->stack_seq) goto out_err; trace_seq_init(taa_data->stack_seq); } return 0; out_err: timerlat_aa_destroy_seqs(taa_ctx); return -1; } /* * timerlat_aa_unregister_events - Unregister events used in the auto-analysis */ static void timerlat_aa_unregister_events(struct osnoise_tool *tool, int dump_tasks) { tep_unregister_event_handler(tool->trace.tep, -1, "ftrace", "timerlat", timerlat_aa_handler, tool); tracefs_event_disable(tool->trace.inst, "osnoise", NULL); tep_unregister_event_handler(tool->trace.tep, -1, "osnoise", "nmi_noise", timerlat_aa_nmi_handler, tool); tep_unregister_event_handler(tool->trace.tep, -1, "osnoise", "irq_noise", timerlat_aa_irq_handler, tool); tep_unregister_event_handler(tool->trace.tep, -1, "osnoise", "softirq_noise", timerlat_aa_softirq_handler, tool); tep_unregister_event_handler(tool->trace.tep, -1, "osnoise", "thread_noise", timerlat_aa_thread_handler, tool); tep_unregister_event_handler(tool->trace.tep, -1, "ftrace", "kernel_stack", 
timerlat_aa_stack_handler, tool); if (!dump_tasks) return; tracefs_event_disable(tool->trace.inst, "sched", "sched_switch"); tep_unregister_event_handler(tool->trace.tep, -1, "sched", "sched_switch", timerlat_aa_sched_switch_handler, tool); tracefs_event_disable(tool->trace.inst, "workqueue", "workqueue_execute_start"); tep_unregister_event_handler(tool->trace.tep, -1, "workqueue", "workqueue_execute_start", timerlat_aa_kworker_start_handler, tool); } /* * timerlat_aa_register_events - Register events used in the auto-analysis * * Returns 0 on success, -1 otherwise. */ static int timerlat_aa_register_events(struct osnoise_tool *tool, int dump_tasks) { int retval; tep_register_event_handler(tool->trace.tep, -1, "ftrace", "timerlat", timerlat_aa_handler, tool); /* * register auto-analysis handlers. */ retval = tracefs_event_enable(tool->trace.inst, "osnoise", NULL); if (retval < 0 && !errno) { err_msg("Could not find osnoise events\n"); goto out_err; } tep_register_event_handler(tool->trace.tep, -1, "osnoise", "nmi_noise", timerlat_aa_nmi_handler, tool); tep_register_event_handler(tool->trace.tep, -1, "osnoise", "irq_noise", timerlat_aa_irq_handler, tool); tep_register_event_handler(tool->trace.tep, -1, "osnoise", "softirq_noise", timerlat_aa_softirq_handler, tool); tep_register_event_handler(tool->trace.tep, -1, "osnoise", "thread_noise", timerlat_aa_thread_handler, tool); tep_register_event_handler(tool->trace.tep, -1, "ftrace", "kernel_stack", timerlat_aa_stack_handler, tool); if (!dump_tasks) return 0; /* * Dump task events. 
*/ retval = tracefs_event_enable(tool->trace.inst, "sched", "sched_switch"); if (retval < 0 && !errno) { err_msg("Could not find sched_switch\n"); goto out_err; } tep_register_event_handler(tool->trace.tep, -1, "sched", "sched_switch", timerlat_aa_sched_switch_handler, tool); retval = tracefs_event_enable(tool->trace.inst, "workqueue", "workqueue_execute_start"); if (retval < 0 && !errno) { err_msg("Could not find workqueue_execute_start\n"); goto out_err; } tep_register_event_handler(tool->trace.tep, -1, "workqueue", "workqueue_execute_start", timerlat_aa_kworker_start_handler, tool); return 0; out_err: timerlat_aa_unregister_events(tool, dump_tasks); return -1; } /** * timerlat_aa_destroy - Destroy timerlat auto-analysis */ void timerlat_aa_destroy(void) { struct timerlat_aa_context *taa_ctx = timerlat_aa_get_ctx(); if (!taa_ctx) return; if (!taa_ctx->taa_data) goto out_ctx; timerlat_aa_unregister_events(taa_ctx->tool, taa_ctx->dump_tasks); timerlat_aa_destroy_seqs(taa_ctx); free(taa_ctx->taa_data); out_ctx: free(taa_ctx); } /** * timerlat_aa_init - Initialize timerlat auto-analysis * * Returns 0 on success, -1 otherwise. */ int timerlat_aa_init(struct osnoise_tool *tool, int dump_tasks) { int nr_cpus = sysconf(_SC_NPROCESSORS_CONF); struct timerlat_aa_context *taa_ctx; int retval; taa_ctx = calloc(1, sizeof(*taa_ctx)); if (!taa_ctx) return -1; __timerlat_aa_ctx = taa_ctx; taa_ctx->nr_cpus = nr_cpus; taa_ctx->tool = tool; taa_ctx->dump_tasks = dump_tasks; taa_ctx->taa_data = calloc(nr_cpus, sizeof(*taa_ctx->taa_data)); if (!taa_ctx->taa_data) goto out_err; retval = timerlat_aa_init_seqs(taa_ctx); if (retval) goto out_err; retval = timerlat_aa_register_events(tool, dump_tasks); if (retval) goto out_err; return 0; out_err: timerlat_aa_destroy(); return -1; }
linux-master
tools/tracing/rtla/src/timerlat_aa.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #include <sys/types.h> #include <sys/stat.h> #include <pthread.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <stdio.h> #include "osnoise.h" #include "utils.h" /* * osnoise_get_cpus - return the original "osnoise/cpus" content * * It also saves the value to be restored. */ char *osnoise_get_cpus(struct osnoise_context *context) { if (context->curr_cpus) return context->curr_cpus; if (context->orig_cpus) return context->orig_cpus; context->orig_cpus = tracefs_instance_file_read(NULL, "osnoise/cpus", NULL); /* * The error value (NULL) is the same for tracefs_instance_file_read() * and this functions, so: */ return context->orig_cpus; } /* * osnoise_set_cpus - configure osnoise to run on *cpus * * "osnoise/cpus" file is used to set the cpus in which osnoise/timerlat * will run. This function opens this file, saves the current value, * and set the cpus passed as argument. */ int osnoise_set_cpus(struct osnoise_context *context, char *cpus) { char *orig_cpus = osnoise_get_cpus(context); char buffer[1024]; int retval; if (!orig_cpus) return -1; context->curr_cpus = strdup(cpus); if (!context->curr_cpus) return -1; snprintf(buffer, 1024, "%s\n", cpus); debug_msg("setting cpus to %s from %s", cpus, context->orig_cpus); retval = tracefs_instance_file_write(NULL, "osnoise/cpus", buffer); if (retval < 0) { free(context->curr_cpus); context->curr_cpus = NULL; return -1; } return 0; } /* * osnoise_restore_cpus - restore the original "osnoise/cpus" * * osnoise_set_cpus() saves the original data for the "osnoise/cpus" * file. This function restore the original config it was previously * modified. */ void osnoise_restore_cpus(struct osnoise_context *context) { int retval; if (!context->orig_cpus) return; if (!context->curr_cpus) return; /* nothing to do? 
*/ if (!strcmp(context->orig_cpus, context->curr_cpus)) goto out_done; debug_msg("restoring cpus to %s", context->orig_cpus); retval = tracefs_instance_file_write(NULL, "osnoise/cpus", context->orig_cpus); if (retval < 0) err_msg("could not restore original osnoise cpus\n"); out_done: free(context->curr_cpus); context->curr_cpus = NULL; } /* * osnoise_put_cpus - restore cpus config and cleanup data */ void osnoise_put_cpus(struct osnoise_context *context) { osnoise_restore_cpus(context); if (!context->orig_cpus) return; free(context->orig_cpus); context->orig_cpus = NULL; } /* * osnoise_read_ll_config - read a long long value from a config * * returns -1 on error. */ static long long osnoise_read_ll_config(char *rel_path) { long long retval; char *buffer; buffer = tracefs_instance_file_read(NULL, rel_path, NULL); if (!buffer) return -1; /* get_llong_from_str returns -1 on error */ retval = get_llong_from_str(buffer); debug_msg("reading %s returned %lld\n", rel_path, retval); free(buffer); return retval; } /* * osnoise_write_ll_config - write a long long value to a config in rel_path * * returns -1 on error. */ static long long osnoise_write_ll_config(char *rel_path, long long value) { char buffer[BUFF_U64_STR_SIZE]; long long retval; snprintf(buffer, sizeof(buffer), "%lld\n", value); debug_msg("setting %s to %lld\n", rel_path, value); retval = tracefs_instance_file_write(NULL, rel_path, buffer); return retval; } /* * osnoise_get_runtime - return the original "osnoise/runtime_us" value * * It also saves the value to be restored. 
*/ unsigned long long osnoise_get_runtime(struct osnoise_context *context) { long long runtime_us; if (context->runtime_us != OSNOISE_TIME_INIT_VAL) return context->runtime_us; if (context->orig_runtime_us != OSNOISE_TIME_INIT_VAL) return context->orig_runtime_us; runtime_us = osnoise_read_ll_config("osnoise/runtime_us"); if (runtime_us < 0) goto out_err; context->orig_runtime_us = runtime_us; return runtime_us; out_err: return OSNOISE_TIME_INIT_VAL; } /* * osnoise_get_period - return the original "osnoise/period_us" value * * It also saves the value to be restored. */ unsigned long long osnoise_get_period(struct osnoise_context *context) { long long period_us; if (context->period_us != OSNOISE_TIME_INIT_VAL) return context->period_us; if (context->orig_period_us != OSNOISE_TIME_INIT_VAL) return context->orig_period_us; period_us = osnoise_read_ll_config("osnoise/period_us"); if (period_us < 0) goto out_err; context->orig_period_us = period_us; return period_us; out_err: return OSNOISE_TIME_INIT_VAL; } static int __osnoise_write_runtime(struct osnoise_context *context, unsigned long long runtime) { int retval; if (context->orig_runtime_us == OSNOISE_TIME_INIT_VAL) return -1; retval = osnoise_write_ll_config("osnoise/runtime_us", runtime); if (retval < 0) return -1; context->runtime_us = runtime; return 0; } static int __osnoise_write_period(struct osnoise_context *context, unsigned long long period) { int retval; if (context->orig_period_us == OSNOISE_TIME_INIT_VAL) return -1; retval = osnoise_write_ll_config("osnoise/period_us", period); if (retval < 0) return -1; context->period_us = period; return 0; } /* * osnoise_set_runtime_period - set osnoise runtime and period * * Osnoise's runtime and period are related as runtime <= period. * Thus, this function saves the original values, and then tries * to set the runtime and period if they are != 0. 
*/ int osnoise_set_runtime_period(struct osnoise_context *context, unsigned long long runtime, unsigned long long period) { unsigned long long curr_runtime_us; unsigned long long curr_period_us; int retval; if (!period && !runtime) return 0; curr_runtime_us = osnoise_get_runtime(context); curr_period_us = osnoise_get_period(context); /* error getting any value? */ if (curr_period_us == OSNOISE_TIME_INIT_VAL || curr_runtime_us == OSNOISE_TIME_INIT_VAL) return -1; if (!period) { if (runtime > curr_period_us) return -1; return __osnoise_write_runtime(context, runtime); } else if (!runtime) { if (period < curr_runtime_us) return -1; return __osnoise_write_period(context, period); } if (runtime > curr_period_us) { retval = __osnoise_write_period(context, period); if (retval) return -1; retval = __osnoise_write_runtime(context, runtime); if (retval) return -1; } else { retval = __osnoise_write_runtime(context, runtime); if (retval) return -1; retval = __osnoise_write_period(context, period); if (retval) return -1; } return 0; } /* * osnoise_restore_runtime_period - restore the original runtime and period */ void osnoise_restore_runtime_period(struct osnoise_context *context) { unsigned long long orig_runtime = context->orig_runtime_us; unsigned long long orig_period = context->orig_period_us; unsigned long long curr_runtime = context->runtime_us; unsigned long long curr_period = context->period_us; int retval; if ((orig_runtime == OSNOISE_TIME_INIT_VAL) && (orig_period == OSNOISE_TIME_INIT_VAL)) return; if ((orig_period == curr_period) && (orig_runtime == curr_runtime)) goto out_done; retval = osnoise_set_runtime_period(context, orig_runtime, orig_period); if (retval) err_msg("Could not restore original osnoise runtime/period\n"); out_done: context->runtime_us = OSNOISE_TIME_INIT_VAL; context->period_us = OSNOISE_TIME_INIT_VAL; } /* * osnoise_put_runtime_period - restore original values and cleanup data */ void osnoise_put_runtime_period(struct osnoise_context *context) 
{ osnoise_restore_runtime_period(context); if (context->orig_runtime_us != OSNOISE_TIME_INIT_VAL) context->orig_runtime_us = OSNOISE_TIME_INIT_VAL; if (context->orig_period_us != OSNOISE_TIME_INIT_VAL) context->orig_period_us = OSNOISE_TIME_INIT_VAL; } /* * osnoise_get_timerlat_period_us - read and save the original "timerlat_period_us" */ static long long osnoise_get_timerlat_period_us(struct osnoise_context *context) { long long timerlat_period_us; if (context->timerlat_period_us != OSNOISE_TIME_INIT_VAL) return context->timerlat_period_us; if (context->orig_timerlat_period_us != OSNOISE_TIME_INIT_VAL) return context->orig_timerlat_period_us; timerlat_period_us = osnoise_read_ll_config("osnoise/timerlat_period_us"); if (timerlat_period_us < 0) goto out_err; context->orig_timerlat_period_us = timerlat_period_us; return timerlat_period_us; out_err: return OSNOISE_TIME_INIT_VAL; } /* * osnoise_set_timerlat_period_us - set "timerlat_period_us" */ int osnoise_set_timerlat_period_us(struct osnoise_context *context, long long timerlat_period_us) { long long curr_timerlat_period_us = osnoise_get_timerlat_period_us(context); int retval; if (curr_timerlat_period_us == OSNOISE_TIME_INIT_VAL) return -1; retval = osnoise_write_ll_config("osnoise/timerlat_period_us", timerlat_period_us); if (retval < 0) return -1; context->timerlat_period_us = timerlat_period_us; return 0; } /* * osnoise_restore_timerlat_period_us - restore "timerlat_period_us" */ void osnoise_restore_timerlat_period_us(struct osnoise_context *context) { int retval; if (context->orig_timerlat_period_us == OSNOISE_TIME_INIT_VAL) return; if (context->orig_timerlat_period_us == context->timerlat_period_us) goto out_done; retval = osnoise_write_ll_config("osnoise/timerlat_period_us", context->orig_timerlat_period_us); if (retval < 0) err_msg("Could not restore original osnoise timerlat_period_us\n"); out_done: context->timerlat_period_us = OSNOISE_TIME_INIT_VAL; } /* * osnoise_put_timerlat_period_us - restore 
original values and cleanup data */ void osnoise_put_timerlat_period_us(struct osnoise_context *context) { osnoise_restore_timerlat_period_us(context); if (context->orig_timerlat_period_us == OSNOISE_TIME_INIT_VAL) return; context->orig_timerlat_period_us = OSNOISE_TIME_INIT_VAL; } /* * osnoise_get_stop_us - read and save the original "stop_tracing_us" */ static long long osnoise_get_stop_us(struct osnoise_context *context) { long long stop_us; if (context->stop_us != OSNOISE_OPTION_INIT_VAL) return context->stop_us; if (context->orig_stop_us != OSNOISE_OPTION_INIT_VAL) return context->orig_stop_us; stop_us = osnoise_read_ll_config("osnoise/stop_tracing_us"); if (stop_us < 0) goto out_err; context->orig_stop_us = stop_us; return stop_us; out_err: return OSNOISE_OPTION_INIT_VAL; } /* * osnoise_set_stop_us - set "stop_tracing_us" */ int osnoise_set_stop_us(struct osnoise_context *context, long long stop_us) { long long curr_stop_us = osnoise_get_stop_us(context); int retval; if (curr_stop_us == OSNOISE_OPTION_INIT_VAL) return -1; retval = osnoise_write_ll_config("osnoise/stop_tracing_us", stop_us); if (retval < 0) return -1; context->stop_us = stop_us; return 0; } /* * osnoise_restore_stop_us - restore the original "stop_tracing_us" */ void osnoise_restore_stop_us(struct osnoise_context *context) { int retval; if (context->orig_stop_us == OSNOISE_OPTION_INIT_VAL) return; if (context->orig_stop_us == context->stop_us) goto out_done; retval = osnoise_write_ll_config("osnoise/stop_tracing_us", context->orig_stop_us); if (retval < 0) err_msg("Could not restore original osnoise stop_us\n"); out_done: context->stop_us = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_put_stop_us - restore original values and cleanup data */ void osnoise_put_stop_us(struct osnoise_context *context) { osnoise_restore_stop_us(context); if (context->orig_stop_us == OSNOISE_OPTION_INIT_VAL) return; context->orig_stop_us = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_get_stop_total_us - read and save the 
original "stop_tracing_total_us" */ static long long osnoise_get_stop_total_us(struct osnoise_context *context) { long long stop_total_us; if (context->stop_total_us != OSNOISE_OPTION_INIT_VAL) return context->stop_total_us; if (context->orig_stop_total_us != OSNOISE_OPTION_INIT_VAL) return context->orig_stop_total_us; stop_total_us = osnoise_read_ll_config("osnoise/stop_tracing_total_us"); if (stop_total_us < 0) goto out_err; context->orig_stop_total_us = stop_total_us; return stop_total_us; out_err: return OSNOISE_OPTION_INIT_VAL; } /* * osnoise_set_stop_total_us - set "stop_tracing_total_us" */ int osnoise_set_stop_total_us(struct osnoise_context *context, long long stop_total_us) { long long curr_stop_total_us = osnoise_get_stop_total_us(context); int retval; if (curr_stop_total_us == OSNOISE_OPTION_INIT_VAL) return -1; retval = osnoise_write_ll_config("osnoise/stop_tracing_total_us", stop_total_us); if (retval < 0) return -1; context->stop_total_us = stop_total_us; return 0; } /* * osnoise_restore_stop_total_us - restore the original "stop_tracing_total_us" */ void osnoise_restore_stop_total_us(struct osnoise_context *context) { int retval; if (context->orig_stop_total_us == OSNOISE_OPTION_INIT_VAL) return; if (context->orig_stop_total_us == context->stop_total_us) goto out_done; retval = osnoise_write_ll_config("osnoise/stop_tracing_total_us", context->orig_stop_total_us); if (retval < 0) err_msg("Could not restore original osnoise stop_total_us\n"); out_done: context->stop_total_us = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_put_stop_total_us - restore original values and cleanup data */ void osnoise_put_stop_total_us(struct osnoise_context *context) { osnoise_restore_stop_total_us(context); if (context->orig_stop_total_us == OSNOISE_OPTION_INIT_VAL) return; context->orig_stop_total_us = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_get_print_stack - read and save the original "print_stack" */ static long long osnoise_get_print_stack(struct osnoise_context *context) 
{ long long print_stack; if (context->print_stack != OSNOISE_OPTION_INIT_VAL) return context->print_stack; if (context->orig_print_stack != OSNOISE_OPTION_INIT_VAL) return context->orig_print_stack; print_stack = osnoise_read_ll_config("osnoise/print_stack"); if (print_stack < 0) goto out_err; context->orig_print_stack = print_stack; return print_stack; out_err: return OSNOISE_OPTION_INIT_VAL; } /* * osnoise_set_print_stack - set "print_stack" */ int osnoise_set_print_stack(struct osnoise_context *context, long long print_stack) { long long curr_print_stack = osnoise_get_print_stack(context); int retval; if (curr_print_stack == OSNOISE_OPTION_INIT_VAL) return -1; retval = osnoise_write_ll_config("osnoise/print_stack", print_stack); if (retval < 0) return -1; context->print_stack = print_stack; return 0; } /* * osnoise_restore_print_stack - restore the original "print_stack" */ void osnoise_restore_print_stack(struct osnoise_context *context) { int retval; if (context->orig_print_stack == OSNOISE_OPTION_INIT_VAL) return; if (context->orig_print_stack == context->print_stack) goto out_done; retval = osnoise_write_ll_config("osnoise/print_stack", context->orig_print_stack); if (retval < 0) err_msg("Could not restore original osnoise print_stack\n"); out_done: context->print_stack = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_put_print_stack - restore original values and cleanup data */ void osnoise_put_print_stack(struct osnoise_context *context) { osnoise_restore_print_stack(context); if (context->orig_print_stack == OSNOISE_OPTION_INIT_VAL) return; context->orig_print_stack = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_get_tracing_thresh - read and save the original "tracing_thresh" */ static long long osnoise_get_tracing_thresh(struct osnoise_context *context) { long long tracing_thresh; if (context->tracing_thresh != OSNOISE_OPTION_INIT_VAL) return context->tracing_thresh; if (context->orig_tracing_thresh != OSNOISE_OPTION_INIT_VAL) return context->orig_tracing_thresh; 
tracing_thresh = osnoise_read_ll_config("tracing_thresh"); if (tracing_thresh < 0) goto out_err; context->orig_tracing_thresh = tracing_thresh; return tracing_thresh; out_err: return OSNOISE_OPTION_INIT_VAL; } /* * osnoise_set_tracing_thresh - set "tracing_thresh" */ int osnoise_set_tracing_thresh(struct osnoise_context *context, long long tracing_thresh) { long long curr_tracing_thresh = osnoise_get_tracing_thresh(context); int retval; if (curr_tracing_thresh == OSNOISE_OPTION_INIT_VAL) return -1; retval = osnoise_write_ll_config("tracing_thresh", tracing_thresh); if (retval < 0) return -1; context->tracing_thresh = tracing_thresh; return 0; } /* * osnoise_restore_tracing_thresh - restore the original "tracing_thresh" */ void osnoise_restore_tracing_thresh(struct osnoise_context *context) { int retval; if (context->orig_tracing_thresh == OSNOISE_OPTION_INIT_VAL) return; if (context->orig_tracing_thresh == context->tracing_thresh) goto out_done; retval = osnoise_write_ll_config("tracing_thresh", context->orig_tracing_thresh); if (retval < 0) err_msg("Could not restore original tracing_thresh\n"); out_done: context->tracing_thresh = OSNOISE_OPTION_INIT_VAL; } /* * osnoise_put_tracing_thresh - restore original values and cleanup data */ void osnoise_put_tracing_thresh(struct osnoise_context *context) { osnoise_restore_tracing_thresh(context); if (context->orig_tracing_thresh == OSNOISE_OPTION_INIT_VAL) return; context->orig_tracing_thresh = OSNOISE_OPTION_INIT_VAL; } static int osnoise_options_get_option(char *option) { char *options = tracefs_instance_file_read(NULL, "osnoise/options", NULL); char no_option[128]; int retval = 0; char *opt; if (!options) return OSNOISE_OPTION_INIT_VAL; /* * Check first if the option is disabled. */ snprintf(no_option, sizeof(no_option), "NO_%s", option); opt = strstr(options, no_option); if (opt) goto out_free; /* * Now that it is not disabled, if the string is there, it is * enabled. 
If the string is not there, the option does not exist. */ opt = strstr(options, option); if (opt) retval = 1; else retval = OSNOISE_OPTION_INIT_VAL; out_free: free(options); return retval; } static int osnoise_options_set_option(char *option, bool onoff) { char no_option[128]; if (onoff) return tracefs_instance_file_write(NULL, "osnoise/options", option); snprintf(no_option, sizeof(no_option), "NO_%s", option); return tracefs_instance_file_write(NULL, "osnoise/options", no_option); } static int osnoise_get_irq_disable(struct osnoise_context *context) { if (context->opt_irq_disable != OSNOISE_OPTION_INIT_VAL) return context->opt_irq_disable; if (context->orig_opt_irq_disable != OSNOISE_OPTION_INIT_VAL) return context->orig_opt_irq_disable; context->orig_opt_irq_disable = osnoise_options_get_option("OSNOISE_IRQ_DISABLE"); return context->orig_opt_irq_disable; } int osnoise_set_irq_disable(struct osnoise_context *context, bool onoff) { int opt_irq_disable = osnoise_get_irq_disable(context); int retval; if (opt_irq_disable == OSNOISE_OPTION_INIT_VAL) return -1; if (opt_irq_disable == onoff) return 0; retval = osnoise_options_set_option("OSNOISE_IRQ_DISABLE", onoff); if (retval < 0) return -1; context->opt_irq_disable = onoff; return 0; } static void osnoise_restore_irq_disable(struct osnoise_context *context) { int retval; if (context->orig_opt_irq_disable == OSNOISE_OPTION_INIT_VAL) return; if (context->orig_opt_irq_disable == context->opt_irq_disable) goto out_done; retval = osnoise_options_set_option("OSNOISE_IRQ_DISABLE", context->orig_opt_irq_disable); if (retval < 0) err_msg("Could not restore original OSNOISE_IRQ_DISABLE option\n"); out_done: context->orig_opt_irq_disable = OSNOISE_OPTION_INIT_VAL; } static void osnoise_put_irq_disable(struct osnoise_context *context) { osnoise_restore_irq_disable(context); if (context->orig_opt_irq_disable == OSNOISE_OPTION_INIT_VAL) return; context->orig_opt_irq_disable = OSNOISE_OPTION_INIT_VAL; } static int 
osnoise_get_workload(struct osnoise_context *context) { if (context->opt_workload != OSNOISE_OPTION_INIT_VAL) return context->opt_workload; if (context->orig_opt_workload != OSNOISE_OPTION_INIT_VAL) return context->orig_opt_workload; context->orig_opt_workload = osnoise_options_get_option("OSNOISE_WORKLOAD"); return context->orig_opt_workload; } int osnoise_set_workload(struct osnoise_context *context, bool onoff) { int opt_workload = osnoise_get_workload(context); int retval; if (opt_workload == OSNOISE_OPTION_INIT_VAL) return -1; if (opt_workload == onoff) return 0; retval = osnoise_options_set_option("OSNOISE_WORKLOAD", onoff); if (retval < 0) return -1; context->opt_workload = onoff; return 0; } static void osnoise_restore_workload(struct osnoise_context *context) { int retval; if (context->orig_opt_workload == OSNOISE_OPTION_INIT_VAL) return; if (context->orig_opt_workload == context->opt_workload) goto out_done; retval = osnoise_options_set_option("OSNOISE_WORKLOAD", context->orig_opt_workload); if (retval < 0) err_msg("Could not restore original OSNOISE_WORKLOAD option\n"); out_done: context->orig_opt_workload = OSNOISE_OPTION_INIT_VAL; } static void osnoise_put_workload(struct osnoise_context *context) { osnoise_restore_workload(context); if (context->orig_opt_workload == OSNOISE_OPTION_INIT_VAL) return; context->orig_opt_workload = OSNOISE_OPTION_INIT_VAL; } /* * enable_osnoise - enable osnoise tracer in the trace_instance */ int enable_osnoise(struct trace_instance *trace) { return enable_tracer_by_name(trace->inst, "osnoise"); } /* * enable_timerlat - enable timerlat tracer in the trace_instance */ int enable_timerlat(struct trace_instance *trace) { return enable_tracer_by_name(trace->inst, "timerlat"); } enum { FLAG_CONTEXT_NEWLY_CREATED = (1 << 0), FLAG_CONTEXT_DELETED = (1 << 1), }; /* * osnoise_get_context - increase the usage of a context and return it */ int osnoise_get_context(struct osnoise_context *context) { int ret; if (context->flags & 
FLAG_CONTEXT_DELETED) { ret = -1; } else { context->ref++; ret = 0; } return ret; } /* * osnoise_context_alloc - alloc an osnoise_context * * The osnoise context contains the information of the "osnoise/" configs. * It is used to set and restore the config. */ struct osnoise_context *osnoise_context_alloc(void) { struct osnoise_context *context; context = calloc(1, sizeof(*context)); if (!context) return NULL; context->orig_stop_us = OSNOISE_OPTION_INIT_VAL; context->stop_us = OSNOISE_OPTION_INIT_VAL; context->orig_stop_total_us = OSNOISE_OPTION_INIT_VAL; context->stop_total_us = OSNOISE_OPTION_INIT_VAL; context->orig_print_stack = OSNOISE_OPTION_INIT_VAL; context->print_stack = OSNOISE_OPTION_INIT_VAL; context->orig_tracing_thresh = OSNOISE_OPTION_INIT_VAL; context->tracing_thresh = OSNOISE_OPTION_INIT_VAL; context->orig_opt_irq_disable = OSNOISE_OPTION_INIT_VAL; context->opt_irq_disable = OSNOISE_OPTION_INIT_VAL; context->orig_opt_workload = OSNOISE_OPTION_INIT_VAL; context->opt_workload = OSNOISE_OPTION_INIT_VAL; osnoise_get_context(context); return context; } /* * osnoise_put_context - put the osnoise_put_context * * If there is no other user for the context, the original data * is restored. 
*/ void osnoise_put_context(struct osnoise_context *context) { if (--context->ref < 1) context->flags |= FLAG_CONTEXT_DELETED; if (!(context->flags & FLAG_CONTEXT_DELETED)) return; osnoise_put_cpus(context); osnoise_put_runtime_period(context); osnoise_put_stop_us(context); osnoise_put_stop_total_us(context); osnoise_put_timerlat_period_us(context); osnoise_put_print_stack(context); osnoise_put_tracing_thresh(context); osnoise_put_irq_disable(context); osnoise_put_workload(context); free(context); } /* * osnoise_destroy_tool - disable trace, restore configs and free data */ void osnoise_destroy_tool(struct osnoise_tool *top) { if (!top) return; trace_instance_destroy(&top->trace); if (top->context) osnoise_put_context(top->context); free(top); } /* * osnoise_init_tool - init an osnoise tool * * It allocs data, create a context to store data and * creates a new trace instance for the tool. */ struct osnoise_tool *osnoise_init_tool(char *tool_name) { struct osnoise_tool *top; int retval; top = calloc(1, sizeof(*top)); if (!top) return NULL; top->context = osnoise_context_alloc(); if (!top->context) goto out_err; retval = trace_instance_init(&top->trace, tool_name); if (retval) goto out_err; return top; out_err: osnoise_destroy_tool(top); return NULL; } /* * osnoise_init_trace_tool - init a tracer instance to trace osnoise events */ struct osnoise_tool *osnoise_init_trace_tool(char *tracer) { struct osnoise_tool *trace; int retval; trace = osnoise_init_tool("osnoise_trace"); if (!trace) return NULL; retval = tracefs_event_enable(trace->trace.inst, "osnoise", NULL); if (retval < 0 && !errno) { err_msg("Could not find osnoise events\n"); goto out_err; } retval = enable_tracer_by_name(trace->trace.inst, tracer); if (retval) { err_msg("Could not enable %s tracer for tracing\n", tracer); goto out_err; } return trace; out_err: osnoise_destroy_tool(trace); return NULL; } static void osnoise_usage(int err) { int i; static const char *msg[] = { "", "osnoise version " VERSION, 
"", " usage: [rtla] osnoise [MODE] ...", "", " modes:", " top - prints the summary from osnoise tracer", " hist - prints a histogram of osnoise samples", "", "if no MODE is given, the top mode is called, passing the arguments", NULL, }; for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(err); } int osnoise_main(int argc, char *argv[]) { if (argc == 0) goto usage; /* * if osnoise was called without any argument, run the * default cmdline. */ if (argc == 1) { osnoise_top_main(argc, argv); exit(0); } if ((strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "--help") == 0)) { osnoise_usage(0); } else if (strncmp(argv[1], "-", 1) == 0) { /* the user skipped the tool, call the default one */ osnoise_top_main(argc, argv); exit(0); } else if (strcmp(argv[1], "top") == 0) { osnoise_top_main(argc-1, &argv[1]); exit(0); } else if (strcmp(argv[1], "hist") == 0) { osnoise_hist_main(argc-1, &argv[1]); exit(0); } usage: osnoise_usage(1); exit(1); } int hwnoise_main(int argc, char *argv[]) { osnoise_top_main(argc, argv); exit(0); }
linux-master
tools/tracing/rtla/src/osnoise.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #define _GNU_SOURCE #include <dirent.h> #include <stdarg.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <sched.h> #include <stdio.h> #include "utils.h" #define MAX_MSG_LENGTH 1024 int config_debug; /* * err_msg - print an error message to the stderr */ void err_msg(const char *fmt, ...) { char message[MAX_MSG_LENGTH]; va_list ap; va_start(ap, fmt); vsnprintf(message, sizeof(message), fmt, ap); va_end(ap); fprintf(stderr, "%s", message); } /* * debug_msg - print a debug message to stderr if debug is set */ void debug_msg(const char *fmt, ...) { char message[MAX_MSG_LENGTH]; va_list ap; if (!config_debug) return; va_start(ap, fmt); vsnprintf(message, sizeof(message), fmt, ap); va_end(ap); fprintf(stderr, "%s", message); } /* * get_llong_from_str - get a long long int from a string */ long long get_llong_from_str(char *start) { long long value; char *end; errno = 0; value = strtoll(start, &end, 10); if (errno || start == end) return -1; return value; } /* * get_duration - fill output with a human readable duration since start_time */ void get_duration(time_t start_time, char *output, int output_size) { time_t now = time(NULL); struct tm *tm_info; time_t duration; duration = difftime(now, start_time); tm_info = gmtime(&duration); snprintf(output, output_size, "%3d %02d:%02d:%02d", tm_info->tm_yday, tm_info->tm_hour, tm_info->tm_min, tm_info->tm_sec); } /* * parse_cpu_set - parse a cpu_list filling cpu_set_t argument * * Receives a cpu list, like 1-3,5 (cpus 1, 2, 3, 5), and then set * filling cpu_set_t argument. * * Returns 1 on success, 0 otherwise. 
*/ int parse_cpu_set(char *cpu_list, cpu_set_t *set) { const char *p; int end_cpu; int nr_cpus; int cpu; int i; CPU_ZERO(set); nr_cpus = sysconf(_SC_NPROCESSORS_CONF); for (p = cpu_list; *p; ) { cpu = atoi(p); if (cpu < 0 || (!cpu && *p != '0') || cpu >= nr_cpus) goto err; while (isdigit(*p)) p++; if (*p == '-') { p++; end_cpu = atoi(p); if (end_cpu < cpu || (!end_cpu && *p != '0') || end_cpu >= nr_cpus) goto err; while (isdigit(*p)) p++; } else end_cpu = cpu; if (cpu == end_cpu) { debug_msg("cpu_set: adding cpu %d\n", cpu); CPU_SET(cpu, set); } else { for (i = cpu; i <= end_cpu; i++) { debug_msg("cpu_set: adding cpu %d\n", i); CPU_SET(i, set); } } if (*p == ',') p++; } return 0; err: debug_msg("Error parsing the cpu set %s\n", cpu_list); return 1; } /* * parse_duration - parse duration with s/m/h/d suffix converting it to seconds */ long parse_seconds_duration(char *val) { char *end; long t; t = strtol(val, &end, 10); if (end) { switch (*end) { case 's': case 'S': break; case 'm': case 'M': t *= 60; break; case 'h': case 'H': t *= 60 * 60; break; case 'd': case 'D': t *= 24 * 60 * 60; break; } } return t; } /* * parse_ns_duration - parse duration with ns/us/ms/s converting it to nanoseconds */ long parse_ns_duration(char *val) { char *end; long t; t = strtol(val, &end, 10); if (end) { if (!strncmp(end, "ns", 2)) { return t; } else if (!strncmp(end, "us", 2)) { t *= 1000; return t; } else if (!strncmp(end, "ms", 2)) { t *= 1000 * 1000; return t; } else if (!strncmp(end, "s", 1)) { t *= 1000 * 1000 * 1000; return t; } return -1; } return t; } /* * This is a set of helper functions to use SCHED_DEADLINE. 
*/ #ifdef __x86_64__ # define __NR_sched_setattr 314 # define __NR_sched_getattr 315 #elif __i386__ # define __NR_sched_setattr 351 # define __NR_sched_getattr 352 #elif __arm__ # define __NR_sched_setattr 380 # define __NR_sched_getattr 381 #elif __aarch64__ || __riscv # define __NR_sched_setattr 274 # define __NR_sched_getattr 275 #elif __powerpc__ # define __NR_sched_setattr 355 # define __NR_sched_getattr 356 #elif __s390x__ # define __NR_sched_setattr 345 # define __NR_sched_getattr 346 #endif #define SCHED_DEADLINE 6 static inline int sched_setattr(pid_t pid, const struct sched_attr *attr, unsigned int flags) { return syscall(__NR_sched_setattr, pid, attr, flags); } static inline int sched_getattr(pid_t pid, struct sched_attr *attr, unsigned int size, unsigned int flags) { return syscall(__NR_sched_getattr, pid, attr, size, flags); } int __set_sched_attr(int pid, struct sched_attr *attr) { int flags = 0; int retval; retval = sched_setattr(pid, attr, flags); if (retval < 0) { err_msg("Failed to set sched attributes to the pid %d: %s\n", pid, strerror(errno)); return 1; } return 0; } /* * procfs_is_workload_pid - check if a procfs entry contains a comm_prefix* comm * * Check if the procfs entry is a directory of a process, and then check if the * process has a comm with the prefix set in char *comm_prefix. As the * current users of this function only check for kernel threads, there is no * need to check for the threads for the process. * * Return: True if the proc_entry contains a comm file with comm_prefix*. * Otherwise returns false. 
*/ static int procfs_is_workload_pid(const char *comm_prefix, struct dirent *proc_entry) { char buffer[MAX_PATH]; int comm_fd, retval; char *t_name; if (proc_entry->d_type != DT_DIR) return 0; if (*proc_entry->d_name == '.') return 0; /* check if the string is a pid */ for (t_name = proc_entry->d_name; t_name; t_name++) { if (!isdigit(*t_name)) break; } if (*t_name != '\0') return 0; snprintf(buffer, MAX_PATH, "/proc/%s/comm", proc_entry->d_name); comm_fd = open(buffer, O_RDONLY); if (comm_fd < 0) return 0; memset(buffer, 0, MAX_PATH); retval = read(comm_fd, buffer, MAX_PATH); close(comm_fd); if (retval <= 0) return 0; retval = strncmp(comm_prefix, buffer, strlen(comm_prefix)); if (retval) return 0; /* comm already have \n */ debug_msg("Found workload pid:%s comm:%s", proc_entry->d_name, buffer); return 1; } /* * set_comm_sched_attr - set sched params to threads starting with char *comm_prefix * * This function uses procfs to list the currently running threads and then set the * sched_attr *attr to the threads that start with char *comm_prefix. It is * mainly used to set the priority to the kernel threads created by the * tracers. 
*/ int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr) { struct dirent *proc_entry; DIR *procfs; int retval; if (strlen(comm_prefix) >= MAX_PATH) { err_msg("Command prefix is too long: %d < strlen(%s)\n", MAX_PATH, comm_prefix); return 1; } procfs = opendir("/proc"); if (!procfs) { err_msg("Could not open procfs\n"); return 1; } while ((proc_entry = readdir(procfs))) { retval = procfs_is_workload_pid(comm_prefix, proc_entry); if (!retval) continue; /* procfs_is_workload_pid confirmed it is a pid */ retval = __set_sched_attr(atoi(proc_entry->d_name), attr); if (retval) { err_msg("Error setting sched attributes for pid:%s\n", proc_entry->d_name); goto out_err; } debug_msg("Set sched attributes for pid:%s\n", proc_entry->d_name); } return 0; out_err: closedir(procfs); return 1; } #define INVALID_VAL (~0L) static long get_long_ns_after_colon(char *start) { long val = INVALID_VAL; /* find the ":" */ start = strstr(start, ":"); if (!start) return -1; /* skip ":" */ start++; val = parse_ns_duration(start); return val; } static long get_long_after_colon(char *start) { long val = INVALID_VAL; /* find the ":" */ start = strstr(start, ":"); if (!start) return -1; /* skip ":" */ start++; val = get_llong_from_str(start); return val; } /* * parse priority in the format: * SCHED_OTHER: * o:<prio> * O:<prio> * SCHED_RR: * r:<prio> * R:<prio> * SCHED_FIFO: * f:<prio> * F:<prio> * SCHED_DEADLINE: * d:runtime:period * D:runtime:period */ int parse_prio(char *arg, struct sched_attr *sched_param) { long prio; long runtime; long period; memset(sched_param, 0, sizeof(*sched_param)); sched_param->size = sizeof(*sched_param); switch (arg[0]) { case 'd': case 'D': /* d:runtime:period */ if (strlen(arg) < 4) return -1; runtime = get_long_ns_after_colon(arg); if (runtime == INVALID_VAL) return -1; period = get_long_ns_after_colon(&arg[2]); if (period == INVALID_VAL) return -1; if (runtime > period) return -1; sched_param->sched_policy = SCHED_DEADLINE; 
sched_param->sched_runtime = runtime; sched_param->sched_deadline = period; sched_param->sched_period = period; break; case 'f': case 'F': /* f:prio */ prio = get_long_after_colon(arg); if (prio == INVALID_VAL) return -1; if (prio < sched_get_priority_min(SCHED_FIFO)) return -1; if (prio > sched_get_priority_max(SCHED_FIFO)) return -1; sched_param->sched_policy = SCHED_FIFO; sched_param->sched_priority = prio; break; case 'r': case 'R': /* r:prio */ prio = get_long_after_colon(arg); if (prio == INVALID_VAL) return -1; if (prio < sched_get_priority_min(SCHED_RR)) return -1; if (prio > sched_get_priority_max(SCHED_RR)) return -1; sched_param->sched_policy = SCHED_RR; sched_param->sched_priority = prio; break; case 'o': case 'O': /* o:prio */ prio = get_long_after_colon(arg); if (prio == INVALID_VAL) return -1; if (prio < sched_get_priority_min(SCHED_OTHER)) return -1; if (prio > sched_get_priority_max(SCHED_OTHER)) return -1; sched_param->sched_policy = SCHED_OTHER; sched_param->sched_priority = prio; break; default: return -1; } return 0; } /* * set_cpu_dma_latency - set the /dev/cpu_dma_latecy * * This is used to reduce the exit from idle latency. The value * will be reset once the file descriptor of /dev/cpu_dma_latecy * is closed. * * Return: the /dev/cpu_dma_latecy file descriptor */ int set_cpu_dma_latency(int32_t latency) { int retval; int fd; fd = open("/dev/cpu_dma_latency", O_RDWR); if (fd < 0) { err_msg("Error opening /dev/cpu_dma_latency\n"); return -1; } retval = write(fd, &latency, 4); if (retval < 1) { err_msg("Error setting /dev/cpu_dma_latency\n"); close(fd); return -1; } debug_msg("Set /dev/cpu_dma_latency to %d\n", latency); return fd; } #define _STR(x) #x #define STR(x) _STR(x) /* * find_mount - find a the mount point of a given fs * * Returns 0 if mount is not found, otherwise return 1 and fill mp * with the mount point. 
*/ static const int find_mount(const char *fs, char *mp, int sizeof_mp) { char mount_point[MAX_PATH]; char type[100]; int found; FILE *fp; fp = fopen("/proc/mounts", "r"); if (!fp) return 0; while (fscanf(fp, "%*s %" STR(MAX_PATH) "s %99s %*s %*d %*d\n", mount_point, type) == 2) { if (strcmp(type, fs) == 0) { found = 1; break; } } fclose(fp); if (!found) return 0; memset(mp, 0, sizeof_mp); strncpy(mp, mount_point, sizeof_mp - 1); debug_msg("Fs %s found at %s\n", fs, mp); return 1; } /* * get_self_cgroup - get the current thread cgroup path * * Parse /proc/$$/cgroup file to get the thread's cgroup. As an example of line to parse: * * 0::/user.slice/user-0.slice/session-3.scope'\n' * * This function is interested in the content after the second : and before the '\n'. * * Returns 1 if a string was found, 0 otherwise. */ static int get_self_cgroup(char *self_cg, int sizeof_self_cg) { char path[MAX_PATH], *start; int fd, retval; snprintf(path, MAX_PATH, "/proc/%d/cgroup", getpid()); fd = open(path, O_RDONLY); if (fd < 0) return 0; retval = read(fd, path, MAX_PATH); close(fd); if (retval <= 0) return 0; start = path; start = strstr(start, ":"); if (!start) return 0; /* skip ":" */ start++; start = strstr(start, ":"); if (!start) return 0; /* skip ":" */ start++; if (strlen(start) >= sizeof_self_cg) return 0; snprintf(self_cg, sizeof_self_cg, "%s", start); /* Swap '\n' with '\0' */ start = strstr(self_cg, "\n"); /* there must be '\n' */ if (!start) return 0; /* ok, it found a string after the second : and before the \n */ *start = '\0'; return 1; } /* * set_comm_cgroup - Set cgroup to pid_t pid * * If cgroup argument is not NULL, the threads will move to the given cgroup. * Otherwise, the cgroup of the calling, i.e., rtla, thread will be used. * * Supports cgroup v2. * * Returns 1 on success, 0 otherwise. 
*/ int set_pid_cgroup(pid_t pid, const char *cgroup) { char cgroup_path[MAX_PATH - strlen("/cgroup.procs")]; char cgroup_procs[MAX_PATH]; char pid_str[24]; int retval; int cg_fd; retval = find_mount("cgroup2", cgroup_path, sizeof(cgroup_path)); if (!retval) { err_msg("Did not find cgroupv2 mount point\n"); return 0; } if (!cgroup) { retval = get_self_cgroup(&cgroup_path[strlen(cgroup_path)], sizeof(cgroup_path) - strlen(cgroup_path)); if (!retval) { err_msg("Did not find self cgroup\n"); return 0; } } else { snprintf(&cgroup_path[strlen(cgroup_path)], sizeof(cgroup_path) - strlen(cgroup_path), "%s/", cgroup); } snprintf(cgroup_procs, MAX_PATH, "%s/cgroup.procs", cgroup_path); debug_msg("Using cgroup path at: %s\n", cgroup_procs); cg_fd = open(cgroup_procs, O_RDWR); if (cg_fd < 0) return 0; snprintf(pid_str, sizeof(pid_str), "%d\n", pid); retval = write(cg_fd, pid_str, strlen(pid_str)); if (retval < 0) err_msg("Error setting cgroup attributes for pid:%s - %s\n", pid_str, strerror(errno)); else debug_msg("Set cgroup attributes for pid:%s\n", pid_str); close(cg_fd); return (retval >= 0); } /** * set_comm_cgroup - Set cgroup to threads starting with char *comm_prefix * * If cgroup argument is not NULL, the threads will move to the given cgroup. * Otherwise, the cgroup of the calling, i.e., rtla, thread will be used. * * Supports cgroup v2. * * Returns 1 on success, 0 otherwise. 
*/ int set_comm_cgroup(const char *comm_prefix, const char *cgroup) { char cgroup_path[MAX_PATH - strlen("/cgroup.procs")]; char cgroup_procs[MAX_PATH]; struct dirent *proc_entry; DIR *procfs; int retval; int cg_fd; if (strlen(comm_prefix) >= MAX_PATH) { err_msg("Command prefix is too long: %d < strlen(%s)\n", MAX_PATH, comm_prefix); return 0; } retval = find_mount("cgroup2", cgroup_path, sizeof(cgroup_path)); if (!retval) { err_msg("Did not find cgroupv2 mount point\n"); return 0; } if (!cgroup) { retval = get_self_cgroup(&cgroup_path[strlen(cgroup_path)], sizeof(cgroup_path) - strlen(cgroup_path)); if (!retval) { err_msg("Did not find self cgroup\n"); return 0; } } else { snprintf(&cgroup_path[strlen(cgroup_path)], sizeof(cgroup_path) - strlen(cgroup_path), "%s/", cgroup); } snprintf(cgroup_procs, MAX_PATH, "%s/cgroup.procs", cgroup_path); debug_msg("Using cgroup path at: %s\n", cgroup_procs); cg_fd = open(cgroup_procs, O_RDWR); if (cg_fd < 0) return 0; procfs = opendir("/proc"); if (!procfs) { err_msg("Could not open procfs\n"); goto out_cg; } while ((proc_entry = readdir(procfs))) { retval = procfs_is_workload_pid(comm_prefix, proc_entry); if (!retval) continue; retval = write(cg_fd, proc_entry->d_name, strlen(proc_entry->d_name)); if (retval < 0) { err_msg("Error setting cgroup attributes for pid:%s - %s\n", proc_entry->d_name, strerror(errno)); goto out_procfs; } debug_msg("Set cgroup attributes for pid:%s\n", proc_entry->d_name); } closedir(procfs); close(cg_fd); return 1; out_procfs: closedir(procfs); out_cg: close(cg_fd); return 0; } /** * auto_house_keeping - Automatically move rtla out of measurement threads * * Try to move rtla away from the tracer, if possible. * * Returns 1 on success, 0 otherwise. */ int auto_house_keeping(cpu_set_t *monitored_cpus) { cpu_set_t rtla_cpus, house_keeping_cpus; int retval; /* first get the CPUs in which rtla can actually run. 
*/ retval = sched_getaffinity(getpid(), sizeof(rtla_cpus), &rtla_cpus); if (retval == -1) { debug_msg("Could not get rtla affinity, rtla might run with the threads!\n"); return 0; } /* then check if the existing setup is already good. */ CPU_AND(&house_keeping_cpus, &rtla_cpus, monitored_cpus); if (!CPU_COUNT(&house_keeping_cpus)) { debug_msg("rtla and the monitored CPUs do not share CPUs."); debug_msg("Skipping auto house-keeping\n"); return 1; } /* remove the intersection */ CPU_XOR(&house_keeping_cpus, &rtla_cpus, monitored_cpus); /* get only those that rtla can run */ CPU_AND(&house_keeping_cpus, &house_keeping_cpus, &rtla_cpus); /* is there any cpu left? */ if (!CPU_COUNT(&house_keeping_cpus)) { debug_msg("Could not find any CPU for auto house-keeping\n"); return 0; } retval = sched_setaffinity(getpid(), sizeof(house_keeping_cpus), &house_keeping_cpus); if (retval == -1) { debug_msg("Could not set affinity for auto house-keeping\n"); return 0; } debug_msg("rtla automatically moved to an auto house-keeping cpu set\n"); return 1; }
linux-master
tools/tracing/rtla/src/utils.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #define _GNU_SOURCE #include <getopt.h> #include <stdlib.h> #include <string.h> #include <signal.h> #include <unistd.h> #include <stdio.h> #include <time.h> #include <errno.h> #include <sched.h> #include <pthread.h> #include "utils.h" #include "osnoise.h" #include "timerlat.h" #include "timerlat_aa.h" #include "timerlat_u.h" struct timerlat_top_params { char *cpus; cpu_set_t monitored_cpus; char *trace_output; char *cgroup_name; unsigned long long runtime; long long stop_us; long long stop_total_us; long long timerlat_period_us; long long print_stack; int sleep_time; int output_divisor; int duration; int quiet; int set_sched; int dma_latency; int no_aa; int aa_only; int dump_tasks; int cgroup; int hk_cpus; int user_top; cpu_set_t hk_cpu_set; struct sched_attr sched_param; struct trace_events *events; }; struct timerlat_top_cpu { int irq_count; int thread_count; int user_count; unsigned long long cur_irq; unsigned long long min_irq; unsigned long long sum_irq; unsigned long long max_irq; unsigned long long cur_thread; unsigned long long min_thread; unsigned long long sum_thread; unsigned long long max_thread; unsigned long long cur_user; unsigned long long min_user; unsigned long long sum_user; unsigned long long max_user; }; struct timerlat_top_data { struct timerlat_top_cpu *cpu_data; int nr_cpus; }; /* * timerlat_free_top - free runtime data */ static void timerlat_free_top(struct timerlat_top_data *data) { free(data->cpu_data); free(data); } /* * timerlat_alloc_histogram - alloc runtime data */ static struct timerlat_top_data *timerlat_alloc_top(int nr_cpus) { struct timerlat_top_data *data; int cpu; data = calloc(1, sizeof(*data)); if (!data) return NULL; data->nr_cpus = nr_cpus; /* one set of histograms per CPU */ data->cpu_data = calloc(1, sizeof(*data->cpu_data) * nr_cpus); if (!data->cpu_data) goto cleanup; /* set the min to max */ for 
(cpu = 0; cpu < nr_cpus; cpu++) { data->cpu_data[cpu].min_irq = ~0; data->cpu_data[cpu].min_thread = ~0; data->cpu_data[cpu].min_user = ~0; } return data; cleanup: timerlat_free_top(data); return NULL; } /* * timerlat_hist_update - record a new timerlat occurent on cpu, updating data */ static void timerlat_top_update(struct osnoise_tool *tool, int cpu, unsigned long long thread, unsigned long long latency) { struct timerlat_top_data *data = tool->data; struct timerlat_top_cpu *cpu_data = &data->cpu_data[cpu]; if (!thread) { cpu_data->irq_count++; cpu_data->cur_irq = latency; update_min(&cpu_data->min_irq, &latency); update_sum(&cpu_data->sum_irq, &latency); update_max(&cpu_data->max_irq, &latency); } else if (thread == 1) { cpu_data->thread_count++; cpu_data->cur_thread = latency; update_min(&cpu_data->min_thread, &latency); update_sum(&cpu_data->sum_thread, &latency); update_max(&cpu_data->max_thread, &latency); } else { cpu_data->user_count++; cpu_data->cur_user = latency; update_min(&cpu_data->min_user, &latency); update_sum(&cpu_data->sum_user, &latency); update_max(&cpu_data->max_user, &latency); } } /* * timerlat_top_handler - this is the handler for timerlat tracer events */ static int timerlat_top_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { struct trace_instance *trace = context; struct timerlat_top_params *params; unsigned long long latency, thread; struct osnoise_tool *top; int cpu = record->cpu; top = container_of(trace, struct osnoise_tool, trace); params = top->params; if (!params->aa_only) { tep_get_field_val(s, event, "context", record, &thread, 1); tep_get_field_val(s, event, "timer_latency", record, &latency, 1); timerlat_top_update(top, cpu, thread, latency); } return 0; } /* * timerlat_top_header - print the header of the tool output */ static void timerlat_top_header(struct osnoise_tool *top) { struct timerlat_top_params *params = top->params; struct trace_seq *s = top->trace.seq; char 
duration[26]; get_duration(top->start_time, duration, sizeof(duration)); trace_seq_printf(s, "\033[2;37;40m"); trace_seq_printf(s, " Timer Latency "); if (params->user_top) trace_seq_printf(s, " "); trace_seq_printf(s, "\033[0;0;0m"); trace_seq_printf(s, "\n"); trace_seq_printf(s, "%-6s | IRQ Timer Latency (%s) | Thread Timer Latency (%s)", duration, params->output_divisor == 1 ? "ns" : "us", params->output_divisor == 1 ? "ns" : "us"); if (params->user_top) { trace_seq_printf(s, " | Ret user Timer Latency (%s)", params->output_divisor == 1 ? "ns" : "us"); } trace_seq_printf(s, "\n"); trace_seq_printf(s, "\033[2;30;47m"); trace_seq_printf(s, "CPU COUNT | cur min avg max | cur min avg max"); if (params->user_top) trace_seq_printf(s, " | cur min avg max"); trace_seq_printf(s, "\033[0;0;0m"); trace_seq_printf(s, "\n"); } /* * timerlat_top_print - prints the output of a given CPU */ static void timerlat_top_print(struct osnoise_tool *top, int cpu) { struct timerlat_top_params *params = top->params; struct timerlat_top_data *data = top->data; struct timerlat_top_cpu *cpu_data = &data->cpu_data[cpu]; int divisor = params->output_divisor; struct trace_seq *s = top->trace.seq; if (divisor == 0) return; /* * Skip if no data is available: is this cpu offline? */ if (!cpu_data->irq_count && !cpu_data->thread_count) return; /* * Unless trace is being lost, IRQ counter is always the max. 
*/ trace_seq_printf(s, "%3d #%-9d |", cpu, cpu_data->irq_count); if (!cpu_data->irq_count) { trace_seq_printf(s, " - "); trace_seq_printf(s, " - "); trace_seq_printf(s, " - "); trace_seq_printf(s, " - |"); } else { trace_seq_printf(s, "%9llu ", cpu_data->cur_irq / params->output_divisor); trace_seq_printf(s, "%9llu ", cpu_data->min_irq / params->output_divisor); trace_seq_printf(s, "%9llu ", (cpu_data->sum_irq / cpu_data->irq_count) / divisor); trace_seq_printf(s, "%9llu |", cpu_data->max_irq / divisor); } if (!cpu_data->thread_count) { trace_seq_printf(s, " - "); trace_seq_printf(s, " - "); trace_seq_printf(s, " - "); trace_seq_printf(s, " -\n"); } else { trace_seq_printf(s, "%9llu ", cpu_data->cur_thread / divisor); trace_seq_printf(s, "%9llu ", cpu_data->min_thread / divisor); trace_seq_printf(s, "%9llu ", (cpu_data->sum_thread / cpu_data->thread_count) / divisor); trace_seq_printf(s, "%9llu", cpu_data->max_thread / divisor); } if (!params->user_top) { trace_seq_printf(s, "\n"); return; } trace_seq_printf(s, " |"); if (!cpu_data->user_count) { trace_seq_printf(s, " - "); trace_seq_printf(s, " - "); trace_seq_printf(s, " - "); trace_seq_printf(s, " -\n"); } else { trace_seq_printf(s, "%9llu ", cpu_data->cur_user / divisor); trace_seq_printf(s, "%9llu ", cpu_data->min_user / divisor); trace_seq_printf(s, "%9llu ", (cpu_data->sum_user / cpu_data->user_count) / divisor); trace_seq_printf(s, "%9llu\n", cpu_data->max_user / divisor); } } /* * clear_terminal - clears the output terminal */ static void clear_terminal(struct trace_seq *seq) { if (!config_debug) trace_seq_printf(seq, "\033c"); } /* * timerlat_print_stats - print data for all cpus */ static void timerlat_print_stats(struct timerlat_top_params *params, struct osnoise_tool *top) { struct trace_instance *trace = &top->trace; static int nr_cpus = -1; int i; if (params->aa_only) return; if (nr_cpus == -1) nr_cpus = sysconf(_SC_NPROCESSORS_CONF); if (!params->quiet) clear_terminal(trace->seq); 
timerlat_top_header(top); for (i = 0; i < nr_cpus; i++) { if (params->cpus && !CPU_ISSET(i, &params->monitored_cpus)) continue; timerlat_top_print(top, i); } trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); } /* * timerlat_top_usage - prints timerlat top usage message */ static void timerlat_top_usage(char *usage) { int i; static const char *const msg[] = { "", " usage: rtla timerlat [top] [-h] [-q] [-a us] [-d s] [-D] [-n] [-p us] [-i us] [-T us] [-s us] \\", " [[-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] [-H cpu-list]\\", " [-P priority] [--dma-latency us] [--aa-only us] [-C[=cgroup_name]] [-u]", "", " -h/--help: print this menu", " -a/--auto: set automatic trace mode, stopping the session if argument in us latency is hit", " --aa-only us: stop if <us> latency is hit, only printing the auto analysis (reduces CPU usage)", " -p/--period us: timerlat period in us", " -i/--irq us: stop trace if the irq latency is higher than the argument in us", " -T/--thread us: stop trace if the thread latency is higher than the argument in us", " -s/--stack us: save the stack trace at the IRQ if a thread latency is higher than the argument in us", " -c/--cpus cpus: run the tracer only on the given cpus", " -H/--house-keeping cpus: run rtla control threads only on the given cpus", " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited", " -d/--duration time[m|h|d]: duration of the session in seconds", " -D/--debug: print debug info", " --dump-tasks: prints the task running on all CPUs if stop conditions are met (depends on !--no-aa)", " -t/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]", " -e/--event <sys:event>: enable the <sys:event> in the trace instance, multiple -e are allowed", " --filter <command>: enable a trace event filter to the previous -e event", " --trigger <command>: enable a trace event trigger to the previous -e event", " -n/--nano: display 
data in nanoseconds", " --no-aa: disable auto-analysis, reducing rtla timerlat cpu usage", " -q/--quiet print only a summary at the end", " --dma-latency us: set /dev/cpu_dma_latency latency <us> to reduce exit from idle latency", " -P/--priority o:prio|r:prio|f:prio|d:runtime:period : set scheduling parameters", " o:prio - use SCHED_OTHER with prio", " r:prio - use SCHED_RR with prio", " f:prio - use SCHED_FIFO with prio", " d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE with runtime and period", " in nanoseconds", " -u/--user-threads: use rtla user-space threads instead of in-kernel timerlat threads", NULL, }; if (usage) fprintf(stderr, "%s\n", usage); fprintf(stderr, "rtla timerlat top: a per-cpu summary of the timer latency (version %s)\n", VERSION); for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(1); } /* * timerlat_top_parse_args - allocs, parse and fill the cmd line parameters */ static struct timerlat_top_params *timerlat_top_parse_args(int argc, char **argv) { struct timerlat_top_params *params; struct trace_events *tevent; long long auto_thresh; int retval; int c; params = calloc(1, sizeof(*params)); if (!params) exit(1); /* disabled by default */ params->dma_latency = -1; /* display data in microseconds */ params->output_divisor = 1000; while (1) { static struct option long_options[] = { {"auto", required_argument, 0, 'a'}, {"cpus", required_argument, 0, 'c'}, {"cgroup", optional_argument, 0, 'C'}, {"debug", no_argument, 0, 'D'}, {"duration", required_argument, 0, 'd'}, {"event", required_argument, 0, 'e'}, {"help", no_argument, 0, 'h'}, {"house-keeping", required_argument, 0, 'H'}, {"irq", required_argument, 0, 'i'}, {"nano", no_argument, 0, 'n'}, {"period", required_argument, 0, 'p'}, {"priority", required_argument, 0, 'P'}, {"quiet", no_argument, 0, 'q'}, {"stack", required_argument, 0, 's'}, {"thread", required_argument, 0, 'T'}, {"trace", optional_argument, 0, 't'}, {"user-threads", no_argument, 0, 'u'}, {"trigger", 
required_argument, 0, '0'}, {"filter", required_argument, 0, '1'}, {"dma-latency", required_argument, 0, '2'}, {"no-aa", no_argument, 0, '3'}, {"dump-tasks", no_argument, 0, '4'}, {"aa-only", required_argument, 0, '5'}, {0, 0, 0, 0} }; /* getopt_long stores the option index here. */ int option_index = 0; c = getopt_long(argc, argv, "a:c:C::d:De:hH:i:np:P:qs:t::T:u0:1:2:345:", long_options, &option_index); /* detect the end of the options. */ if (c == -1) break; switch (c) { case 'a': auto_thresh = get_llong_from_str(optarg); /* set thread stop to auto_thresh */ params->stop_total_us = auto_thresh; params->stop_us = auto_thresh; /* get stack trace */ params->print_stack = auto_thresh; /* set trace */ params->trace_output = "timerlat_trace.txt"; break; case '5': /* it is here because it is similar to -a */ auto_thresh = get_llong_from_str(optarg); /* set thread stop to auto_thresh */ params->stop_total_us = auto_thresh; params->stop_us = auto_thresh; /* get stack trace */ params->print_stack = auto_thresh; /* set aa_only to avoid parsing the trace */ params->aa_only = 1; break; case 'c': retval = parse_cpu_set(optarg, &params->monitored_cpus); if (retval) timerlat_top_usage("\nInvalid -c cpu list\n"); params->cpus = optarg; break; case 'C': params->cgroup = 1; if (!optarg) { /* will inherit this cgroup */ params->cgroup_name = NULL; } else if (*optarg == '=') { /* skip the = */ params->cgroup_name = ++optarg; } break; case 'D': config_debug = 1; break; case 'd': params->duration = parse_seconds_duration(optarg); if (!params->duration) timerlat_top_usage("Invalid -D duration\n"); break; case 'e': tevent = trace_event_alloc(optarg); if (!tevent) { err_msg("Error alloc trace event"); exit(EXIT_FAILURE); } if (params->events) tevent->next = params->events; params->events = tevent; break; case 'h': case '?': timerlat_top_usage(NULL); break; case 'H': params->hk_cpus = 1; retval = parse_cpu_set(optarg, &params->hk_cpu_set); if (retval) { err_msg("Error parsing house 
keeping CPUs\n"); exit(EXIT_FAILURE); } break; case 'i': params->stop_us = get_llong_from_str(optarg); break; case 'n': params->output_divisor = 1; break; case 'p': params->timerlat_period_us = get_llong_from_str(optarg); if (params->timerlat_period_us > 1000000) timerlat_top_usage("Period longer than 1 s\n"); break; case 'P': retval = parse_prio(optarg, &params->sched_param); if (retval == -1) timerlat_top_usage("Invalid -P priority"); params->set_sched = 1; break; case 'q': params->quiet = 1; break; case 's': params->print_stack = get_llong_from_str(optarg); break; case 'T': params->stop_total_us = get_llong_from_str(optarg); break; case 't': if (optarg) /* skip = */ params->trace_output = &optarg[1]; else params->trace_output = "timerlat_trace.txt"; break; case 'u': params->user_top = true; break; case '0': /* trigger */ if (params->events) { retval = trace_event_add_trigger(params->events, optarg); if (retval) { err_msg("Error adding trigger %s\n", optarg); exit(EXIT_FAILURE); } } else { timerlat_top_usage("--trigger requires a previous -e\n"); } break; case '1': /* filter */ if (params->events) { retval = trace_event_add_filter(params->events, optarg); if (retval) { err_msg("Error adding filter %s\n", optarg); exit(EXIT_FAILURE); } } else { timerlat_top_usage("--filter requires a previous -e\n"); } break; case '2': /* dma-latency */ params->dma_latency = get_llong_from_str(optarg); if (params->dma_latency < 0 || params->dma_latency > 10000) { err_msg("--dma-latency needs to be >= 0 and < 10000"); exit(EXIT_FAILURE); } break; case '3': /* no-aa */ params->no_aa = 1; break; case '4': params->dump_tasks = 1; break; default: timerlat_top_usage("Invalid option"); } } if (geteuid()) { err_msg("rtla needs root permission\n"); exit(EXIT_FAILURE); } /* * Auto analysis only happens if stop tracing, thus: */ if (!params->stop_us && !params->stop_total_us) params->no_aa = 1; if (params->no_aa && params->aa_only) timerlat_top_usage("--no-aa and --aa-only are mutually 
exclusive!"); return params; } /* * timerlat_top_apply_config - apply the top configs to the initialized tool */ static int timerlat_top_apply_config(struct osnoise_tool *top, struct timerlat_top_params *params) { int retval; int i; if (!params->sleep_time) params->sleep_time = 1; if (params->cpus) { retval = osnoise_set_cpus(top->context, params->cpus); if (retval) { err_msg("Failed to apply CPUs config\n"); goto out_err; } } else { for (i = 0; i < sysconf(_SC_NPROCESSORS_CONF); i++) CPU_SET(i, &params->monitored_cpus); } if (params->stop_us) { retval = osnoise_set_stop_us(top->context, params->stop_us); if (retval) { err_msg("Failed to set stop us\n"); goto out_err; } } if (params->stop_total_us) { retval = osnoise_set_stop_total_us(top->context, params->stop_total_us); if (retval) { err_msg("Failed to set stop total us\n"); goto out_err; } } if (params->timerlat_period_us) { retval = osnoise_set_timerlat_period_us(top->context, params->timerlat_period_us); if (retval) { err_msg("Failed to set timerlat period\n"); goto out_err; } } if (params->print_stack) { retval = osnoise_set_print_stack(top->context, params->print_stack); if (retval) { err_msg("Failed to set print stack\n"); goto out_err; } } if (params->hk_cpus) { retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set), &params->hk_cpu_set); if (retval == -1) { err_msg("Failed to set rtla to the house keeping CPUs\n"); goto out_err; } } else if (params->cpus) { /* * Even if the user do not set a house-keeping CPU, try to * move rtla to a CPU set different to the one where the user * set the workload to run. * * No need to check results as this is an automatic attempt. 
*/ auto_house_keeping(&params->monitored_cpus); } if (params->user_top) { retval = osnoise_set_workload(top->context, 0); if (retval) { err_msg("Failed to set OSNOISE_WORKLOAD option\n"); goto out_err; } } return 0; out_err: return -1; } /* * timerlat_init_top - initialize a timerlat top tool with parameters */ static struct osnoise_tool *timerlat_init_top(struct timerlat_top_params *params) { struct osnoise_tool *top; int nr_cpus; nr_cpus = sysconf(_SC_NPROCESSORS_CONF); top = osnoise_init_tool("timerlat_top"); if (!top) return NULL; top->data = timerlat_alloc_top(nr_cpus); if (!top->data) goto out_err; top->params = params; tep_register_event_handler(top->trace.tep, -1, "ftrace", "timerlat", timerlat_top_handler, top); return top; out_err: osnoise_destroy_tool(top); return NULL; } static int stop_tracing; static void stop_top(int sig) { stop_tracing = 1; } /* * timerlat_top_set_signals - handles the signal to stop the tool */ static void timerlat_top_set_signals(struct timerlat_top_params *params) { signal(SIGINT, stop_top); if (params->duration) { signal(SIGALRM, stop_top); alarm(params->duration); } } int timerlat_top_main(int argc, char *argv[]) { struct timerlat_top_params *params; struct osnoise_tool *record = NULL; struct timerlat_u_params params_u; struct osnoise_tool *top = NULL; struct osnoise_tool *aa = NULL; struct trace_instance *trace; int dma_latency_fd = -1; pthread_t timerlat_u; int return_value = 1; char *max_lat; int retval; params = timerlat_top_parse_args(argc, argv); if (!params) exit(1); top = timerlat_init_top(params); if (!top) { err_msg("Could not init osnoise top\n"); goto out_exit; } retval = timerlat_top_apply_config(top, params); if (retval) { err_msg("Could not apply config\n"); goto out_free; } trace = &top->trace; retval = enable_timerlat(trace); if (retval) { err_msg("Failed to enable timerlat tracer\n"); goto out_free; } if (params->set_sched) { retval = set_comm_sched_attr("timerlat/", &params->sched_param); if (retval) { 
err_msg("Failed to set sched parameters\n"); goto out_free; } } if (params->cgroup && !params->user_top) { retval = set_comm_cgroup("timerlat/", params->cgroup_name); if (!retval) { err_msg("Failed to move threads to cgroup\n"); goto out_free; } } if (params->dma_latency >= 0) { dma_latency_fd = set_cpu_dma_latency(params->dma_latency); if (dma_latency_fd < 0) { err_msg("Could not set /dev/cpu_dma_latency.\n"); goto out_free; } } if (params->trace_output) { record = osnoise_init_trace_tool("timerlat"); if (!record) { err_msg("Failed to enable the trace instance\n"); goto out_free; } if (params->events) { retval = trace_events_enable(&record->trace, params->events); if (retval) goto out_top; } } if (!params->no_aa) { if (params->aa_only) { /* as top is not used for display, use it for aa */ aa = top; } else { /* otherwise, a new instance is needed */ aa = osnoise_init_tool("timerlat_aa"); if (!aa) goto out_top; } retval = timerlat_aa_init(aa, params->dump_tasks); if (retval) { err_msg("Failed to enable the auto analysis instance\n"); goto out_top; } /* if it is re-using the main instance, there is no need to start it */ if (aa != top) { retval = enable_timerlat(&aa->trace); if (retval) { err_msg("Failed to enable timerlat tracer\n"); goto out_top; } } } /* * Start the tracers here, after having set all instances. * * Let the trace instance start first for the case of hitting a stop * tracing while enabling other instances. The trace instance is the * one with most valuable information. 
*/ if (params->trace_output) trace_instance_start(&record->trace); if (!params->no_aa && aa != top) trace_instance_start(&aa->trace); trace_instance_start(trace); top->start_time = time(NULL); timerlat_top_set_signals(params); if (params->user_top) { /* rtla asked to stop */ params_u.should_run = 1; /* all threads left */ params_u.stopped_running = 0; params_u.set = &params->monitored_cpus; if (params->set_sched) params_u.sched_param = &params->sched_param; else params_u.sched_param = NULL; params_u.cgroup_name = params->cgroup_name; retval = pthread_create(&timerlat_u, NULL, timerlat_u_dispatcher, &params_u); if (retval) err_msg("Error creating timerlat user-space threads\n"); } while (!stop_tracing) { sleep(params->sleep_time); if (params->aa_only && !trace_is_off(&top->trace, &record->trace)) continue; retval = tracefs_iterate_raw_events(trace->tep, trace->inst, NULL, 0, collect_registered_events, trace); if (retval < 0) { err_msg("Error iterating on events\n"); goto out_top; } if (!params->quiet) timerlat_print_stats(params, top); if (trace_is_off(&top->trace, &record->trace)) break; /* is there still any user-threads ? */ if (params->user_top) { if (params_u.stopped_running) { debug_msg("timerlat user space threads stopped!\n"); break; } } } if (params->user_top && !params_u.stopped_running) { params_u.should_run = 0; sleep(1); } timerlat_print_stats(params, top); return_value = 0; if (trace_is_off(&top->trace, &record->trace)) { printf("rtla timerlat hit stop tracing\n"); if (!params->no_aa) timerlat_auto_analysis(params->stop_us, params->stop_total_us); if (params->trace_output) { printf(" Saving trace to %s\n", params->trace_output); save_trace_to_file(record->trace.inst, params->trace_output); } } else if (params->aa_only) { /* * If the trace did not stop with --aa-only, at least print the * max known latency. 
*/ max_lat = tracefs_instance_file_read(trace->inst, "tracing_max_latency", NULL); if (max_lat) { printf(" Max latency was %s\n", max_lat); free(max_lat); } } out_top: timerlat_aa_destroy(); if (dma_latency_fd >= 0) close(dma_latency_fd); trace_events_destroy(&record->trace, params->events); params->events = NULL; out_free: timerlat_free_top(top->data); if (aa && aa != top) osnoise_destroy_tool(aa); osnoise_destroy_tool(record); osnoise_destroy_tool(top); free(params); out_exit: exit(return_value); }
linux-master
tools/tracing/rtla/src/timerlat_top.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]> */ #define _GNU_SOURCE #include <getopt.h> #include <stdlib.h> #include <string.h> #include <signal.h> #include <unistd.h> #include <stdio.h> #include <time.h> #include <sched.h> #include "osnoise.h" #include "utils.h" enum osnoise_mode { MODE_OSNOISE = 0, MODE_HWNOISE }; /* * osnoise top parameters */ struct osnoise_top_params { char *cpus; cpu_set_t monitored_cpus; char *trace_output; char *cgroup_name; unsigned long long runtime; unsigned long long period; long long threshold; long long stop_us; long long stop_total_us; int sleep_time; int duration; int quiet; int set_sched; int cgroup; int hk_cpus; cpu_set_t hk_cpu_set; struct sched_attr sched_param; struct trace_events *events; enum osnoise_mode mode; }; struct osnoise_top_cpu { unsigned long long sum_runtime; unsigned long long sum_noise; unsigned long long max_noise; unsigned long long max_sample; unsigned long long hw_count; unsigned long long nmi_count; unsigned long long irq_count; unsigned long long softirq_count; unsigned long long thread_count; int sum_cycles; }; struct osnoise_top_data { struct osnoise_top_cpu *cpu_data; int nr_cpus; }; /* * osnoise_free_top - free runtime data */ static void osnoise_free_top(struct osnoise_top_data *data) { free(data->cpu_data); free(data); } /* * osnoise_alloc_histogram - alloc runtime data */ static struct osnoise_top_data *osnoise_alloc_top(int nr_cpus) { struct osnoise_top_data *data; data = calloc(1, sizeof(*data)); if (!data) return NULL; data->nr_cpus = nr_cpus; /* one set of histograms per CPU */ data->cpu_data = calloc(1, sizeof(*data->cpu_data) * nr_cpus); if (!data->cpu_data) goto cleanup; return data; cleanup: osnoise_free_top(data); return NULL; } /* * osnoise_top_handler - this is the handler for osnoise tracer events */ static int osnoise_top_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void 
*context) { struct trace_instance *trace = context; struct osnoise_tool *tool; unsigned long long val; struct osnoise_top_cpu *cpu_data; struct osnoise_top_data *data; int cpu = record->cpu; tool = container_of(trace, struct osnoise_tool, trace); data = tool->data; cpu_data = &data->cpu_data[cpu]; cpu_data->sum_cycles++; tep_get_field_val(s, event, "runtime", record, &val, 1); update_sum(&cpu_data->sum_runtime, &val); tep_get_field_val(s, event, "noise", record, &val, 1); update_max(&cpu_data->max_noise, &val); update_sum(&cpu_data->sum_noise, &val); tep_get_field_val(s, event, "max_sample", record, &val, 1); update_max(&cpu_data->max_sample, &val); tep_get_field_val(s, event, "hw_count", record, &val, 1); update_sum(&cpu_data->hw_count, &val); tep_get_field_val(s, event, "nmi_count", record, &val, 1); update_sum(&cpu_data->nmi_count, &val); tep_get_field_val(s, event, "irq_count", record, &val, 1); update_sum(&cpu_data->irq_count, &val); tep_get_field_val(s, event, "softirq_count", record, &val, 1); update_sum(&cpu_data->softirq_count, &val); tep_get_field_val(s, event, "thread_count", record, &val, 1); update_sum(&cpu_data->thread_count, &val); return 0; } /* * osnoise_top_header - print the header of the tool output */ static void osnoise_top_header(struct osnoise_tool *top) { struct osnoise_top_params *params = top->params; struct trace_seq *s = top->trace.seq; char duration[26]; get_duration(top->start_time, duration, sizeof(duration)); trace_seq_printf(s, "\033[2;37;40m"); trace_seq_printf(s, " "); if (params->mode == MODE_OSNOISE) { trace_seq_printf(s, "Operating System Noise"); trace_seq_printf(s, " "); } else if (params->mode == MODE_HWNOISE) { trace_seq_printf(s, "Hardware-related Noise"); } trace_seq_printf(s, " "); trace_seq_printf(s, "\033[0;0;0m"); trace_seq_printf(s, "\n"); trace_seq_printf(s, "duration: %9s | time is in us\n", duration); trace_seq_printf(s, "\033[2;30;47m"); trace_seq_printf(s, "CPU Period Runtime "); trace_seq_printf(s, " Noise "); 
trace_seq_printf(s, " %% CPU Aval "); trace_seq_printf(s, " Max Noise Max Single "); trace_seq_printf(s, " HW NMI"); if (params->mode == MODE_HWNOISE) goto eol; trace_seq_printf(s, " IRQ Softirq Thread"); eol: trace_seq_printf(s, "\033[0;0;0m"); trace_seq_printf(s, "\n"); } /* * clear_terminal - clears the output terminal */ static void clear_terminal(struct trace_seq *seq) { if (!config_debug) trace_seq_printf(seq, "\033c"); } /* * osnoise_top_print - prints the output of a given CPU */ static void osnoise_top_print(struct osnoise_tool *tool, int cpu) { struct osnoise_top_params *params = tool->params; struct trace_seq *s = tool->trace.seq; struct osnoise_top_cpu *cpu_data; struct osnoise_top_data *data; int percentage; int decimal; data = tool->data; cpu_data = &data->cpu_data[cpu]; if (!cpu_data->sum_runtime) return; percentage = ((cpu_data->sum_runtime - cpu_data->sum_noise) * 10000000) / cpu_data->sum_runtime; decimal = percentage % 100000; percentage = percentage / 100000; trace_seq_printf(s, "%3d #%-6d %12llu ", cpu, cpu_data->sum_cycles, cpu_data->sum_runtime); trace_seq_printf(s, "%12llu ", cpu_data->sum_noise); trace_seq_printf(s, " %3d.%05d", percentage, decimal); trace_seq_printf(s, "%12llu %12llu", cpu_data->max_noise, cpu_data->max_sample); trace_seq_printf(s, "%12llu ", cpu_data->hw_count); trace_seq_printf(s, "%12llu ", cpu_data->nmi_count); if (params->mode == MODE_HWNOISE) { trace_seq_printf(s, "\n"); return; } trace_seq_printf(s, "%12llu ", cpu_data->irq_count); trace_seq_printf(s, "%12llu ", cpu_data->softirq_count); trace_seq_printf(s, "%12llu\n", cpu_data->thread_count); } /* * osnoise_print_stats - print data for all cpus */ static void osnoise_print_stats(struct osnoise_top_params *params, struct osnoise_tool *top) { struct trace_instance *trace = &top->trace; static int nr_cpus = -1; int i; if (nr_cpus == -1) nr_cpus = sysconf(_SC_NPROCESSORS_CONF); if (!params->quiet) clear_terminal(trace->seq); osnoise_top_header(top); for (i = 0; i < 
nr_cpus; i++) { if (params->cpus && !CPU_ISSET(i, &params->monitored_cpus)) continue; osnoise_top_print(top, i); } trace_seq_do_printf(trace->seq); trace_seq_reset(trace->seq); } /* * osnoise_top_usage - prints osnoise top usage message */ static void osnoise_top_usage(struct osnoise_top_params *params, char *usage) { int i; static const char * const msg[] = { " [-h] [-q] [-D] [-d s] [-a us] [-p us] [-r us] [-s us] [-S us] \\", " [-T us] [-t[=file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] \\", " [-c cpu-list] [-H cpu-list] [-P priority] [-C[=cgroup_name]]", "", " -h/--help: print this menu", " -a/--auto: set automatic trace mode, stopping the session if argument in us sample is hit", " -p/--period us: osnoise period in us", " -r/--runtime us: osnoise runtime in us", " -s/--stop us: stop trace if a single sample is higher than the argument in us", " -S/--stop-total us: stop trace if the total sample is higher than the argument in us", " -T/--threshold us: the minimum delta to be considered a noise", " -c/--cpus cpu-list: list of cpus to run osnoise threads", " -H/--house-keeping cpus: run rtla control threads only on the given cpus", " -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited", " -d/--duration time[s|m|h|d]: duration of the session", " -D/--debug: print debug info", " -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]", " -e/--event <sys:event>: enable the <sys:event> in the trace instance, multiple -e are allowed", " --filter <filter>: enable a trace event filter to the previous -e event", " --trigger <trigger>: enable a trace event trigger to the previous -e event", " -q/--quiet print only a summary at the end", " -P/--priority o:prio|r:prio|f:prio|d:runtime:period : set scheduling parameters", " o:prio - use SCHED_OTHER with prio", " r:prio - use SCHED_RR with prio", " f:prio - use SCHED_FIFO with prio", " d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE 
with runtime and period", " in nanoseconds", NULL, }; if (usage) fprintf(stderr, "%s\n", usage); if (params->mode == MODE_OSNOISE) { fprintf(stderr, "rtla osnoise top: a per-cpu summary of the OS noise (version %s)\n", VERSION); fprintf(stderr, " usage: rtla osnoise [top]"); } if (params->mode == MODE_HWNOISE) { fprintf(stderr, "rtla hwnoise: a summary of hardware-related noise (version %s)\n", VERSION); fprintf(stderr, " usage: rtla hwnoise"); } for (i = 0; msg[i]; i++) fprintf(stderr, "%s\n", msg[i]); exit(1); } /* * osnoise_top_parse_args - allocs, parse and fill the cmd line parameters */ struct osnoise_top_params *osnoise_top_parse_args(int argc, char **argv) { struct osnoise_top_params *params; struct trace_events *tevent; int retval; int c; params = calloc(1, sizeof(*params)); if (!params) exit(1); if (strcmp(argv[0], "hwnoise") == 0) { params->mode = MODE_HWNOISE; /* * Reduce CPU usage for 75% to avoid killing the system. */ params->runtime = 750000; params->period = 1000000; } while (1) { static struct option long_options[] = { {"auto", required_argument, 0, 'a'}, {"cpus", required_argument, 0, 'c'}, {"cgroup", optional_argument, 0, 'C'}, {"debug", no_argument, 0, 'D'}, {"duration", required_argument, 0, 'd'}, {"event", required_argument, 0, 'e'}, {"house-keeping", required_argument, 0, 'H'}, {"help", no_argument, 0, 'h'}, {"period", required_argument, 0, 'p'}, {"priority", required_argument, 0, 'P'}, {"quiet", no_argument, 0, 'q'}, {"runtime", required_argument, 0, 'r'}, {"stop", required_argument, 0, 's'}, {"stop-total", required_argument, 0, 'S'}, {"threshold", required_argument, 0, 'T'}, {"trace", optional_argument, 0, 't'}, {"trigger", required_argument, 0, '0'}, {"filter", required_argument, 0, '1'}, {0, 0, 0, 0} }; /* getopt_long stores the option index here. */ int option_index = 0; c = getopt_long(argc, argv, "a:c:C::d:De:hH:p:P:qr:s:S:t::T:0:1:", long_options, &option_index); /* Detect the end of the options. 
*/ if (c == -1) break; switch (c) { case 'a': /* set sample stop to auto_thresh */ params->stop_us = get_llong_from_str(optarg); /* set sample threshold to 1 */ params->threshold = 1; /* set trace */ params->trace_output = "osnoise_trace.txt"; break; case 'c': retval = parse_cpu_set(optarg, &params->monitored_cpus); if (retval) osnoise_top_usage(params, "\nInvalid -c cpu list\n"); params->cpus = optarg; break; case 'C': params->cgroup = 1; if (!optarg) { /* will inherit this cgroup */ params->cgroup_name = NULL; } else if (*optarg == '=') { /* skip the = */ params->cgroup_name = ++optarg; } break; case 'D': config_debug = 1; break; case 'd': params->duration = parse_seconds_duration(optarg); if (!params->duration) osnoise_top_usage(params, "Invalid -D duration\n"); break; case 'e': tevent = trace_event_alloc(optarg); if (!tevent) { err_msg("Error alloc trace event"); exit(EXIT_FAILURE); } if (params->events) tevent->next = params->events; params->events = tevent; break; case 'h': case '?': osnoise_top_usage(params, NULL); break; case 'H': params->hk_cpus = 1; retval = parse_cpu_set(optarg, &params->hk_cpu_set); if (retval) { err_msg("Error parsing house keeping CPUs\n"); exit(EXIT_FAILURE); } break; case 'p': params->period = get_llong_from_str(optarg); if (params->period > 10000000) osnoise_top_usage(params, "Period longer than 10 s\n"); break; case 'P': retval = parse_prio(optarg, &params->sched_param); if (retval == -1) osnoise_top_usage(params, "Invalid -P priority"); params->set_sched = 1; break; case 'q': params->quiet = 1; break; case 'r': params->runtime = get_llong_from_str(optarg); if (params->runtime < 100) osnoise_top_usage(params, "Runtime shorter than 100 us\n"); break; case 's': params->stop_us = get_llong_from_str(optarg); break; case 'S': params->stop_total_us = get_llong_from_str(optarg); break; case 't': if (optarg) /* skip = */ params->trace_output = &optarg[1]; else params->trace_output = "osnoise_trace.txt"; break; case 'T': params->threshold 
= get_llong_from_str(optarg); break; case '0': /* trigger */ if (params->events) { retval = trace_event_add_trigger(params->events, optarg); if (retval) { err_msg("Error adding trigger %s\n", optarg); exit(EXIT_FAILURE); } } else { osnoise_top_usage(params, "--trigger requires a previous -e\n"); } break; case '1': /* filter */ if (params->events) { retval = trace_event_add_filter(params->events, optarg); if (retval) { err_msg("Error adding filter %s\n", optarg); exit(EXIT_FAILURE); } } else { osnoise_top_usage(params, "--filter requires a previous -e\n"); } break; default: osnoise_top_usage(params, "Invalid option"); } } if (geteuid()) { err_msg("osnoise needs root permission\n"); exit(EXIT_FAILURE); } return params; } /* * osnoise_top_apply_config - apply the top configs to the initialized tool */ static int osnoise_top_apply_config(struct osnoise_tool *tool, struct osnoise_top_params *params) { int retval; if (!params->sleep_time) params->sleep_time = 1; if (params->cpus) { retval = osnoise_set_cpus(tool->context, params->cpus); if (retval) { err_msg("Failed to apply CPUs config\n"); goto out_err; } } if (params->runtime || params->period) { retval = osnoise_set_runtime_period(tool->context, params->runtime, params->period); if (retval) { err_msg("Failed to set runtime and/or period\n"); goto out_err; } } if (params->stop_us) { retval = osnoise_set_stop_us(tool->context, params->stop_us); if (retval) { err_msg("Failed to set stop us\n"); goto out_err; } } if (params->stop_total_us) { retval = osnoise_set_stop_total_us(tool->context, params->stop_total_us); if (retval) { err_msg("Failed to set stop total us\n"); goto out_err; } } if (params->threshold) { retval = osnoise_set_tracing_thresh(tool->context, params->threshold); if (retval) { err_msg("Failed to set tracing_thresh\n"); goto out_err; } } if (params->mode == MODE_HWNOISE) { retval = osnoise_set_irq_disable(tool->context, 1); if (retval) { err_msg("Failed to set OSNOISE_IRQ_DISABLE option\n"); goto 
out_err; } } if (params->hk_cpus) { retval = sched_setaffinity(getpid(), sizeof(params->hk_cpu_set), &params->hk_cpu_set); if (retval == -1) { err_msg("Failed to set rtla to the house keeping CPUs\n"); goto out_err; } } else if (params->cpus) { /* * Even if the user do not set a house-keeping CPU, try to * move rtla to a CPU set different to the one where the user * set the workload to run. * * No need to check results as this is an automatic attempt. */ auto_house_keeping(&params->monitored_cpus); } return 0; out_err: return -1; } /* * osnoise_init_top - initialize a osnoise top tool with parameters */ struct osnoise_tool *osnoise_init_top(struct osnoise_top_params *params) { struct osnoise_tool *tool; int nr_cpus; nr_cpus = sysconf(_SC_NPROCESSORS_CONF); tool = osnoise_init_tool("osnoise_top"); if (!tool) return NULL; tool->data = osnoise_alloc_top(nr_cpus); if (!tool->data) goto out_err; tool->params = params; tep_register_event_handler(tool->trace.tep, -1, "ftrace", "osnoise", osnoise_top_handler, NULL); return tool; out_err: osnoise_free_top(tool->data); osnoise_destroy_tool(tool); return NULL; } static int stop_tracing; static void stop_top(int sig) { stop_tracing = 1; } /* * osnoise_top_set_signals - handles the signal to stop the tool */ static void osnoise_top_set_signals(struct osnoise_top_params *params) { signal(SIGINT, stop_top); if (params->duration) { signal(SIGALRM, stop_top); alarm(params->duration); } } int osnoise_top_main(int argc, char **argv) { struct osnoise_top_params *params; struct osnoise_tool *record = NULL; struct osnoise_tool *tool = NULL; struct trace_instance *trace; int return_value = 1; int retval; params = osnoise_top_parse_args(argc, argv); if (!params) exit(1); tool = osnoise_init_top(params); if (!tool) { err_msg("Could not init osnoise top\n"); goto out_exit; } retval = osnoise_top_apply_config(tool, params); if (retval) { err_msg("Could not apply config\n"); goto out_free; } trace = &tool->trace; retval = 
enable_osnoise(trace); if (retval) { err_msg("Failed to enable osnoise tracer\n"); goto out_free; } if (params->set_sched) { retval = set_comm_sched_attr("osnoise/", &params->sched_param); if (retval) { err_msg("Failed to set sched parameters\n"); goto out_free; } } if (params->cgroup) { retval = set_comm_cgroup("osnoise/", params->cgroup_name); if (!retval) { err_msg("Failed to move threads to cgroup\n"); goto out_free; } } if (params->trace_output) { record = osnoise_init_trace_tool("osnoise"); if (!record) { err_msg("Failed to enable the trace instance\n"); goto out_free; } if (params->events) { retval = trace_events_enable(&record->trace, params->events); if (retval) goto out_top; } } /* * Start the tracer here, after having set all instances. * * Let the trace instance start first for the case of hitting a stop * tracing while enabling other instances. The trace instance is the * one with most valuable information. */ if (params->trace_output) trace_instance_start(&record->trace); trace_instance_start(trace); tool->start_time = time(NULL); osnoise_top_set_signals(params); while (!stop_tracing) { sleep(params->sleep_time); retval = tracefs_iterate_raw_events(trace->tep, trace->inst, NULL, 0, collect_registered_events, trace); if (retval < 0) { err_msg("Error iterating on events\n"); goto out_top; } if (!params->quiet) osnoise_print_stats(params, tool); if (trace_is_off(&tool->trace, &record->trace)) break; } osnoise_print_stats(params, tool); return_value = 0; if (trace_is_off(&tool->trace, &record->trace)) { printf("osnoise hit stop tracing\n"); if (params->trace_output) { printf(" Saving trace to %s\n", params->trace_output); save_trace_to_file(record->trace.inst, params->trace_output); } } out_top: trace_events_destroy(&record->trace, params->events); params->events = NULL; out_free: osnoise_free_top(tool->data); osnoise_destroy_tool(record); osnoise_destroy_tool(tool); free(params); out_exit: exit(return_value); }
linux-master
tools/tracing/rtla/src/osnoise_top.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023 Red Hat Inc, Daniel Bristot de Oliveira <[email protected]>
 */

#define _GNU_SOURCE
#include <sched.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <tracefs.h>
#include <pthread.h>
#include <sys/wait.h>
#include <sys/prctl.h>

#include "utils.h"
#include "timerlat_u.h"

/*
 * This is the user-space main for the tool timerlatu/ threads.
 *
 * It is as simple as this:
 * - set affinity
 * - set priority
 * - open tracer fd
 * - spin
 * - close
 *
 * Runs inside a forked child (see timerlat_u_dispatcher) pinned to @cpu.
 * Blocks forever in read() on the per-cpu timerlat_fd; the parent kills it
 * with SIGKILL to stop the run. Never returns: all paths call exit() (or
 * pthread_exit() on the cgroup error path).
 */
static int timerlat_u_main(int cpu, struct timerlat_u_params *params)
{
	struct sched_param sp = { .sched_priority = 95 };
	char buffer[1024];
	int timerlat_fd;
	cpu_set_t set;
	int retval;

	/*
	 * This all is only setting up the tool.
	 */
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);

	/* pin this child to its CPU before touching the per-cpu timerlat_fd */
	retval = sched_setaffinity(gettid(), sizeof(set), &set);
	if (retval == -1) {
		err_msg("Error setting user thread affinity\n");
		exit(1);
	}

	if (!params->sched_param) {
		/* no -P given: default to SCHED_FIFO prio 95 */
		retval = sched_setscheduler(0, SCHED_FIFO, &sp);
		if (retval < 0) {
			err_msg("Error setting timerlat u default priority: %s\n", strerror(errno));
			exit(1);
		}
	} else {
		retval = __set_sched_attr(getpid(), params->sched_param);
		if (retval) {
			/* __set_sched_attr prints an error message, so */
			/* NOTE(review): exits 0 on a failure path - confirm the status is intended */
			exit(0);
		}
	}

	if (params->cgroup_name) {
		retval = set_pid_cgroup(gettid(), params->cgroup_name);
		if (!retval) {
			err_msg("Error setting timerlat u cgroup pid\n");
			/*
			 * NOTE(review): this is a forked child, so pthread_exit()
			 * ends the child's only thread (implicit status 0);
			 * exit(1) may be what was meant - confirm.
			 */
			pthread_exit(&retval);
		}
	}

	/*
	 * This is the tool's loop. If you want to use as base for your own tool...
	 * go ahead.
	 */
	snprintf(buffer, sizeof(buffer), "osnoise/per_cpu/cpu%d/timerlat_fd", cpu);

	timerlat_fd = tracefs_instance_file_open(NULL, buffer, O_RDONLY);
	if (timerlat_fd < 0) {
		err_msg("Error opening %s:%s\n", buffer, strerror(errno));
		exit(1);
	}

	debug_msg("User-space timerlat pid %d on cpu %d\n", gettid(), cpu);

	/* add should continue with a signal handler */
	while (true) {
		/* each read blocks until the timerlat tracer wakes this thread */
		retval = read(timerlat_fd, buffer, 1024);
		if (retval < 0)
			break;
	}

	close(timerlat_fd);

	debug_msg("Leaving timerlat pid %d on cpu %d\n", gettid(), cpu);
	exit(0);
}

/*
 * timerlat_u_send_kill - send a kill signal for all processes
 *
 * Return the number of processes that received the kill.
 *
 * @procs is indexed by CPU; a zero entry means "no child for this CPU"
 * (either never forked or already reaped) and is skipped.
 */
static int timerlat_u_send_kill(pid_t *procs, int nr_cpus)
{
	int killed = 0;
	int i, retval;

	for (i = 0; i < nr_cpus; i++) {
		if (!procs[i])
			continue;
		retval = kill(procs[i], SIGKILL);
		if (!retval)
			killed++;
		else
			err_msg("Error killing child process %d\n", procs[i]);
	}

	return killed;
}

/**
 * timerlat_u_dispatcher - dispatch one timerlatu/ process per monitored CPU
 *
 * This is a thread main that will fork one new process for each monitored
 * CPU. It will wait for:
 *
 * - rtla to tell to kill the child processes
 * - some child process to die, and the cleanup all the processes
 *
 * whichever comes first.
 *
 * @data is a struct timerlat_u_params *; params->should_run is cleared by
 * the main thread to request shutdown, and params->stopped_running is set
 * here once all children have been reaped.
 *
 * NOTE(review): pthread_exit(&retval) publishes the address of a stack
 * local; only valid if the creator never dereferences the join value after
 * the thread stack is gone - confirm callers do not pthread_join for the
 * value.
 */
void *timerlat_u_dispatcher(void *data)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	struct timerlat_u_params *params = data;
	char proc_name[128];
	int procs_count = 0;
	int retval = 1;
	pid_t *procs;
	int wstatus;
	pid_t pid;
	int i;

	debug_msg("Dispatching timerlat u procs\n");

	/* one slot per CPU, zero-initialized: procs[i] == 0 means no child */
	procs = calloc(nr_cpus, sizeof(pid_t));
	if (!procs)
		pthread_exit(&retval);

	for (i = 0; i < nr_cpus; i++) {
		/* a NULL set means "all CPUs" */
		if (params->set && !CPU_ISSET(i, params->set))
			continue;

		pid = fork();

		/* child */
		if (!pid) {
			/*
			 * rename the process
			 */
			snprintf(proc_name, sizeof(proc_name), "timerlatu/%d", i);
			pthread_setname_np(pthread_self(), proc_name);
			prctl(PR_SET_NAME, (unsigned long)proc_name, 0, 0, 0);

			timerlat_u_main(i, params);
			/* timerlat_u_main should exit()! Anyways... */
			pthread_exit(&retval);
		}

		/* parent */
		if (pid == -1) {
			/* fork failed: tear down whatever was started so far */
			timerlat_u_send_kill(procs, nr_cpus);
			debug_msg("Failed to create child processes");
			pthread_exit(&retval);
		}

		procs_count++;
		procs[i] = pid;
	}

	while (params->should_run) {
		/* check if processes died */
		/*
		 * NOTE(review): waitpid() returns -1 on error too, which also
		 * takes this break - confirm treating errors as "a child
		 * died" is intended.
		 */
		pid = waitpid(-1, &wstatus, WNOHANG);
		if (pid != 0) {
			for (i = 0; i < nr_cpus; i++) {
				if (procs[i] == pid) {
					procs[i] = 0;
					procs_count--;
				}
			}
			break;
		}

		sleep(1);
	}

	/* shutdown requested (or a child died): kill and reap the rest */
	timerlat_u_send_kill(procs, nr_cpus);

	while (procs_count) {
		pid = waitpid(-1, &wstatus, 0);
		if (pid == -1) {
			err_msg("Failed to monitor child processes");
			pthread_exit(&retval);
		}
		for (i = 0; i < nr_cpus; i++) {
			if (procs[i] == pid) {
				procs[i] = 0;
				procs_count--;
			}
		}
	}

	/* tell the main thread all user-space workers are gone */
	params->stopped_running = 1;

	free(procs);
	retval = 0;
	pthread_exit(&retval);
}
linux-master
tools/tracing/rtla/src/timerlat_u.c
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Secure Processor Dynamic Boost Control sample library * * Copyright (C) 2023 Advanced Micro Devices, Inc. * * Author: Mario Limonciello <[email protected]> */ #include <assert.h> #include <string.h> #include <sys/ioctl.h> /* if uapi header isn't installed, this might not yet exist */ #ifndef __packed #define __packed __attribute__((packed)) #endif #include <linux/psp-dbc.h> int get_nonce(int fd, void *nonce_out, void *signature) { struct dbc_user_nonce tmp = { .auth_needed = !!signature, }; int ret; assert(nonce_out); if (signature) memcpy(tmp.signature, signature, sizeof(tmp.signature)); ret = ioctl(fd, DBCIOCNONCE, &tmp); if (ret) return ret; memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce)); return 0; } int set_uid(int fd, __u8 *uid, __u8 *signature) { struct dbc_user_setuid tmp; assert(uid); assert(signature); memcpy(tmp.uid, uid, sizeof(tmp.uid)); memcpy(tmp.signature, signature, sizeof(tmp.signature)); return ioctl(fd, DBCIOCUID, &tmp); } int process_param(int fd, int msg_index, __u8 *signature, int *data) { struct dbc_user_param tmp = { .msg_index = msg_index, .param = *data, }; int ret; assert(signature); assert(data); memcpy(tmp.signature, signature, sizeof(tmp.signature)); ret = ioctl(fd, DBCIOCPARAM, &tmp); if (ret) return ret; *data = tmp.param; return 0; }
linux-master
tools/crypto/ccp/dbc.c
// SPDX-License-Identifier: GPL-2.0-only /** * Userspace PCI Endpoint Test Module * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <[email protected]> */ #include <errno.h> #include <fcntl.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <sys/ioctl.h> #include <unistd.h> #include <linux/pcitest.h> #define BILLION 1E9 static char *result[] = { "NOT OKAY", "OKAY" }; static char *irq[] = { "LEGACY", "MSI", "MSI-X" }; struct pci_test { char *device; char barnum; bool legacyirq; unsigned int msinum; unsigned int msixnum; int irqtype; bool set_irqtype; bool get_irqtype; bool clear_irq; bool read; bool write; bool copy; unsigned long size; bool use_dma; }; static int run_test(struct pci_test *test) { struct pci_endpoint_test_xfer_param param = {}; int ret = -EINVAL; int fd; fd = open(test->device, O_RDWR); if (fd < 0) { perror("can't open PCI Endpoint Test device"); return -ENODEV; } if (test->barnum >= 0 && test->barnum <= 5) { ret = ioctl(fd, PCITEST_BAR, test->barnum); fprintf(stdout, "BAR%d:\t\t", test->barnum); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->set_irqtype) { ret = ioctl(fd, PCITEST_SET_IRQTYPE, test->irqtype); fprintf(stdout, "SET IRQ TYPE TO %s:\t\t", irq[test->irqtype]); if (ret < 0) fprintf(stdout, "FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->get_irqtype) { ret = ioctl(fd, PCITEST_GET_IRQTYPE); fprintf(stdout, "GET IRQ TYPE:\t\t"); if (ret < 0) fprintf(stdout, "FAILED\n"); else fprintf(stdout, "%s\n", irq[ret]); } if (test->clear_irq) { ret = ioctl(fd, PCITEST_CLEAR_IRQ); fprintf(stdout, "CLEAR IRQ:\t\t"); if (ret < 0) fprintf(stdout, "FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->legacyirq) { ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0); fprintf(stdout, "LEGACY IRQ:\t"); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->msinum > 0 && test->msinum <= 32) { ret 
= ioctl(fd, PCITEST_MSI, test->msinum); fprintf(stdout, "MSI%d:\t\t", test->msinum); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->msixnum > 0 && test->msixnum <= 2048) { ret = ioctl(fd, PCITEST_MSIX, test->msixnum); fprintf(stdout, "MSI-X%d:\t\t", test->msixnum); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->write) { param.size = test->size; if (test->use_dma) param.flags = PCITEST_FLAGS_USE_DMA; ret = ioctl(fd, PCITEST_WRITE, &param); fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->read) { param.size = test->size; if (test->use_dma) param.flags = PCITEST_FLAGS_USE_DMA; ret = ioctl(fd, PCITEST_READ, &param); fprintf(stdout, "READ (%7ld bytes):\t\t", test->size); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } if (test->copy) { param.size = test->size; if (test->use_dma) param.flags = PCITEST_FLAGS_USE_DMA; ret = ioctl(fd, PCITEST_COPY, &param); fprintf(stdout, "COPY (%7ld bytes):\t\t", test->size); if (ret < 0) fprintf(stdout, "TEST FAILED\n"); else fprintf(stdout, "%s\n", result[ret]); } fflush(stdout); close(fd); return (ret < 0) ? 
ret : 1 - ret; /* return 0 if test succeeded */ } int main(int argc, char **argv) { int c; struct pci_test *test; test = calloc(1, sizeof(*test)); if (!test) { perror("Fail to allocate memory for pci_test\n"); return -ENOMEM; } /* since '0' is a valid BAR number, initialize it to -1 */ test->barnum = -1; /* set default size as 100KB */ test->size = 0x19000; /* set default endpoint device */ test->device = "/dev/pci-endpoint-test.0"; while ((c = getopt(argc, argv, "D:b:m:x:i:deIlhrwcs:")) != EOF) switch (c) { case 'D': test->device = optarg; continue; case 'b': test->barnum = atoi(optarg); if (test->barnum < 0 || test->barnum > 5) goto usage; continue; case 'l': test->legacyirq = true; continue; case 'm': test->msinum = atoi(optarg); if (test->msinum < 1 || test->msinum > 32) goto usage; continue; case 'x': test->msixnum = atoi(optarg); if (test->msixnum < 1 || test->msixnum > 2048) goto usage; continue; case 'i': test->irqtype = atoi(optarg); if (test->irqtype < 0 || test->irqtype > 2) goto usage; test->set_irqtype = true; continue; case 'I': test->get_irqtype = true; continue; case 'r': test->read = true; continue; case 'w': test->write = true; continue; case 'c': test->copy = true; continue; case 'e': test->clear_irq = true; continue; case 's': test->size = strtoul(optarg, NULL, 0); continue; case 'd': test->use_dma = true; continue; case 'h': default: usage: fprintf(stderr, "usage: %s [options]\n" "Options:\n" "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n" "\t-b <bar num> BAR test (bar number between 0..5)\n" "\t-m <msi num> MSI test (msi number between 1..32)\n" "\t-x <msix num> \tMSI-X test (msix number between 1..2048)\n" "\t-i <irq type> \tSet IRQ type (0 - Legacy, 1 - MSI, 2 - MSI-X)\n" "\t-e Clear IRQ\n" "\t-I Get current IRQ type configured\n" "\t-d Use DMA\n" "\t-l Legacy IRQ test\n" "\t-r Read buffer test\n" "\t-w Write buffer test\n" "\t-c Copy buffer test\n" "\t-s <size> Size of buffer {default: 100KB}\n" "\t-h Print this 
help message\n", argv[0]); return -EINVAL; } return run_test(test); }
linux-master
tools/pci/pcitest.c
// SPDX-License-Identifier: GPL-2.0-only /* * Parser/loader for IHEX formatted data. * * Copyright © 2008 David Woodhouse <[email protected]> * Copyright © 2005 Jan Harkes <[email protected]> */ #include <stdint.h> #include <arpa/inet.h> #include <stdio.h> #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <fcntl.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #define _GNU_SOURCE #include <getopt.h> #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) struct ihex_binrec { struct ihex_binrec *next; /* not part of the real data structure */ uint32_t addr; uint16_t len; uint8_t data[]; }; /** * nybble/hex are little helpers to parse hexadecimal numbers to a byte value **/ static uint8_t nybble(const uint8_t n) { if (n >= '0' && n <= '9') return n - '0'; else if (n >= 'A' && n <= 'F') return n - ('A' - 10); else if (n >= 'a' && n <= 'f') return n - ('a' - 10); return 0; } static uint8_t hex(const uint8_t *data, uint8_t *crc) { uint8_t val = (nybble(data[0]) << 4) | nybble(data[1]); *crc += val; return val; } static int process_ihex(uint8_t *data, ssize_t size); static void file_record(struct ihex_binrec *record); static int output_records(int outfd); static int sort_records = 0; static int wide_records = 0; static int include_jump = 0; static int usage(void) { fprintf(stderr, "ihex2fw: Convert ihex files into binary " "representation for use by Linux kernel\n"); fprintf(stderr, "usage: ihex2fw [<options>] <src.HEX> <dst.fw>\n"); fprintf(stderr, " -w: wide records (16-bit length)\n"); fprintf(stderr, " -s: sort records by address\n"); fprintf(stderr, " -j: include records for CS:IP/EIP address\n"); return 1; } int main(int argc, char **argv) { int infd, outfd; struct stat st; uint8_t *data; int opt; while ((opt = getopt(argc, argv, "wsj")) != -1) { switch (opt) { case 'w': wide_records 
= 1; break; case 's': sort_records = 1; break; case 'j': include_jump = 1; break; default: return usage(); } } if (optind + 2 != argc) return usage(); if (!strcmp(argv[optind], "-")) infd = 0; else infd = open(argv[optind], O_RDONLY); if (infd == -1) { fprintf(stderr, "Failed to open source file: %s", strerror(errno)); return usage(); } if (fstat(infd, &st)) { perror("stat"); return 1; } data = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, infd, 0); if (data == MAP_FAILED) { perror("mmap"); return 1; } if (!strcmp(argv[optind+1], "-")) outfd = 1; else outfd = open(argv[optind+1], O_TRUNC|O_CREAT|O_WRONLY, 0644); if (outfd == -1) { fprintf(stderr, "Failed to open destination file: %s", strerror(errno)); return usage(); } if (process_ihex(data, st.st_size)) return 1; return output_records(outfd); } static int process_ihex(uint8_t *data, ssize_t size) { struct ihex_binrec *record; size_t record_size; uint32_t offset = 0; uint32_t data32; uint8_t type, crc = 0, crcbyte = 0; int i, j; int line = 1; int len; i = 0; next_record: /* search for the start of record character */ while (i < size) { if (data[i] == '\n') line++; if (data[i++] == ':') break; } /* Minimum record length would be about 10 characters */ if (i + 10 > size) { fprintf(stderr, "Can't find valid record at line %d\n", line); return -EINVAL; } len = hex(data + i, &crc); i += 2; if (wide_records) { len <<= 8; len += hex(data + i, &crc); i += 2; } record_size = ALIGN(sizeof(*record) + len, 4); record = malloc(record_size); if (!record) { fprintf(stderr, "out of memory for records\n"); return -ENOMEM; } memset(record, 0, record_size); record->len = len; /* now check if we have enough data to read everything */ if (i + 8 + (record->len * 2) > size) { fprintf(stderr, "Not enough data to read complete record at line %d\n", line); return -EINVAL; } record->addr = hex(data + i, &crc) << 8; i += 2; record->addr |= hex(data + i, &crc); i += 2; type = hex(data + i, &crc); i += 2; for (j = 0; j < record->len; j++, i += 
2) record->data[j] = hex(data + i, &crc); /* check CRC */ crcbyte = hex(data + i, &crc); i += 2; if (crc != 0) { fprintf(stderr, "CRC failure at line %d: got 0x%X, expected 0x%X\n", line, crcbyte, (unsigned char)(crcbyte-crc)); return -EINVAL; } /* Done reading the record */ switch (type) { case 0: /* old style EOF record? */ if (!record->len) break; record->addr += offset; file_record(record); goto next_record; case 1: /* End-Of-File Record */ if (record->addr || record->len) { fprintf(stderr, "Bad EOF record (type 01) format at line %d", line); return -EINVAL; } break; case 2: /* Extended Segment Address Record (HEX86) */ case 4: /* Extended Linear Address Record (HEX386) */ if (record->addr || record->len != 2) { fprintf(stderr, "Bad HEX86/HEX386 record (type %02X) at line %d\n", type, line); return -EINVAL; } /* We shouldn't really be using the offset for HEX86 because * the wraparound case is specified quite differently. */ offset = record->data[0] << 8 | record->data[1]; offset <<= (type == 2 ? 4 : 16); goto next_record; case 3: /* Start Segment Address Record */ case 5: /* Start Linear Address Record */ if (record->addr || record->len != 4) { fprintf(stderr, "Bad Start Address record (type %02X) at line %d\n", type, line); return -EINVAL; } memcpy(&data32, &record->data[0], sizeof(data32)); data32 = htonl(data32); memcpy(&record->data[0], &data32, sizeof(data32)); /* These records contain the CS/IP or EIP where execution * starts. If requested output this as a record. 
*/ if (include_jump) file_record(record); goto next_record; default: fprintf(stderr, "Unknown record (type %02X)\n", type); return -EINVAL; } return 0; } static struct ihex_binrec *records; static void file_record(struct ihex_binrec *record) { struct ihex_binrec **p = &records; while ((*p) && (!sort_records || (*p)->addr < record->addr)) p = &((*p)->next); record->next = *p; *p = record; } static uint16_t ihex_binrec_size(struct ihex_binrec *p) { return p->len + sizeof(p->addr) + sizeof(p->len); } static int output_records(int outfd) { unsigned char zeroes[6] = {0, 0, 0, 0, 0, 0}; struct ihex_binrec *p = records; while (p) { uint16_t writelen = ALIGN(ihex_binrec_size(p), 4); p->addr = htonl(p->addr); p->len = htons(p->len); if (write(outfd, &p->addr, writelen) != writelen) return 1; p = p->next; } /* EOF record is zero length, since we don't bother to represent the type field in the binary version */ if (write(outfd, zeroes, 6) != 6) return 1; return 0; }
linux-master
tools/firmware/ihex2fw.c
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/fou.yaml */
/* YNL-GEN user source */
/*
 * NOTE(review): this file is generator output.  Any fixes noted below
 * (unchecked malloc/calloc results, for instance) belong in the YNL
 * code generator, not in hand edits here.
 */

#include <stdlib.h>
#include <string.h>
#include "fou-user.h"
#include "ynl.h"
#include <linux/fou.h>

#include <libmnl/libmnl.h>
#include <linux/genetlink.h>

/* Enums */

/* Command-number -> spec-name table, indexed by FOU_CMD_* value. */
static const char * const fou_op_strmap[] = {
	[FOU_CMD_ADD] = "add",
	[FOU_CMD_DEL] = "del",
	[FOU_CMD_GET] = "get",
};

/* Return the spec name for a FOU command, or NULL if out of range. */
const char *fou_op_str(int op)
{
	if (op < 0 || op >= (int)MNL_ARRAY_SIZE(fou_op_strmap))
		return NULL;
	return fou_op_strmap[op];
}

static const char * const fou_encap_type_strmap[] = {
	[0] = "unspec",
	[1] = "direct",
	[2] = "gue",
};

/* Return the spec name for an encap type, or NULL if out of range. */
const char *fou_encap_type_str(int value)
{
	if (value < 0 || value >= (int)MNL_ARRAY_SIZE(fou_encap_type_strmap))
		return NULL;
	return fou_encap_type_strmap[value];
}

/* Policies */

/* Attribute descriptions used to validate kernel replies. */
struct ynl_policy_attr fou_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_UNSPEC] = { .name = "unspec", .type = YNL_PT_REJECT, },
	[FOU_ATTR_PORT] = { .name = "port", .type = YNL_PT_U16, },
	[FOU_ATTR_AF] = { .name = "af", .type = YNL_PT_U8, },
	[FOU_ATTR_IPPROTO] = { .name = "ipproto", .type = YNL_PT_U8, },
	[FOU_ATTR_TYPE] = { .name = "type", .type = YNL_PT_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .name = "remcsum_nopartial", .type = YNL_PT_FLAG, },
	[FOU_ATTR_LOCAL_V4] = { .name = "local_v4", .type = YNL_PT_U32, },
	[FOU_ATTR_LOCAL_V6] = { .name = "local_v6", .type = YNL_PT_BINARY,},
	[FOU_ATTR_PEER_V4] = { .name = "peer_v4", .type = YNL_PT_U32, },
	[FOU_ATTR_PEER_V6] = { .name = "peer_v6", .type = YNL_PT_BINARY,},
	[FOU_ATTR_PEER_PORT] = { .name = "peer_port", .type = YNL_PT_U16, },
	[FOU_ATTR_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
};

struct ynl_policy_nest fou_nest = {
	.max_attr = FOU_ATTR_MAX,
	.table = fou_policy,
};

/* Common nested types */
/* ============== FOU_CMD_ADD ============== */
/* FOU_CMD_ADD - do */

/* Free an add request, including its heap-allocated v6 address blobs. */
void fou_add_req_free(struct fou_add_req *req)
{
	free(req->local_v6);
	free(req->peer_v6);
	free(req);
}

/*
 * Send FOU_CMD_ADD.  Only attributes flagged in req->_present are put
 * into the message.  Returns 0 on success, -1 on failure.
 */
int fou_add(struct ynl_sock *ys, struct fou_add_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, FOU_CMD_ADD, 1);
	ys->req_policy = &fou_nest;

	if (req->_present.port)
		mnl_attr_put_u16(nlh, FOU_ATTR_PORT, req->port);
	if (req->_present.ipproto)
		mnl_attr_put_u8(nlh, FOU_ATTR_IPPROTO, req->ipproto);
	if (req->_present.type)
		mnl_attr_put_u8(nlh, FOU_ATTR_TYPE, req->type);
	if (req->_present.remcsum_nopartial)
		mnl_attr_put(nlh, FOU_ATTR_REMCSUM_NOPARTIAL, 0, NULL);
	if (req->_present.local_v4)
		mnl_attr_put_u32(nlh, FOU_ATTR_LOCAL_V4, req->local_v4);
	if (req->_present.peer_v4)
		mnl_attr_put_u32(nlh, FOU_ATTR_PEER_V4, req->peer_v4);
	if (req->_present.local_v6_len)
		mnl_attr_put(nlh, FOU_ATTR_LOCAL_V6, req->_present.local_v6_len, req->local_v6);
	if (req->_present.peer_v6_len)
		mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);
	if (req->_present.peer_port)
		mnl_attr_put_u16(nlh, FOU_ATTR_PEER_PORT, req->peer_port);
	if (req->_present.ifindex)
		mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== FOU_CMD_DEL ============== */
/* FOU_CMD_DEL - do */

/* Free a del request, including its heap-allocated v6 address blobs. */
void fou_del_req_free(struct fou_del_req *req)
{
	free(req->local_v6);
	free(req->peer_v6);
	free(req);
}

/*
 * Send FOU_CMD_DEL; attribute selection mirrors fou_add().
 * Returns 0 on success, -1 on failure.
 */
int fou_del(struct ynl_sock *ys, struct fou_del_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, FOU_CMD_DEL, 1);
	ys->req_policy = &fou_nest;

	if (req->_present.af)
		mnl_attr_put_u8(nlh, FOU_ATTR_AF, req->af);
	if (req->_present.ifindex)
		mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);
	if (req->_present.port)
		mnl_attr_put_u16(nlh, FOU_ATTR_PORT, req->port);
	if (req->_present.peer_port)
		mnl_attr_put_u16(nlh, FOU_ATTR_PEER_PORT, req->peer_port);
	if (req->_present.local_v4)
		mnl_attr_put_u32(nlh, FOU_ATTR_LOCAL_V4, req->local_v4);
	if (req->_present.peer_v4)
		mnl_attr_put_u32(nlh, FOU_ATTR_PEER_V4, req->peer_v4);
	if (req->_present.local_v6_len)
		mnl_attr_put(nlh, FOU_ATTR_LOCAL_V6, req->_present.local_v6_len, req->local_v6);
	if (req->_present.peer_v6_len)
		mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== FOU_CMD_GET ============== */
/* FOU_CMD_GET - do */

void fou_get_req_free(struct fou_get_req *req)
{
	free(req->local_v6);
	free(req->peer_v6);
	free(req);
}

void fou_get_rsp_free(struct fou_get_rsp *rsp)
{
	free(rsp->local_v6);
	free(rsp->peer_v6);
	free(rsp);
}

/*
 * Netlink callback: decode one FOU_CMD_GET reply message into the
 * struct fou_get_rsp passed via yarg->data, setting the matching
 * _present bits.  Returns MNL_CB_OK / MNL_CB_ERROR.
 */
int fou_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct fou_get_rsp *dst;

	dst = yarg->data;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == FOU_ATTR_PORT) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.port = 1;
			dst->port = mnl_attr_get_u16(attr);
		} else if (type == FOU_ATTR_IPPROTO) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ipproto = 1;
			dst->ipproto = mnl_attr_get_u8(attr);
		} else if (type == FOU_ATTR_TYPE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.type = 1;
			dst->type = mnl_attr_get_u8(attr);
		} else if (type == FOU_ATTR_REMCSUM_NOPARTIAL) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.remcsum_nopartial = 1;
		} else if (type == FOU_ATTR_LOCAL_V4) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.local_v4 = 1;
			dst->local_v4 = mnl_attr_get_u32(attr);
		} else if (type == FOU_ATTR_PEER_V4) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.peer_v4 = 1;
			dst->peer_v4 = mnl_attr_get_u32(attr);
		} else if (type == FOU_ATTR_LOCAL_V6) {
			unsigned int len;

			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;

			/* NOTE(review): malloc result unchecked before the
			 * memcpy below — generator-level issue.
			 */
			len = mnl_attr_get_payload_len(attr);
			dst->_present.local_v6_len = len;
			dst->local_v6 = malloc(len);
			memcpy(dst->local_v6, mnl_attr_get_payload(attr), len);
		} else if (type == FOU_ATTR_PEER_V6) {
			unsigned int len;

			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;

			/* NOTE(review): malloc result unchecked here too. */
			len = mnl_attr_get_payload_len(attr);
			dst->_present.peer_v6_len = len;
			dst->peer_v6 = malloc(len);
			memcpy(dst->peer_v6, mnl_attr_get_payload(attr), len);
		} else if (type == FOU_ATTR_PEER_PORT) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.peer_port = 1;
			dst->peer_port = mnl_attr_get_u16(attr);
		} else if (type == FOU_ATTR_IFINDEX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ifindex = 1;
			dst->ifindex = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/*
 * Send FOU_CMD_GET and return a newly allocated, parsed response
 * (caller frees with fou_get_rsp_free()), or NULL on failure.
 */
struct fou_get_rsp *fou_get(struct ynl_sock *ys, struct fou_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct fou_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, FOU_CMD_GET, 1);
	ys->req_policy = &fou_nest;
	yrs.yarg.rsp_policy = &fou_nest;

	if (req->_present.af)
		mnl_attr_put_u8(nlh, FOU_ATTR_AF, req->af);
	if (req->_present.ifindex)
		mnl_attr_put_u32(nlh, FOU_ATTR_IFINDEX, req->ifindex);
	if (req->_present.port)
		mnl_attr_put_u16(nlh, FOU_ATTR_PORT, req->port);
	if (req->_present.peer_port)
		mnl_attr_put_u16(nlh, FOU_ATTR_PEER_PORT, req->peer_port);
	if (req->_present.local_v4)
		mnl_attr_put_u32(nlh, FOU_ATTR_LOCAL_V4, req->local_v4);
	if (req->_present.peer_v4)
		mnl_attr_put_u32(nlh, FOU_ATTR_PEER_V4, req->peer_v4);
	if (req->_present.local_v6_len)
		mnl_attr_put(nlh, FOU_ATTR_LOCAL_V6, req->_present.local_v6_len, req->local_v6);
	if (req->_present.peer_v6_len)
		mnl_attr_put(nlh, FOU_ATTR_PEER_V6, req->_present.peer_v6_len, req->peer_v6);

	/* NOTE(review): calloc result unchecked; a NULL rsp would be
	 * dereferenced by the parse callback — generator-level issue.
	 */
	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = fou_get_rsp_parse;
	yrs.rsp_cmd = FOU_CMD_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	fou_get_rsp_free(rsp);
	return NULL;
}

/* FOU_CMD_GET - dump */

/* Free a whole dump result list (terminated by YNL_LIST_END). */
void fou_get_list_free(struct fou_get_list *rsp)
{
	struct fou_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		free(rsp->obj.local_v6);
		free(rsp->obj.peer_v6);
		free(rsp);
	}
}

/*
 * Dump all FOU instances.  Returns the head of the result list
 * (caller frees with fou_get_list_free()), or NULL on failure.
 */
struct fou_get_list *fou_get_dump(struct ynl_sock *ys)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct fou_get_list);
	yds.cb = fou_get_rsp_parse;
	yds.rsp_cmd = FOU_CMD_GET;
	yds.rsp_policy = &fou_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, FOU_CMD_GET, 1);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	fou_get_list_free(yds.first);
	return NULL;
}

const struct ynl_family ynl_fou_family =  {
	.name		= "fou",
};
linux-master
tools/net/ynl/generated/fou-user.c
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/ethtool.yaml */ /* YNL-GEN user source */ /* YNL-ARG --user-header linux/ethtool_netlink.h --exclude-op stats-get */ #include <stdlib.h> #include <string.h> #include "ethtool-user.h" #include "ynl.h" #include <linux/ethtool.h> #include <libmnl/libmnl.h> #include <linux/genetlink.h> #include "linux/ethtool_netlink.h" /* Enums */ static const char * const ethtool_op_strmap[] = { [ETHTOOL_MSG_STRSET_GET] = "strset-get", [ETHTOOL_MSG_LINKINFO_GET] = "linkinfo-get", [3] = "linkinfo-ntf", [ETHTOOL_MSG_LINKMODES_GET] = "linkmodes-get", [5] = "linkmodes-ntf", [ETHTOOL_MSG_LINKSTATE_GET] = "linkstate-get", [ETHTOOL_MSG_DEBUG_GET] = "debug-get", [8] = "debug-ntf", [ETHTOOL_MSG_WOL_GET] = "wol-get", [10] = "wol-ntf", [ETHTOOL_MSG_FEATURES_GET] = "features-get", [ETHTOOL_MSG_FEATURES_SET] = "features-set", [13] = "features-ntf", [14] = "privflags-get", [15] = "privflags-ntf", [16] = "rings-get", [17] = "rings-ntf", [18] = "channels-get", [19] = "channels-ntf", [20] = "coalesce-get", [21] = "coalesce-ntf", [22] = "pause-get", [23] = "pause-ntf", [24] = "eee-get", [25] = "eee-ntf", [26] = "tsinfo-get", [27] = "cable-test-ntf", [28] = "cable-test-tdr-ntf", [29] = "tunnel-info-get", [30] = "fec-get", [31] = "fec-ntf", [32] = "module-eeprom-get", [34] = "phc-vclocks-get", [35] = "module-get", [36] = "module-ntf", [37] = "pse-get", [ETHTOOL_MSG_RSS_GET] = "rss-get", [ETHTOOL_MSG_PLCA_GET_CFG] = "plca-get-cfg", [40] = "plca-get-status", [41] = "plca-ntf", [ETHTOOL_MSG_MM_GET] = "mm-get", [43] = "mm-ntf", }; const char *ethtool_op_str(int op) { if (op < 0 || op >= (int)MNL_ARRAY_SIZE(ethtool_op_strmap)) return NULL; return ethtool_op_strmap[op]; } static const char * const ethtool_udp_tunnel_type_strmap[] = { [0] = "vxlan", [1] = "geneve", [2] = "vxlan-gpe", }; const char *ethtool_udp_tunnel_type_str(int value) { if (value < 0 
|| value >= (int)MNL_ARRAY_SIZE(ethtool_udp_tunnel_type_strmap)) return NULL; return ethtool_udp_tunnel_type_strmap[value]; } static const char * const ethtool_stringset_strmap[] = { }; const char *ethtool_stringset_str(enum ethtool_stringset value) { if (value < 0 || value >= (int)MNL_ARRAY_SIZE(ethtool_stringset_strmap)) return NULL; return ethtool_stringset_strmap[value]; } /* Policies */ struct ynl_policy_attr ethtool_header_policy[ETHTOOL_A_HEADER_MAX + 1] = { [ETHTOOL_A_HEADER_DEV_INDEX] = { .name = "dev-index", .type = YNL_PT_U32, }, [ETHTOOL_A_HEADER_DEV_NAME] = { .name = "dev-name", .type = YNL_PT_NUL_STR, }, [ETHTOOL_A_HEADER_FLAGS] = { .name = "flags", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_header_nest = { .max_attr = ETHTOOL_A_HEADER_MAX, .table = ethtool_header_policy, }; struct ynl_policy_attr ethtool_pause_stat_policy[ETHTOOL_A_PAUSE_STAT_MAX + 1] = { [ETHTOOL_A_PAUSE_STAT_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, }, [ETHTOOL_A_PAUSE_STAT_TX_FRAMES] = { .name = "tx-frames", .type = YNL_PT_U64, }, [ETHTOOL_A_PAUSE_STAT_RX_FRAMES] = { .name = "rx-frames", .type = YNL_PT_U64, }, }; struct ynl_policy_nest ethtool_pause_stat_nest = { .max_attr = ETHTOOL_A_PAUSE_STAT_MAX, .table = ethtool_pause_stat_policy, }; struct ynl_policy_attr ethtool_cable_test_tdr_cfg_policy[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1] = { [ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .name = "first", .type = YNL_PT_U32, }, [ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST] = { .name = "last", .type = YNL_PT_U32, }, [ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP] = { .name = "step", .type = YNL_PT_U32, }, [ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR] = { .name = "pair", .type = YNL_PT_U8, }, }; struct ynl_policy_nest ethtool_cable_test_tdr_cfg_nest = { .max_attr = ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, .table = ethtool_cable_test_tdr_cfg_policy, }; struct ynl_policy_attr ethtool_fec_stat_policy[ETHTOOL_A_FEC_STAT_MAX + 1] = { [ETHTOOL_A_FEC_STAT_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, }, 
[ETHTOOL_A_FEC_STAT_CORRECTED] = { .name = "corrected", .type = YNL_PT_BINARY,}, [ETHTOOL_A_FEC_STAT_UNCORR] = { .name = "uncorr", .type = YNL_PT_BINARY,}, [ETHTOOL_A_FEC_STAT_CORR_BITS] = { .name = "corr-bits", .type = YNL_PT_BINARY,}, }; struct ynl_policy_nest ethtool_fec_stat_nest = { .max_attr = ETHTOOL_A_FEC_STAT_MAX, .table = ethtool_fec_stat_policy, }; struct ynl_policy_attr ethtool_mm_stat_policy[ETHTOOL_A_MM_STAT_MAX + 1] = { [ETHTOOL_A_MM_STAT_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, }, [ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS] = { .name = "reassembly-errors", .type = YNL_PT_U64, }, [ETHTOOL_A_MM_STAT_SMD_ERRORS] = { .name = "smd-errors", .type = YNL_PT_U64, }, [ETHTOOL_A_MM_STAT_REASSEMBLY_OK] = { .name = "reassembly-ok", .type = YNL_PT_U64, }, [ETHTOOL_A_MM_STAT_RX_FRAG_COUNT] = { .name = "rx-frag-count", .type = YNL_PT_U64, }, [ETHTOOL_A_MM_STAT_TX_FRAG_COUNT] = { .name = "tx-frag-count", .type = YNL_PT_U64, }, [ETHTOOL_A_MM_STAT_HOLD_COUNT] = { .name = "hold-count", .type = YNL_PT_U64, }, }; struct ynl_policy_nest ethtool_mm_stat_nest = { .max_attr = ETHTOOL_A_MM_STAT_MAX, .table = ethtool_mm_stat_policy, }; struct ynl_policy_attr ethtool_cable_result_policy[ETHTOOL_A_CABLE_RESULT_MAX + 1] = { [ETHTOOL_A_CABLE_RESULT_PAIR] = { .name = "pair", .type = YNL_PT_U8, }, [ETHTOOL_A_CABLE_RESULT_CODE] = { .name = "code", .type = YNL_PT_U8, }, }; struct ynl_policy_nest ethtool_cable_result_nest = { .max_attr = ETHTOOL_A_CABLE_RESULT_MAX, .table = ethtool_cable_result_policy, }; struct ynl_policy_attr ethtool_cable_fault_length_policy[ETHTOOL_A_CABLE_FAULT_LENGTH_MAX + 1] = { [ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR] = { .name = "pair", .type = YNL_PT_U8, }, [ETHTOOL_A_CABLE_FAULT_LENGTH_CM] = { .name = "cm", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_cable_fault_length_nest = { .max_attr = ETHTOOL_A_CABLE_FAULT_LENGTH_MAX, .table = ethtool_cable_fault_length_policy, }; struct ynl_policy_attr ethtool_bitset_bit_policy[ETHTOOL_A_BITSET_BIT_MAX + 1] = { 
[ETHTOOL_A_BITSET_BIT_INDEX] = { .name = "index", .type = YNL_PT_U32, }, [ETHTOOL_A_BITSET_BIT_NAME] = { .name = "name", .type = YNL_PT_NUL_STR, }, [ETHTOOL_A_BITSET_BIT_VALUE] = { .name = "value", .type = YNL_PT_FLAG, }, }; struct ynl_policy_nest ethtool_bitset_bit_nest = { .max_attr = ETHTOOL_A_BITSET_BIT_MAX, .table = ethtool_bitset_bit_policy, }; struct ynl_policy_attr ethtool_tunnel_udp_entry_policy[ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX + 1] = { [ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT] = { .name = "port", .type = YNL_PT_U16, }, [ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE] = { .name = "type", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_tunnel_udp_entry_nest = { .max_attr = ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX, .table = ethtool_tunnel_udp_entry_policy, }; struct ynl_policy_attr ethtool_string_policy[ETHTOOL_A_STRING_MAX + 1] = { [ETHTOOL_A_STRING_INDEX] = { .name = "index", .type = YNL_PT_U32, }, [ETHTOOL_A_STRING_VALUE] = { .name = "value", .type = YNL_PT_NUL_STR, }, }; struct ynl_policy_nest ethtool_string_nest = { .max_attr = ETHTOOL_A_STRING_MAX, .table = ethtool_string_policy, }; struct ynl_policy_attr ethtool_cable_nest_policy[ETHTOOL_A_CABLE_NEST_MAX + 1] = { [ETHTOOL_A_CABLE_NEST_RESULT] = { .name = "result", .type = YNL_PT_NEST, .nest = &ethtool_cable_result_nest, }, [ETHTOOL_A_CABLE_NEST_FAULT_LENGTH] = { .name = "fault-length", .type = YNL_PT_NEST, .nest = &ethtool_cable_fault_length_nest, }, }; struct ynl_policy_nest ethtool_cable_nest_nest = { .max_attr = ETHTOOL_A_CABLE_NEST_MAX, .table = ethtool_cable_nest_policy, }; struct ynl_policy_attr ethtool_bitset_bits_policy[ETHTOOL_A_BITSET_BITS_MAX + 1] = { [ETHTOOL_A_BITSET_BITS_BIT] = { .name = "bit", .type = YNL_PT_NEST, .nest = &ethtool_bitset_bit_nest, }, }; struct ynl_policy_nest ethtool_bitset_bits_nest = { .max_attr = ETHTOOL_A_BITSET_BITS_MAX, .table = ethtool_bitset_bits_policy, }; struct ynl_policy_attr ethtool_strings_policy[ETHTOOL_A_STRINGS_MAX + 1] = { [ETHTOOL_A_STRINGS_STRING] = { .name = "string", .type 
= YNL_PT_NEST, .nest = &ethtool_string_nest, }, }; struct ynl_policy_nest ethtool_strings_nest = { .max_attr = ETHTOOL_A_STRINGS_MAX, .table = ethtool_strings_policy, }; struct ynl_policy_attr ethtool_bitset_policy[ETHTOOL_A_BITSET_MAX + 1] = { [ETHTOOL_A_BITSET_NOMASK] = { .name = "nomask", .type = YNL_PT_FLAG, }, [ETHTOOL_A_BITSET_SIZE] = { .name = "size", .type = YNL_PT_U32, }, [ETHTOOL_A_BITSET_BITS] = { .name = "bits", .type = YNL_PT_NEST, .nest = &ethtool_bitset_bits_nest, }, }; struct ynl_policy_nest ethtool_bitset_nest = { .max_attr = ETHTOOL_A_BITSET_MAX, .table = ethtool_bitset_policy, }; struct ynl_policy_attr ethtool_stringset_policy[ETHTOOL_A_STRINGSET_MAX + 1] = { [ETHTOOL_A_STRINGSET_ID] = { .name = "id", .type = YNL_PT_U32, }, [ETHTOOL_A_STRINGSET_COUNT] = { .name = "count", .type = YNL_PT_U32, }, [ETHTOOL_A_STRINGSET_STRINGS] = { .name = "strings", .type = YNL_PT_NEST, .nest = &ethtool_strings_nest, }, }; struct ynl_policy_nest ethtool_stringset_nest = { .max_attr = ETHTOOL_A_STRINGSET_MAX, .table = ethtool_stringset_policy, }; struct ynl_policy_attr ethtool_tunnel_udp_table_policy[ETHTOOL_A_TUNNEL_UDP_TABLE_MAX + 1] = { [ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE] = { .name = "size", .type = YNL_PT_U32, }, [ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES] = { .name = "types", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY] = { .name = "entry", .type = YNL_PT_NEST, .nest = &ethtool_tunnel_udp_entry_nest, }, }; struct ynl_policy_nest ethtool_tunnel_udp_table_nest = { .max_attr = ETHTOOL_A_TUNNEL_UDP_TABLE_MAX, .table = ethtool_tunnel_udp_table_policy, }; struct ynl_policy_attr ethtool_stringsets_policy[ETHTOOL_A_STRINGSETS_MAX + 1] = { [ETHTOOL_A_STRINGSETS_STRINGSET] = { .name = "stringset", .type = YNL_PT_NEST, .nest = &ethtool_stringset_nest, }, }; struct ynl_policy_nest ethtool_stringsets_nest = { .max_attr = ETHTOOL_A_STRINGSETS_MAX, .table = ethtool_stringsets_policy, }; struct ynl_policy_attr 
ethtool_tunnel_udp_policy[ETHTOOL_A_TUNNEL_UDP_MAX + 1] = { [ETHTOOL_A_TUNNEL_UDP_TABLE] = { .name = "table", .type = YNL_PT_NEST, .nest = &ethtool_tunnel_udp_table_nest, }, }; struct ynl_policy_nest ethtool_tunnel_udp_nest = { .max_attr = ETHTOOL_A_TUNNEL_UDP_MAX, .table = ethtool_tunnel_udp_policy, }; struct ynl_policy_attr ethtool_strset_policy[ETHTOOL_A_STRSET_MAX + 1] = { [ETHTOOL_A_STRSET_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_STRSET_STRINGSETS] = { .name = "stringsets", .type = YNL_PT_NEST, .nest = &ethtool_stringsets_nest, }, [ETHTOOL_A_STRSET_COUNTS_ONLY] = { .name = "counts-only", .type = YNL_PT_FLAG, }, }; struct ynl_policy_nest ethtool_strset_nest = { .max_attr = ETHTOOL_A_STRSET_MAX, .table = ethtool_strset_policy, }; struct ynl_policy_attr ethtool_linkinfo_policy[ETHTOOL_A_LINKINFO_MAX + 1] = { [ETHTOOL_A_LINKINFO_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_LINKINFO_PORT] = { .name = "port", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKINFO_PHYADDR] = { .name = "phyaddr", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKINFO_TP_MDIX] = { .name = "tp-mdix", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKINFO_TP_MDIX_CTRL] = { .name = "tp-mdix-ctrl", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKINFO_TRANSCEIVER] = { .name = "transceiver", .type = YNL_PT_U8, }, }; struct ynl_policy_nest ethtool_linkinfo_nest = { .max_attr = ETHTOOL_A_LINKINFO_MAX, .table = ethtool_linkinfo_policy, }; struct ynl_policy_attr ethtool_linkmodes_policy[ETHTOOL_A_LINKMODES_MAX + 1] = { [ETHTOOL_A_LINKMODES_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_LINKMODES_AUTONEG] = { .name = "autoneg", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKMODES_OURS] = { .name = "ours", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_LINKMODES_PEER] = { .name = "peer", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_LINKMODES_SPEED] = { .name = "speed", 
.type = YNL_PT_U32, }, [ETHTOOL_A_LINKMODES_DUPLEX] = { .name = "duplex", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG] = { .name = "master-slave-cfg", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE] = { .name = "master-slave-state", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKMODES_LANES] = { .name = "lanes", .type = YNL_PT_U32, }, [ETHTOOL_A_LINKMODES_RATE_MATCHING] = { .name = "rate-matching", .type = YNL_PT_U8, }, }; struct ynl_policy_nest ethtool_linkmodes_nest = { .max_attr = ETHTOOL_A_LINKMODES_MAX, .table = ethtool_linkmodes_policy, }; struct ynl_policy_attr ethtool_linkstate_policy[ETHTOOL_A_LINKSTATE_MAX + 1] = { [ETHTOOL_A_LINKSTATE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_LINKSTATE_LINK] = { .name = "link", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKSTATE_SQI] = { .name = "sqi", .type = YNL_PT_U32, }, [ETHTOOL_A_LINKSTATE_SQI_MAX] = { .name = "sqi-max", .type = YNL_PT_U32, }, [ETHTOOL_A_LINKSTATE_EXT_STATE] = { .name = "ext-state", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKSTATE_EXT_SUBSTATE] = { .name = "ext-substate", .type = YNL_PT_U8, }, [ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT] = { .name = "ext-down-cnt", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_linkstate_nest = { .max_attr = ETHTOOL_A_LINKSTATE_MAX, .table = ethtool_linkstate_policy, }; struct ynl_policy_attr ethtool_debug_policy[ETHTOOL_A_DEBUG_MAX + 1] = { [ETHTOOL_A_DEBUG_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_DEBUG_MSGMASK] = { .name = "msgmask", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, }; struct ynl_policy_nest ethtool_debug_nest = { .max_attr = ETHTOOL_A_DEBUG_MAX, .table = ethtool_debug_policy, }; struct ynl_policy_attr ethtool_wol_policy[ETHTOOL_A_WOL_MAX + 1] = { [ETHTOOL_A_WOL_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_WOL_MODES] = { .name = "modes", .type = YNL_PT_NEST, .nest = 
&ethtool_bitset_nest, }, [ETHTOOL_A_WOL_SOPASS] = { .name = "sopass", .type = YNL_PT_BINARY,}, }; struct ynl_policy_nest ethtool_wol_nest = { .max_attr = ETHTOOL_A_WOL_MAX, .table = ethtool_wol_policy, }; struct ynl_policy_attr ethtool_features_policy[ETHTOOL_A_FEATURES_MAX + 1] = { [ETHTOOL_A_FEATURES_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_FEATURES_HW] = { .name = "hw", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_FEATURES_WANTED] = { .name = "wanted", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_FEATURES_ACTIVE] = { .name = "active", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_FEATURES_NOCHANGE] = { .name = "nochange", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, }; struct ynl_policy_nest ethtool_features_nest = { .max_attr = ETHTOOL_A_FEATURES_MAX, .table = ethtool_features_policy, }; struct ynl_policy_attr ethtool_privflags_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = { [ETHTOOL_A_PRIVFLAGS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .name = "flags", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, }; struct ynl_policy_nest ethtool_privflags_nest = { .max_attr = ETHTOOL_A_PRIVFLAGS_MAX, .table = ethtool_privflags_policy, }; struct ynl_policy_attr ethtool_rings_policy[ETHTOOL_A_RINGS_MAX + 1] = { [ETHTOOL_A_RINGS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_RINGS_RX_MAX] = { .name = "rx-max", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .name = "rx-mini-max", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .name = "rx-jumbo-max", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_TX_MAX] = { .name = "tx-max", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_RX] = { .name = "rx", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_RX_MINI] = { .name = "rx-mini", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_RX_JUMBO] = { .name = 
"rx-jumbo", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_TX] = { .name = "tx", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_RX_BUF_LEN] = { .name = "rx-buf-len", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_TCP_DATA_SPLIT] = { .name = "tcp-data-split", .type = YNL_PT_U8, }, [ETHTOOL_A_RINGS_CQE_SIZE] = { .name = "cqe-size", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_TX_PUSH] = { .name = "tx-push", .type = YNL_PT_U8, }, [ETHTOOL_A_RINGS_RX_PUSH] = { .name = "rx-push", .type = YNL_PT_U8, }, [ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] = { .name = "tx-push-buf-len", .type = YNL_PT_U32, }, [ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX] = { .name = "tx-push-buf-len-max", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_rings_nest = { .max_attr = ETHTOOL_A_RINGS_MAX, .table = ethtool_rings_policy, }; struct ynl_policy_attr ethtool_channels_policy[ETHTOOL_A_CHANNELS_MAX + 1] = { [ETHTOOL_A_CHANNELS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_CHANNELS_RX_MAX] = { .name = "rx-max", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_TX_MAX] = { .name = "tx-max", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .name = "other-max", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .name = "combined-max", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_RX_COUNT] = { .name = "rx-count", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_TX_COUNT] = { .name = "tx-count", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .name = "other-count", .type = YNL_PT_U32, }, [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .name = "combined-count", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_channels_nest = { .max_attr = ETHTOOL_A_CHANNELS_MAX, .table = ethtool_channels_policy, }; struct ynl_policy_attr ethtool_coalesce_policy[ETHTOOL_A_COALESCE_MAX + 1] = { [ETHTOOL_A_COALESCE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_COALESCE_RX_USECS] = { .name = "rx-usecs", .type = YNL_PT_U32, }, 
[ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .name = "rx-max-frames", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .name = "rx-usecs-irq", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ] = { .name = "rx-max-frames-irq", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_USECS] = { .name = "tx-usecs", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_MAX_FRAMES] = { .name = "tx-max-frames", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_USECS_IRQ] = { .name = "tx-usecs-irq", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ] = { .name = "tx-max-frames-irq", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_STATS_BLOCK_USECS] = { .name = "stats-block-usecs", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX] = { .name = "use-adaptive-rx", .type = YNL_PT_U8, }, [ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX] = { .name = "use-adaptive-tx", .type = YNL_PT_U8, }, [ETHTOOL_A_COALESCE_PKT_RATE_LOW] = { .name = "pkt-rate-low", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RX_USECS_LOW] = { .name = "rx-usecs-low", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW] = { .name = "rx-max-frames-low", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_USECS_LOW] = { .name = "tx-usecs-low", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW] = { .name = "tx-max-frames-low", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_PKT_RATE_HIGH] = { .name = "pkt-rate-high", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RX_USECS_HIGH] = { .name = "rx-usecs-high", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH] = { .name = "rx-max-frames-high", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_USECS_HIGH] = { .name = "tx-usecs-high", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH] = { .name = "tx-max-frames-high", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .name = "rate-sample-interval", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_USE_CQE_MODE_TX] = { .name = "use-cqe-mode-tx", .type = YNL_PT_U8, }, 
[ETHTOOL_A_COALESCE_USE_CQE_MODE_RX] = { .name = "use-cqe-mode-rx", .type = YNL_PT_U8, }, [ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .name = "tx-aggr-max-bytes", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .name = "tx-aggr-max-frames", .type = YNL_PT_U32, }, [ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .name = "tx-aggr-time-usecs", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_coalesce_nest = { .max_attr = ETHTOOL_A_COALESCE_MAX, .table = ethtool_coalesce_policy, }; struct ynl_policy_attr ethtool_pause_policy[ETHTOOL_A_PAUSE_MAX + 1] = { [ETHTOOL_A_PAUSE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_PAUSE_AUTONEG] = { .name = "autoneg", .type = YNL_PT_U8, }, [ETHTOOL_A_PAUSE_RX] = { .name = "rx", .type = YNL_PT_U8, }, [ETHTOOL_A_PAUSE_TX] = { .name = "tx", .type = YNL_PT_U8, }, [ETHTOOL_A_PAUSE_STATS] = { .name = "stats", .type = YNL_PT_NEST, .nest = &ethtool_pause_stat_nest, }, [ETHTOOL_A_PAUSE_STATS_SRC] = { .name = "stats-src", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_pause_nest = { .max_attr = ETHTOOL_A_PAUSE_MAX, .table = ethtool_pause_policy, }; struct ynl_policy_attr ethtool_eee_policy[ETHTOOL_A_EEE_MAX + 1] = { [ETHTOOL_A_EEE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_EEE_MODES_OURS] = { .name = "modes-ours", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_EEE_MODES_PEER] = { .name = "modes-peer", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_EEE_ACTIVE] = { .name = "active", .type = YNL_PT_U8, }, [ETHTOOL_A_EEE_ENABLED] = { .name = "enabled", .type = YNL_PT_U8, }, [ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .name = "tx-lpi-enabled", .type = YNL_PT_U8, }, [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .name = "tx-lpi-timer", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_eee_nest = { .max_attr = ETHTOOL_A_EEE_MAX, .table = ethtool_eee_policy, }; struct ynl_policy_attr 
ethtool_tsinfo_policy[ETHTOOL_A_TSINFO_MAX + 1] = { [ETHTOOL_A_TSINFO_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_TSINFO_TIMESTAMPING] = { .name = "timestamping", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_TSINFO_TX_TYPES] = { .name = "tx-types", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_TSINFO_RX_FILTERS] = { .name = "rx-filters", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_TSINFO_PHC_INDEX] = { .name = "phc-index", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_tsinfo_nest = { .max_attr = ETHTOOL_A_TSINFO_MAX, .table = ethtool_tsinfo_policy, }; struct ynl_policy_attr ethtool_cable_test_policy[ETHTOOL_A_CABLE_TEST_MAX + 1] = { [ETHTOOL_A_CABLE_TEST_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, }; struct ynl_policy_nest ethtool_cable_test_nest = { .max_attr = ETHTOOL_A_CABLE_TEST_MAX, .table = ethtool_cable_test_policy, }; struct ynl_policy_attr ethtool_cable_test_ntf_policy[ETHTOOL_A_CABLE_TEST_NTF_MAX + 1] = { [ETHTOOL_A_CABLE_TEST_NTF_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_CABLE_TEST_NTF_STATUS] = { .name = "status", .type = YNL_PT_U8, }, [ETHTOOL_A_CABLE_TEST_NTF_NEST] = { .name = "nest", .type = YNL_PT_NEST, .nest = &ethtool_cable_nest_nest, }, }; struct ynl_policy_nest ethtool_cable_test_ntf_nest = { .max_attr = ETHTOOL_A_CABLE_TEST_NTF_MAX, .table = ethtool_cable_test_ntf_policy, }; struct ynl_policy_attr ethtool_cable_test_tdr_policy[ETHTOOL_A_CABLE_TEST_TDR_MAX + 1] = { [ETHTOOL_A_CABLE_TEST_TDR_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_CABLE_TEST_TDR_CFG] = { .name = "cfg", .type = YNL_PT_NEST, .nest = &ethtool_cable_test_tdr_cfg_nest, }, }; struct ynl_policy_nest ethtool_cable_test_tdr_nest = { .max_attr = ETHTOOL_A_CABLE_TEST_TDR_MAX, .table = ethtool_cable_test_tdr_policy, }; struct 
ynl_policy_attr ethtool_cable_test_tdr_ntf_policy[ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX + 1] = { [ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS] = { .name = "status", .type = YNL_PT_U8, }, [ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST] = { .name = "nest", .type = YNL_PT_NEST, .nest = &ethtool_cable_nest_nest, }, }; struct ynl_policy_nest ethtool_cable_test_tdr_ntf_nest = { .max_attr = ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX, .table = ethtool_cable_test_tdr_ntf_policy, }; struct ynl_policy_attr ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = { [ETHTOOL_A_TUNNEL_INFO_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_TUNNEL_INFO_UDP_PORTS] = { .name = "udp-ports", .type = YNL_PT_NEST, .nest = &ethtool_tunnel_udp_nest, }, }; struct ynl_policy_nest ethtool_tunnel_info_nest = { .max_attr = ETHTOOL_A_TUNNEL_INFO_MAX, .table = ethtool_tunnel_info_policy, }; struct ynl_policy_attr ethtool_fec_policy[ETHTOOL_A_FEC_MAX + 1] = { [ETHTOOL_A_FEC_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_FEC_MODES] = { .name = "modes", .type = YNL_PT_NEST, .nest = &ethtool_bitset_nest, }, [ETHTOOL_A_FEC_AUTO] = { .name = "auto", .type = YNL_PT_U8, }, [ETHTOOL_A_FEC_ACTIVE] = { .name = "active", .type = YNL_PT_U32, }, [ETHTOOL_A_FEC_STATS] = { .name = "stats", .type = YNL_PT_NEST, .nest = &ethtool_fec_stat_nest, }, }; struct ynl_policy_nest ethtool_fec_nest = { .max_attr = ETHTOOL_A_FEC_MAX, .table = ethtool_fec_policy, }; struct ynl_policy_attr ethtool_module_eeprom_policy[ETHTOOL_A_MODULE_EEPROM_MAX + 1] = { [ETHTOOL_A_MODULE_EEPROM_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_MODULE_EEPROM_OFFSET] = { .name = "offset", .type = YNL_PT_U32, }, [ETHTOOL_A_MODULE_EEPROM_LENGTH] = { .name = "length", .type = YNL_PT_U32, }, [ETHTOOL_A_MODULE_EEPROM_PAGE] = { .name 
= "page", .type = YNL_PT_U8, }, [ETHTOOL_A_MODULE_EEPROM_BANK] = { .name = "bank", .type = YNL_PT_U8, }, [ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS] = { .name = "i2c-address", .type = YNL_PT_U8, }, [ETHTOOL_A_MODULE_EEPROM_DATA] = { .name = "data", .type = YNL_PT_BINARY,}, }; struct ynl_policy_nest ethtool_module_eeprom_nest = { .max_attr = ETHTOOL_A_MODULE_EEPROM_MAX, .table = ethtool_module_eeprom_policy, }; struct ynl_policy_attr ethtool_phc_vclocks_policy[ETHTOOL_A_PHC_VCLOCKS_MAX + 1] = { [ETHTOOL_A_PHC_VCLOCKS_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_PHC_VCLOCKS_NUM] = { .name = "num", .type = YNL_PT_U32, }, [ETHTOOL_A_PHC_VCLOCKS_INDEX] = { .name = "index", .type = YNL_PT_BINARY,}, }; struct ynl_policy_nest ethtool_phc_vclocks_nest = { .max_attr = ETHTOOL_A_PHC_VCLOCKS_MAX, .table = ethtool_phc_vclocks_policy, }; struct ynl_policy_attr ethtool_module_policy[ETHTOOL_A_MODULE_MAX + 1] = { [ETHTOOL_A_MODULE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_MODULE_POWER_MODE_POLICY] = { .name = "power-mode-policy", .type = YNL_PT_U8, }, [ETHTOOL_A_MODULE_POWER_MODE] = { .name = "power-mode", .type = YNL_PT_U8, }, }; struct ynl_policy_nest ethtool_module_nest = { .max_attr = ETHTOOL_A_MODULE_MAX, .table = ethtool_module_policy, }; struct ynl_policy_attr ethtool_pse_policy[ETHTOOL_A_PSE_MAX + 1] = { [ETHTOOL_A_PSE_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_PODL_PSE_ADMIN_STATE] = { .name = "admin-state", .type = YNL_PT_U32, }, [ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] = { .name = "admin-control", .type = YNL_PT_U32, }, [ETHTOOL_A_PODL_PSE_PW_D_STATUS] = { .name = "pw-d-status", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_pse_nest = { .max_attr = ETHTOOL_A_PSE_MAX, .table = ethtool_pse_policy, }; struct ynl_policy_attr ethtool_rss_policy[ETHTOOL_A_RSS_MAX + 1] = { [ETHTOOL_A_RSS_HEADER] = { .name = "header", .type = 
YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_RSS_CONTEXT] = { .name = "context", .type = YNL_PT_U32, }, [ETHTOOL_A_RSS_HFUNC] = { .name = "hfunc", .type = YNL_PT_U32, }, [ETHTOOL_A_RSS_INDIR] = { .name = "indir", .type = YNL_PT_BINARY,}, [ETHTOOL_A_RSS_HKEY] = { .name = "hkey", .type = YNL_PT_BINARY,}, }; struct ynl_policy_nest ethtool_rss_nest = { .max_attr = ETHTOOL_A_RSS_MAX, .table = ethtool_rss_policy, }; struct ynl_policy_attr ethtool_plca_policy[ETHTOOL_A_PLCA_MAX + 1] = { [ETHTOOL_A_PLCA_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_PLCA_VERSION] = { .name = "version", .type = YNL_PT_U16, }, [ETHTOOL_A_PLCA_ENABLED] = { .name = "enabled", .type = YNL_PT_U8, }, [ETHTOOL_A_PLCA_STATUS] = { .name = "status", .type = YNL_PT_U8, }, [ETHTOOL_A_PLCA_NODE_CNT] = { .name = "node-cnt", .type = YNL_PT_U32, }, [ETHTOOL_A_PLCA_NODE_ID] = { .name = "node-id", .type = YNL_PT_U32, }, [ETHTOOL_A_PLCA_TO_TMR] = { .name = "to-tmr", .type = YNL_PT_U32, }, [ETHTOOL_A_PLCA_BURST_CNT] = { .name = "burst-cnt", .type = YNL_PT_U32, }, [ETHTOOL_A_PLCA_BURST_TMR] = { .name = "burst-tmr", .type = YNL_PT_U32, }, }; struct ynl_policy_nest ethtool_plca_nest = { .max_attr = ETHTOOL_A_PLCA_MAX, .table = ethtool_plca_policy, }; struct ynl_policy_attr ethtool_mm_policy[ETHTOOL_A_MM_MAX + 1] = { [ETHTOOL_A_MM_HEADER] = { .name = "header", .type = YNL_PT_NEST, .nest = &ethtool_header_nest, }, [ETHTOOL_A_MM_PMAC_ENABLED] = { .name = "pmac-enabled", .type = YNL_PT_U8, }, [ETHTOOL_A_MM_TX_ENABLED] = { .name = "tx-enabled", .type = YNL_PT_U8, }, [ETHTOOL_A_MM_TX_ACTIVE] = { .name = "tx-active", .type = YNL_PT_U8, }, [ETHTOOL_A_MM_TX_MIN_FRAG_SIZE] = { .name = "tx-min-frag-size", .type = YNL_PT_U32, }, [ETHTOOL_A_MM_RX_MIN_FRAG_SIZE] = { .name = "rx-min-frag-size", .type = YNL_PT_U32, }, [ETHTOOL_A_MM_VERIFY_ENABLED] = { .name = "verify-enabled", .type = YNL_PT_U8, }, [ETHTOOL_A_MM_VERIFY_STATUS] = { .name = "verify-status", .type = 
YNL_PT_U8, }, [ETHTOOL_A_MM_VERIFY_TIME] = { .name = "verify-time", .type = YNL_PT_U32, }, [ETHTOOL_A_MM_MAX_VERIFY_TIME] = { .name = "max-verify-time", .type = YNL_PT_U32, }, [ETHTOOL_A_MM_STATS] = { .name = "stats", .type = YNL_PT_NEST, .nest = &ethtool_mm_stat_nest, }, }; struct ynl_policy_nest ethtool_mm_nest = { .max_attr = ETHTOOL_A_MM_MAX, .table = ethtool_mm_policy, }; /* Common nested types */ void ethtool_header_free(struct ethtool_header *obj) { free(obj->dev_name); } int ethtool_header_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_header *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.dev_index) mnl_attr_put_u32(nlh, ETHTOOL_A_HEADER_DEV_INDEX, obj->dev_index); if (obj->_present.dev_name_len) mnl_attr_put_strz(nlh, ETHTOOL_A_HEADER_DEV_NAME, obj->dev_name); if (obj->_present.flags) mnl_attr_put_u32(nlh, ETHTOOL_A_HEADER_FLAGS, obj->flags); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_header_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_header *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_HEADER_DEV_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.dev_index = 1; dst->dev_index = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_HEADER_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == ETHTOOL_A_HEADER_FLAGS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.flags = 1; dst->flags = mnl_attr_get_u32(attr); } } return 0; } void ethtool_pause_stat_free(struct ethtool_pause_stat *obj) { } int ethtool_pause_stat_put(struct nlmsghdr 
*nlh, unsigned int attr_type, struct ethtool_pause_stat *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.tx_frames) mnl_attr_put_u64(nlh, ETHTOOL_A_PAUSE_STAT_TX_FRAMES, obj->tx_frames); if (obj->_present.rx_frames) mnl_attr_put_u64(nlh, ETHTOOL_A_PAUSE_STAT_RX_FRAMES, obj->rx_frames); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_pause_stat_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_pause_stat *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_PAUSE_STAT_TX_FRAMES) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_frames = 1; dst->tx_frames = mnl_attr_get_u64(attr); } else if (type == ETHTOOL_A_PAUSE_STAT_RX_FRAMES) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_frames = 1; dst->rx_frames = mnl_attr_get_u64(attr); } } return 0; } void ethtool_cable_test_tdr_cfg_free(struct ethtool_cable_test_tdr_cfg *obj) { } void ethtool_fec_stat_free(struct ethtool_fec_stat *obj) { free(obj->corrected); free(obj->uncorr); free(obj->corr_bits); } int ethtool_fec_stat_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_fec_stat *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.corrected_len) mnl_attr_put(nlh, ETHTOOL_A_FEC_STAT_CORRECTED, obj->_present.corrected_len, obj->corrected); if (obj->_present.uncorr_len) mnl_attr_put(nlh, ETHTOOL_A_FEC_STAT_UNCORR, obj->_present.uncorr_len, obj->uncorr); if (obj->_present.corr_bits_len) mnl_attr_put(nlh, ETHTOOL_A_FEC_STAT_CORR_BITS, obj->_present.corr_bits_len, obj->corr_bits); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_fec_stat_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_fec_stat *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = 
mnl_attr_get_type(attr); if (type == ETHTOOL_A_FEC_STAT_CORRECTED) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.corrected_len = len; dst->corrected = malloc(len); memcpy(dst->corrected, mnl_attr_get_payload(attr), len); } else if (type == ETHTOOL_A_FEC_STAT_UNCORR) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.uncorr_len = len; dst->uncorr = malloc(len); memcpy(dst->uncorr, mnl_attr_get_payload(attr), len); } else if (type == ETHTOOL_A_FEC_STAT_CORR_BITS) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.corr_bits_len = len; dst->corr_bits = malloc(len); memcpy(dst->corr_bits, mnl_attr_get_payload(attr), len); } } return 0; } void ethtool_mm_stat_free(struct ethtool_mm_stat *obj) { } int ethtool_mm_stat_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_mm_stat *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_MM_STAT_REASSEMBLY_ERRORS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reassembly_errors = 1; dst->reassembly_errors = mnl_attr_get_u64(attr); } else if (type == ETHTOOL_A_MM_STAT_SMD_ERRORS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.smd_errors = 1; dst->smd_errors = mnl_attr_get_u64(attr); } else if (type == ETHTOOL_A_MM_STAT_REASSEMBLY_OK) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reassembly_ok = 1; dst->reassembly_ok = mnl_attr_get_u64(attr); } else if (type == ETHTOOL_A_MM_STAT_RX_FRAG_COUNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_frag_count = 1; dst->rx_frag_count = mnl_attr_get_u64(attr); } else if (type == ETHTOOL_A_MM_STAT_TX_FRAG_COUNT) { if 
(ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_frag_count = 1; dst->tx_frag_count = mnl_attr_get_u64(attr); } else if (type == ETHTOOL_A_MM_STAT_HOLD_COUNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.hold_count = 1; dst->hold_count = mnl_attr_get_u64(attr); } } return 0; } void ethtool_cable_result_free(struct ethtool_cable_result *obj) { } int ethtool_cable_result_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_cable_result *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_CABLE_RESULT_PAIR) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.pair = 1; dst->pair = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_CABLE_RESULT_CODE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.code = 1; dst->code = mnl_attr_get_u8(attr); } } return 0; } void ethtool_cable_fault_length_free(struct ethtool_cable_fault_length *obj) { } int ethtool_cable_fault_length_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_cable_fault_length *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.pair = 1; dst->pair = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_CABLE_FAULT_LENGTH_CM) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.cm = 1; dst->cm = mnl_attr_get_u32(attr); } } return 0; } void ethtool_bitset_bit_free(struct ethtool_bitset_bit *obj) { free(obj->name); } int ethtool_bitset_bit_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_bitset_bit *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.index) mnl_attr_put_u32(nlh, 
ETHTOOL_A_BITSET_BIT_INDEX, obj->index); if (obj->_present.name_len) mnl_attr_put_strz(nlh, ETHTOOL_A_BITSET_BIT_NAME, obj->name); if (obj->_present.value) mnl_attr_put(nlh, ETHTOOL_A_BITSET_BIT_VALUE, 0, NULL); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_bitset_bit_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_bitset_bit *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_BITSET_BIT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.index = 1; dst->index = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_BITSET_BIT_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.name_len = len; dst->name = malloc(len + 1); memcpy(dst->name, mnl_attr_get_str(attr), len); dst->name[len] = 0; } else if (type == ETHTOOL_A_BITSET_BIT_VALUE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.value = 1; } } return 0; } void ethtool_tunnel_udp_entry_free(struct ethtool_tunnel_udp_entry *obj) { } int ethtool_tunnel_udp_entry_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_tunnel_udp_entry *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port = 1; dst->port = mnl_attr_get_u16(attr); } else if (type == ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.type = 1; dst->type = mnl_attr_get_u32(attr); } } return 0; } void ethtool_string_free(struct ethtool_string *obj) { free(obj->value); } int ethtool_string_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_string *obj) { struct nlattr 
*nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.index) mnl_attr_put_u32(nlh, ETHTOOL_A_STRING_INDEX, obj->index); if (obj->_present.value_len) mnl_attr_put_strz(nlh, ETHTOOL_A_STRING_VALUE, obj->value); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_string_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_string *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_STRING_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.index = 1; dst->index = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_STRING_VALUE) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.value_len = len; dst->value = malloc(len + 1); memcpy(dst->value, mnl_attr_get_str(attr), len); dst->value[len] = 0; } } return 0; } void ethtool_cable_nest_free(struct ethtool_cable_nest *obj) { ethtool_cable_result_free(&obj->result); ethtool_cable_fault_length_free(&obj->fault_length); } int ethtool_cable_nest_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_cable_nest *dst = yarg->data; const struct nlattr *attr; struct ynl_parse_arg parg; parg.ys = yarg->ys; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_CABLE_NEST_RESULT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.result = 1; parg.rsp_policy = &ethtool_cable_result_nest; parg.data = &dst->result; if (ethtool_cable_result_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_CABLE_NEST_FAULT_LENGTH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.fault_length = 1; parg.rsp_policy = &ethtool_cable_fault_length_nest; parg.data = &dst->fault_length; if (ethtool_cable_fault_length_parse(&parg, attr)) return 
MNL_CB_ERROR; } } return 0; } void ethtool_bitset_bits_free(struct ethtool_bitset_bits *obj) { unsigned int i; for (i = 0; i < obj->n_bit; i++) ethtool_bitset_bit_free(&obj->bit[i]); free(obj->bit); } int ethtool_bitset_bits_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_bitset_bits *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); for (unsigned int i = 0; i < obj->n_bit; i++) ethtool_bitset_bit_put(nlh, ETHTOOL_A_BITSET_BITS_BIT, &obj->bit[i]); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_bitset_bits_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_bitset_bits *dst = yarg->data; const struct nlattr *attr; struct ynl_parse_arg parg; unsigned int n_bit = 0; int i; parg.ys = yarg->ys; if (dst->bit) return ynl_error_parse(yarg, "attribute already present (bitset-bits.bit)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_BITSET_BITS_BIT) { n_bit++; } } if (n_bit) { dst->bit = calloc(n_bit, sizeof(*dst->bit)); dst->n_bit = n_bit; i = 0; parg.rsp_policy = &ethtool_bitset_bit_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == ETHTOOL_A_BITSET_BITS_BIT) { parg.data = &dst->bit[i]; if (ethtool_bitset_bit_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void ethtool_strings_free(struct ethtool_strings *obj) { unsigned int i; for (i = 0; i < obj->n_string; i++) ethtool_string_free(&obj->string[i]); free(obj->string); } int ethtool_strings_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_strings *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); for (unsigned int i = 0; i < obj->n_string; i++) ethtool_string_put(nlh, ETHTOOL_A_STRINGS_STRING, &obj->string[i]); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_strings_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_strings *dst = yarg->data; const struct nlattr *attr; 
struct ynl_parse_arg parg; unsigned int n_string = 0; int i; parg.ys = yarg->ys; if (dst->string) return ynl_error_parse(yarg, "attribute already present (strings.string)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_STRINGS_STRING) { n_string++; } } if (n_string) { dst->string = calloc(n_string, sizeof(*dst->string)); dst->n_string = n_string; i = 0; parg.rsp_policy = &ethtool_string_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == ETHTOOL_A_STRINGS_STRING) { parg.data = &dst->string[i]; if (ethtool_string_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void ethtool_bitset_free(struct ethtool_bitset *obj) { ethtool_bitset_bits_free(&obj->bits); } int ethtool_bitset_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_bitset *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.nomask) mnl_attr_put(nlh, ETHTOOL_A_BITSET_NOMASK, 0, NULL); if (obj->_present.size) mnl_attr_put_u32(nlh, ETHTOOL_A_BITSET_SIZE, obj->size); if (obj->_present.bits) ethtool_bitset_bits_put(nlh, ETHTOOL_A_BITSET_BITS, &obj->bits); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_bitset_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_bitset *dst = yarg->data; const struct nlattr *attr; struct ynl_parse_arg parg; parg.ys = yarg->ys; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_BITSET_NOMASK) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.nomask = 1; } else if (type == ETHTOOL_A_BITSET_SIZE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.size = 1; dst->size = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_BITSET_BITS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.bits = 1; parg.rsp_policy = &ethtool_bitset_bits_nest; parg.data = &dst->bits; if 
(ethtool_bitset_bits_parse(&parg, attr)) return MNL_CB_ERROR; } } return 0; } void ethtool_stringset_free(struct ethtool_stringset_ *obj) { unsigned int i; for (i = 0; i < obj->n_strings; i++) ethtool_strings_free(&obj->strings[i]); free(obj->strings); } int ethtool_stringset_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_stringset_ *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); if (obj->_present.id) mnl_attr_put_u32(nlh, ETHTOOL_A_STRINGSET_ID, obj->id); if (obj->_present.count) mnl_attr_put_u32(nlh, ETHTOOL_A_STRINGSET_COUNT, obj->count); for (unsigned int i = 0; i < obj->n_strings; i++) ethtool_strings_put(nlh, ETHTOOL_A_STRINGSET_STRINGS, &obj->strings[i]); mnl_attr_nest_end(nlh, nest); return 0; } int ethtool_stringset_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_stringset_ *dst = yarg->data; unsigned int n_strings = 0; const struct nlattr *attr; struct ynl_parse_arg parg; int i; parg.ys = yarg->ys; if (dst->strings) return ynl_error_parse(yarg, "attribute already present (stringset.strings)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_STRINGSET_ID) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.id = 1; dst->id = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_STRINGSET_COUNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.count = 1; dst->count = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_STRINGSET_STRINGS) { n_strings++; } } if (n_strings) { dst->strings = calloc(n_strings, sizeof(*dst->strings)); dst->n_strings = n_strings; i = 0; parg.rsp_policy = &ethtool_strings_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == ETHTOOL_A_STRINGSET_STRINGS) { parg.data = &dst->strings[i]; if (ethtool_strings_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void ethtool_tunnel_udp_table_free(struct ethtool_tunnel_udp_table 
*obj) { unsigned int i; ethtool_bitset_free(&obj->types); for (i = 0; i < obj->n_entry; i++) ethtool_tunnel_udp_entry_free(&obj->entry[i]); free(obj->entry); } int ethtool_tunnel_udp_table_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_tunnel_udp_table *dst = yarg->data; const struct nlattr *attr; struct ynl_parse_arg parg; unsigned int n_entry = 0; int i; parg.ys = yarg->ys; if (dst->entry) return ynl_error_parse(yarg, "attribute already present (tunnel-udp-table.entry)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.size = 1; dst->size = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.types = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->types; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY) { n_entry++; } } if (n_entry) { dst->entry = calloc(n_entry, sizeof(*dst->entry)); dst->n_entry = n_entry; i = 0; parg.rsp_policy = &ethtool_tunnel_udp_entry_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY) { parg.data = &dst->entry[i]; if (ethtool_tunnel_udp_entry_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void ethtool_stringsets_free(struct ethtool_stringsets *obj) { unsigned int i; for (i = 0; i < obj->n_stringset; i++) ethtool_stringset_free(&obj->stringset[i]); free(obj->stringset); } int ethtool_stringsets_put(struct nlmsghdr *nlh, unsigned int attr_type, struct ethtool_stringsets *obj) { struct nlattr *nest; nest = mnl_attr_nest_start(nlh, attr_type); for (unsigned int i = 0; i < obj->n_stringset; i++) ethtool_stringset_put(nlh, ETHTOOL_A_STRINGSETS_STRINGSET, &obj->stringset[i]); mnl_attr_nest_end(nlh, nest); return 0; } 
int ethtool_stringsets_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_stringsets *dst = yarg->data; unsigned int n_stringset = 0; const struct nlattr *attr; struct ynl_parse_arg parg; int i; parg.ys = yarg->ys; if (dst->stringset) return ynl_error_parse(yarg, "attribute already present (stringsets.stringset)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_STRINGSETS_STRINGSET) { n_stringset++; } } if (n_stringset) { dst->stringset = calloc(n_stringset, sizeof(*dst->stringset)); dst->n_stringset = n_stringset; i = 0; parg.rsp_policy = &ethtool_stringset_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == ETHTOOL_A_STRINGSETS_STRINGSET) { parg.data = &dst->stringset[i]; if (ethtool_stringset_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void ethtool_tunnel_udp_free(struct ethtool_tunnel_udp *obj) { ethtool_tunnel_udp_table_free(&obj->table); } int ethtool_tunnel_udp_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct ethtool_tunnel_udp *dst = yarg->data; const struct nlattr *attr; struct ynl_parse_arg parg; parg.ys = yarg->ys; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_TUNNEL_UDP_TABLE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.table = 1; parg.rsp_policy = &ethtool_tunnel_udp_table_nest; parg.data = &dst->table; if (ethtool_tunnel_udp_table_parse(&parg, attr)) return MNL_CB_ERROR; } } return 0; } /* ============== ETHTOOL_MSG_STRSET_GET ============== */ /* ETHTOOL_MSG_STRSET_GET - do */ void ethtool_strset_get_req_free(struct ethtool_strset_get_req *req) { ethtool_header_free(&req->header); ethtool_stringsets_free(&req->stringsets); free(req); } void ethtool_strset_get_rsp_free(struct ethtool_strset_get_rsp *rsp) { ethtool_header_free(&rsp->header); ethtool_stringsets_free(&rsp->stringsets); free(rsp); } int 
ethtool_strset_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_strset_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_STRSET_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_STRSET_STRINGSETS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.stringsets = 1; parg.rsp_policy = &ethtool_stringsets_nest; parg.data = &dst->stringsets; if (ethtool_stringsets_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } struct ethtool_strset_get_rsp * ethtool_strset_get(struct ynl_sock *ys, struct ethtool_strset_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_strset_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_STRSET_GET, 1); ys->req_policy = &ethtool_strset_nest; yrs.yarg.rsp_policy = &ethtool_strset_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_STRSET_HEADER, &req->header); if (req->_present.stringsets) ethtool_stringsets_put(nlh, ETHTOOL_A_STRSET_STRINGSETS, &req->stringsets); if (req->_present.counts_only) mnl_attr_put(nlh, ETHTOOL_A_STRSET_COUNTS_ONLY, 0, NULL); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_strset_get_rsp_parse; yrs.rsp_cmd = ETHTOOL_MSG_STRSET_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_strset_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_STRSET_GET - dump */ void ethtool_strset_get_list_free(struct ethtool_strset_get_list *rsp) { struct ethtool_strset_get_list *next = rsp; while ((void *)next != 
/* Continuation of ethtool_strset_get_list_free(): walk the list until the
 * YNL_LIST_END sentinel, freeing each node's owned members then the node. */
YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_stringsets_free(&rsp->obj.stringsets);
		free(rsp);
	}
}

/* Dump variant of strset-get: returns the linked response list (freed with
 * ethtool_strset_get_list_free), or NULL on error. */
struct ethtool_strset_get_list *
ethtool_strset_get_dump(struct ynl_sock *ys,
			struct ethtool_strset_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_strset_get_list);
	yds.cb = ethtool_strset_get_rsp_parse;
	yds.rsp_cmd = ETHTOOL_MSG_STRSET_GET;
	yds.rsp_policy = &ethtool_strset_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_STRSET_GET, 1);
	ys->req_policy = &ethtool_strset_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_STRSET_HEADER, &req->header);
	if (req->_present.stringsets)
		ethtool_stringsets_put(nlh, ETHTOOL_A_STRSET_STRINGSETS,
				       &req->stringsets);
	if (req->_present.counts_only)
		mnl_attr_put(nlh, ETHTOOL_A_STRSET_COUNTS_ONLY, 0, NULL);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_strset_get_list_free(yds.first);
	return NULL;
}

/* ============== ETHTOOL_MSG_LINKINFO_GET ============== */
/* ETHTOOL_MSG_LINKINFO_GET - do */
/* Free a heap-allocated linkinfo-get request and its owned members. */
void ethtool_linkinfo_get_req_free(struct ethtool_linkinfo_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Free a heap-allocated linkinfo-get response and its owned members. */
void ethtool_linkinfo_get_rsp_free(struct ethtool_linkinfo_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	free(rsp);
}

/* Parse callback for one ETHTOOL_MSG_LINKINFO_GET response message
 * (the attribute else-if chain continues on the next collapsed line). */
int ethtool_linkinfo_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_linkinfo_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_LINKINFO_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;
			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
/* Continuation of ethtool_linkinfo_get_rsp_parse(): remaining scalar
 * (u8) attributes of the linkinfo response. */
		} else if (type == ETHTOOL_A_LINKINFO_PORT) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.port = 1;
			dst->port = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKINFO_PHYADDR) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.phyaddr = 1;
			dst->phyaddr = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKINFO_TP_MDIX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tp_mdix = 1;
			dst->tp_mdix = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKINFO_TP_MDIX_CTRL) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tp_mdix_ctrl = 1;
			dst->tp_mdix_ctrl = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKINFO_TRANSCEIVER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.transceiver = 1;
			dst->transceiver = mnl_attr_get_u8(attr);
		}
	}

	return MNL_CB_OK;
}

/* Issue ETHTOOL_MSG_LINKINFO_GET and return the parsed, heap-allocated
 * response (caller frees with ethtool_linkinfo_get_rsp_free), or NULL.
 * NOTE(review): calloc() result is not checked here — generated pattern;
 * verify against the generator before changing. */
struct ethtool_linkinfo_get_rsp *
ethtool_linkinfo_get(struct ynl_sock *ys, struct ethtool_linkinfo_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_linkinfo_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKINFO_GET, 1);
	ys->req_policy = &ethtool_linkinfo_nest;
	yrs.yarg.rsp_policy = &ethtool_linkinfo_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKINFO_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_linkinfo_get_rsp_parse;
	yrs.rsp_cmd = ETHTOOL_MSG_LINKINFO_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_linkinfo_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_LINKINFO_GET - dump */
/* Free a linkinfo-get dump list (terminated by YNL_LIST_END). */
void ethtool_linkinfo_get_list_free(struct ethtool_linkinfo_get_list *rsp)
{
	struct ethtool_linkinfo_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		free(rsp);
	}
}

/* Return type of ethtool_linkinfo_get_dump(); definition continues on the
 * next collapsed line. */
struct ethtool_linkinfo_get_list *
/* Dump variant of linkinfo-get: returns the linked response list (freed
 * with ethtool_linkinfo_get_list_free), or NULL on error. */
ethtool_linkinfo_get_dump(struct ynl_sock *ys,
			  struct ethtool_linkinfo_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_linkinfo_get_list);
	yds.cb = ethtool_linkinfo_get_rsp_parse;
	yds.rsp_cmd = ETHTOOL_MSG_LINKINFO_GET;
	yds.rsp_policy = &ethtool_linkinfo_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_LINKINFO_GET, 1);
	ys->req_policy = &ethtool_linkinfo_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKINFO_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_linkinfo_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_LINKINFO_GET - notify */
/* Free a heap-allocated linkinfo notification and its owned members. */
void ethtool_linkinfo_get_ntf_free(struct ethtool_linkinfo_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	free(rsp);
}

/* ============== ETHTOOL_MSG_LINKINFO_SET ============== */
/* ETHTOOL_MSG_LINKINFO_SET - do */
/* Free a heap-allocated linkinfo-set request and its owned members. */
void ethtool_linkinfo_set_req_free(struct ethtool_linkinfo_set_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Issue ETHTOOL_MSG_LINKINFO_SET; returns 0 on success, -1 on error.
 * Only attributes flagged in req->_present are emitted. */
int ethtool_linkinfo_set(struct ynl_sock *ys,
			 struct ethtool_linkinfo_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKINFO_SET, 1);
	ys->req_policy = &ethtool_linkinfo_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKINFO_HEADER, &req->header);
	if (req->_present.port)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_PORT, req->port);
	if (req->_present.phyaddr)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_PHYADDR, req->phyaddr);
	if (req->_present.tp_mdix)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TP_MDIX, req->tp_mdix);
	if (req->_present.tp_mdix_ctrl)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TP_MDIX_CTRL,
				req->tp_mdix_ctrl);
	if (req->_present.transceiver)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKINFO_TRANSCEIVER,
				req->transceiver);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ==============
ETHTOOL_MSG_LINKMODES_GET ============== */
/* ETHTOOL_MSG_LINKMODES_GET - do */
/* Free a heap-allocated linkmodes-get request and its owned members. */
void ethtool_linkmodes_get_req_free(struct ethtool_linkmodes_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Free a heap-allocated linkmodes-get response and its owned members. */
void ethtool_linkmodes_get_rsp_free(struct ethtool_linkmodes_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	ethtool_bitset_free(&rsp->ours);
	ethtool_bitset_free(&rsp->peer);
	free(rsp);
}

/* Parse callback for one ETHTOOL_MSG_LINKMODES_GET response message
 * (the else-if chain continues on the next collapsed line). */
int ethtool_linkmodes_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_linkmodes_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_LINKMODES_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;
			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_LINKMODES_AUTONEG) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.autoneg = 1;
			dst->autoneg = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKMODES_OURS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ours = 1;
			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->ours;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_LINKMODES_PEER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.peer = 1;
			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->peer;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_LINKMODES_SPEED) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.speed = 1;
			dst->speed = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_LINKMODES_DUPLEX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.duplex = 1;
			dst->duplex =
/* Continuation of ethtool_linkmodes_get_rsp_parse(): remaining attrs. */
			    mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.master_slave_cfg = 1;
			dst->master_slave_cfg = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.master_slave_state = 1;
			dst->master_slave_state = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKMODES_LANES) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.lanes = 1;
			dst->lanes = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_LINKMODES_RATE_MATCHING) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.rate_matching = 1;
			dst->rate_matching = mnl_attr_get_u8(attr);
		}
	}

	return MNL_CB_OK;
}

/* Issue ETHTOOL_MSG_LINKMODES_GET and return the parsed, heap-allocated
 * response (caller frees with ethtool_linkmodes_get_rsp_free), or NULL. */
struct ethtool_linkmodes_get_rsp *
ethtool_linkmodes_get(struct ynl_sock *ys,
		      struct ethtool_linkmodes_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_linkmodes_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKMODES_GET, 1);
	ys->req_policy = &ethtool_linkmodes_nest;
	yrs.yarg.rsp_policy = &ethtool_linkmodes_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKMODES_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_linkmodes_get_rsp_parse;
	yrs.rsp_cmd = ETHTOOL_MSG_LINKMODES_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_linkmodes_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_LINKMODES_GET - dump */
/* Free a linkmodes-get dump list (terminated by YNL_LIST_END). */
void ethtool_linkmodes_get_list_free(struct ethtool_linkmodes_get_list *rsp)
{
	struct ethtool_linkmodes_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_bitset_free(&rsp->obj.ours);
		ethtool_bitset_free(&rsp->obj.peer);
		free(rsp);
	}
}

/* Return type of ethtool_linkmodes_get_dump(); definition continues on the
 * next collapsed line. */
struct ethtool_linkmodes_get_list *
/* Dump variant of linkmodes-get: returns the linked response list (freed
 * with ethtool_linkmodes_get_list_free), or NULL on error. */
ethtool_linkmodes_get_dump(struct ynl_sock *ys,
			   struct ethtool_linkmodes_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_linkmodes_get_list);
	yds.cb = ethtool_linkmodes_get_rsp_parse;
	yds.rsp_cmd = ETHTOOL_MSG_LINKMODES_GET;
	yds.rsp_policy = &ethtool_linkmodes_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_LINKMODES_GET, 1);
	ys->req_policy = &ethtool_linkmodes_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKMODES_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_linkmodes_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_LINKMODES_GET - notify */
/* Free a heap-allocated linkmodes notification and its owned members. */
void ethtool_linkmodes_get_ntf_free(struct ethtool_linkmodes_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	ethtool_bitset_free(&rsp->obj.ours);
	ethtool_bitset_free(&rsp->obj.peer);
	free(rsp);
}

/* ============== ETHTOOL_MSG_LINKMODES_SET ============== */
/* ETHTOOL_MSG_LINKMODES_SET - do */
/* Free a heap-allocated linkmodes-set request and its owned members. */
void ethtool_linkmodes_set_req_free(struct ethtool_linkmodes_set_req *req)
{
	ethtool_header_free(&req->header);
	ethtool_bitset_free(&req->ours);
	ethtool_bitset_free(&req->peer);
	free(req);
}

/* Issue ETHTOOL_MSG_LINKMODES_SET; 0 on success, -1 on error. Only attrs
 * flagged in req->_present are emitted (continues on next collapsed line). */
int ethtool_linkmodes_set(struct ynl_sock *ys,
			  struct ethtool_linkmodes_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKMODES_SET, 1);
	ys->req_policy = &ethtool_linkmodes_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKMODES_HEADER, &req->header);
	if (req->_present.autoneg)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_AUTONEG, req->autoneg);
	if (req->_present.ours)
		ethtool_bitset_put(nlh, ETHTOOL_A_LINKMODES_OURS, &req->ours);
	if (req->_present.peer)
		ethtool_bitset_put(nlh, ETHTOOL_A_LINKMODES_PEER, &req->peer);
	if (req->_present.speed)
		mnl_attr_put_u32(nlh, ETHTOOL_A_LINKMODES_SPEED, req->speed);
	if (req->_present.duplex)
		mnl_attr_put_u8(nlh,
/* Continuation of ethtool_linkmodes_set(): remaining optional attrs. */
				ETHTOOL_A_LINKMODES_DUPLEX, req->duplex);
	if (req->_present.master_slave_cfg)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG,
				req->master_slave_cfg);
	if (req->_present.master_slave_state)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE,
				req->master_slave_state);
	if (req->_present.lanes)
		mnl_attr_put_u32(nlh, ETHTOOL_A_LINKMODES_LANES, req->lanes);
	if (req->_present.rate_matching)
		mnl_attr_put_u8(nlh, ETHTOOL_A_LINKMODES_RATE_MATCHING,
				req->rate_matching);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_LINKSTATE_GET ============== */
/* ETHTOOL_MSG_LINKSTATE_GET - do */
/* Free a heap-allocated linkstate-get request and its owned members. */
void ethtool_linkstate_get_req_free(struct ethtool_linkstate_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Free a heap-allocated linkstate-get response and its owned members. */
void ethtool_linkstate_get_rsp_free(struct ethtool_linkstate_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	free(rsp);
}

/* Parse callback for one ETHTOOL_MSG_LINKSTATE_GET response message
 * (the else-if chain continues on the next collapsed line). */
int ethtool_linkstate_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_linkstate_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_LINKSTATE_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;
			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_LINKSTATE_LINK) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.link = 1;
			dst->link = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKSTATE_SQI) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.sqi = 1;
			dst->sqi = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_LINKSTATE_SQI_MAX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.sqi_max = 1;
			dst->sqi_max = mnl_attr_get_u32(attr);
		} else if
/* Continuation of ethtool_linkstate_get_rsp_parse(): extended-state attrs. */
		    (type == ETHTOOL_A_LINKSTATE_EXT_STATE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ext_state = 1;
			dst->ext_state = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKSTATE_EXT_SUBSTATE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ext_substate = 1;
			dst->ext_substate = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ext_down_cnt = 1;
			dst->ext_down_cnt = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/* Issue ETHTOOL_MSG_LINKSTATE_GET and return the parsed, heap-allocated
 * response (caller frees with ethtool_linkstate_get_rsp_free), or NULL. */
struct ethtool_linkstate_get_rsp *
ethtool_linkstate_get(struct ynl_sock *ys,
		      struct ethtool_linkstate_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_linkstate_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_LINKSTATE_GET, 1);
	ys->req_policy = &ethtool_linkstate_nest;
	yrs.yarg.rsp_policy = &ethtool_linkstate_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKSTATE_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_linkstate_get_rsp_parse;
	yrs.rsp_cmd = ETHTOOL_MSG_LINKSTATE_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_linkstate_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_LINKSTATE_GET - dump */
/* Free a linkstate-get dump list (terminated by YNL_LIST_END). */
void ethtool_linkstate_get_list_free(struct ethtool_linkstate_get_list *rsp)
{
	struct ethtool_linkstate_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		free(rsp);
	}
}

/* Dump variant of linkstate-get (continues on the next collapsed line). */
struct ethtool_linkstate_get_list *
ethtool_linkstate_get_dump(struct ynl_sock *ys,
			   struct ethtool_linkstate_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_linkstate_get_list);
	yds.cb = ethtool_linkstate_get_rsp_parse;
	yds.rsp_cmd = ETHTOOL_MSG_LINKSTATE_GET;
	yds.rsp_policy =
/* Continuation of ethtool_linkstate_get_dump(): send and collect. */
			 &ethtool_linkstate_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_LINKSTATE_GET, 1);
	ys->req_policy = &ethtool_linkstate_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_LINKSTATE_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_linkstate_get_list_free(yds.first);
	return NULL;
}

/* ============== ETHTOOL_MSG_DEBUG_GET ============== */
/* ETHTOOL_MSG_DEBUG_GET - do */
/* Free a heap-allocated debug-get request and its owned members. */
void ethtool_debug_get_req_free(struct ethtool_debug_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Free a heap-allocated debug-get response and its owned members. */
void ethtool_debug_get_rsp_free(struct ethtool_debug_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	ethtool_bitset_free(&rsp->msgmask);
	free(rsp);
}

/* Parse callback for one ETHTOOL_MSG_DEBUG_GET response message. */
int ethtool_debug_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_debug_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_DEBUG_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;
			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_DEBUG_MSGMASK) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.msgmask = 1;
			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->msgmask;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		}
	}

	return MNL_CB_OK;
}

/* Issue ETHTOOL_MSG_DEBUG_GET and return the parsed, heap-allocated
 * response, or NULL (continues on the next collapsed line). */
struct ethtool_debug_get_rsp *
ethtool_debug_get(struct ynl_sock *ys, struct ethtool_debug_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_debug_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_DEBUG_GET, 1);
	ys->req_policy = &ethtool_debug_nest;
	yrs.yarg.rsp_policy = &ethtool_debug_nest;

	if
/* Continuation of ethtool_debug_get(): emit request, run, parse. */
	   (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_DEBUG_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_debug_get_rsp_parse;
	yrs.rsp_cmd = ETHTOOL_MSG_DEBUG_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_debug_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_DEBUG_GET - dump */
/* Free a debug-get dump list (terminated by YNL_LIST_END). */
void ethtool_debug_get_list_free(struct ethtool_debug_get_list *rsp)
{
	struct ethtool_debug_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_bitset_free(&rsp->obj.msgmask);
		free(rsp);
	}
}

/* Dump variant of debug-get: returns the linked response list, or NULL. */
struct ethtool_debug_get_list *
ethtool_debug_get_dump(struct ynl_sock *ys,
		       struct ethtool_debug_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_debug_get_list);
	yds.cb = ethtool_debug_get_rsp_parse;
	yds.rsp_cmd = ETHTOOL_MSG_DEBUG_GET;
	yds.rsp_policy = &ethtool_debug_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_DEBUG_GET, 1);
	ys->req_policy = &ethtool_debug_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_DEBUG_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_debug_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_DEBUG_GET - notify */
/* Free a heap-allocated debug notification and its owned members. */
void ethtool_debug_get_ntf_free(struct ethtool_debug_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	ethtool_bitset_free(&rsp->obj.msgmask);
	free(rsp);
}

/* ============== ETHTOOL_MSG_DEBUG_SET ============== */
/* ETHTOOL_MSG_DEBUG_SET - do */
/* Free a heap-allocated debug-set request and its owned members. */
void ethtool_debug_set_req_free(struct ethtool_debug_set_req *req)
{
	ethtool_header_free(&req->header);
	ethtool_bitset_free(&req->msgmask);
	free(req);
}

/* Issue ETHTOOL_MSG_DEBUG_SET; 0 on success, -1 on error
 * (body continues on the next collapsed line). */
int ethtool_debug_set(struct ynl_sock *ys, struct ethtool_debug_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_DEBUG_SET, 1);
ys->req_policy = &ethtool_debug_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_DEBUG_HEADER, &req->header); if (req->_present.msgmask) ethtool_bitset_put(nlh, ETHTOOL_A_DEBUG_MSGMASK, &req->msgmask); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_WOL_GET ============== */ /* ETHTOOL_MSG_WOL_GET - do */ void ethtool_wol_get_req_free(struct ethtool_wol_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_wol_get_rsp_free(struct ethtool_wol_get_rsp *rsp) { ethtool_header_free(&rsp->header); ethtool_bitset_free(&rsp->modes); free(rsp->sopass); free(rsp); } int ethtool_wol_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct ethtool_wol_get_rsp *dst; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_WOL_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_WOL_MODES) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.modes = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->modes; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_WOL_SOPASS) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.sopass_len = len; dst->sopass = malloc(len); memcpy(dst->sopass, mnl_attr_get_payload(attr), len); } } return MNL_CB_OK; } struct ethtool_wol_get_rsp * ethtool_wol_get(struct ynl_sock *ys, struct ethtool_wol_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_wol_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = 
/* Continuation of ethtool_wol_get(): build, send, and parse the request. */
	      ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_WOL_GET, 1);
	ys->req_policy = &ethtool_wol_nest;
	yrs.yarg.rsp_policy = &ethtool_wol_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_WOL_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_wol_get_rsp_parse;
	yrs.rsp_cmd = ETHTOOL_MSG_WOL_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_wol_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_WOL_GET - dump */
/* Free a wol-get dump list (terminated by YNL_LIST_END). */
void ethtool_wol_get_list_free(struct ethtool_wol_get_list *rsp)
{
	struct ethtool_wol_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_bitset_free(&rsp->obj.modes);
		free(rsp->obj.sopass);
		free(rsp);
	}
}

/* Dump variant of wol-get: returns the linked response list, or NULL. */
struct ethtool_wol_get_list *
ethtool_wol_get_dump(struct ynl_sock *ys, struct ethtool_wol_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_wol_get_list);
	yds.cb = ethtool_wol_get_rsp_parse;
	yds.rsp_cmd = ETHTOOL_MSG_WOL_GET;
	yds.rsp_policy = &ethtool_wol_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_WOL_GET, 1);
	ys->req_policy = &ethtool_wol_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_WOL_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_wol_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_WOL_GET - notify */
/* Free a heap-allocated wol notification and its owned members. */
void ethtool_wol_get_ntf_free(struct ethtool_wol_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	ethtool_bitset_free(&rsp->obj.modes);
	free(rsp->obj.sopass);
	free(rsp);
}

/* ============== ETHTOOL_MSG_WOL_SET ============== */
/* ETHTOOL_MSG_WOL_SET - do */
/* Free a heap-allocated wol-set request and its owned members. */
void ethtool_wol_set_req_free(struct ethtool_wol_set_req *req)
{
	ethtool_header_free(&req->header);
	ethtool_bitset_free(&req->modes);
	free(req->sopass);
	free(req);
}

/* Issue ETHTOOL_MSG_WOL_SET (signature completes on next collapsed line). */
int ethtool_wol_set(struct ynl_sock *ys,
struct ethtool_wol_set_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_WOL_SET, 1); ys->req_policy = &ethtool_wol_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_WOL_HEADER, &req->header); if (req->_present.modes) ethtool_bitset_put(nlh, ETHTOOL_A_WOL_MODES, &req->modes); if (req->_present.sopass_len) mnl_attr_put(nlh, ETHTOOL_A_WOL_SOPASS, req->_present.sopass_len, req->sopass); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_FEATURES_GET ============== */ /* ETHTOOL_MSG_FEATURES_GET - do */ void ethtool_features_get_req_free(struct ethtool_features_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_features_get_rsp_free(struct ethtool_features_get_rsp *rsp) { ethtool_header_free(&rsp->header); ethtool_bitset_free(&rsp->hw); ethtool_bitset_free(&rsp->wanted); ethtool_bitset_free(&rsp->active); ethtool_bitset_free(&rsp->nochange); free(rsp); } int ethtool_features_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_features_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_FEATURES_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_HW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.hw = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->hw; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_WANTED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.wanted = 1; parg.rsp_policy = 
&ethtool_bitset_nest; parg.data = &dst->wanted; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_ACTIVE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.active = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->active; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_NOCHANGE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.nochange = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->nochange; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } struct ethtool_features_get_rsp * ethtool_features_get(struct ynl_sock *ys, struct ethtool_features_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_features_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEATURES_GET, 1); ys->req_policy = &ethtool_features_nest; yrs.yarg.rsp_policy = &ethtool_features_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_FEATURES_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_features_get_rsp_parse; yrs.rsp_cmd = ETHTOOL_MSG_FEATURES_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_features_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_FEATURES_GET - dump */ void ethtool_features_get_list_free(struct ethtool_features_get_list *rsp) { struct ethtool_features_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); ethtool_bitset_free(&rsp->obj.hw); ethtool_bitset_free(&rsp->obj.wanted); ethtool_bitset_free(&rsp->obj.active); ethtool_bitset_free(&rsp->obj.nochange); free(rsp); } } struct ethtool_features_get_list * ethtool_features_get_dump(struct ynl_sock *ys, struct ethtool_features_get_req_dump *req) { struct ynl_dump_state yds 
= {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_features_get_list); yds.cb = ethtool_features_get_rsp_parse; yds.rsp_cmd = ETHTOOL_MSG_FEATURES_GET; yds.rsp_policy = &ethtool_features_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_FEATURES_GET, 1); ys->req_policy = &ethtool_features_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_FEATURES_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_features_get_list_free(yds.first); return NULL; } /* ETHTOOL_MSG_FEATURES_GET - notify */ void ethtool_features_get_ntf_free(struct ethtool_features_get_ntf *rsp) { ethtool_header_free(&rsp->obj.header); ethtool_bitset_free(&rsp->obj.hw); ethtool_bitset_free(&rsp->obj.wanted); ethtool_bitset_free(&rsp->obj.active); ethtool_bitset_free(&rsp->obj.nochange); free(rsp); } /* ============== ETHTOOL_MSG_FEATURES_SET ============== */ /* ETHTOOL_MSG_FEATURES_SET - do */ void ethtool_features_set_req_free(struct ethtool_features_set_req *req) { ethtool_header_free(&req->header); ethtool_bitset_free(&req->hw); ethtool_bitset_free(&req->wanted); ethtool_bitset_free(&req->active); ethtool_bitset_free(&req->nochange); free(req); } void ethtool_features_set_rsp_free(struct ethtool_features_set_rsp *rsp) { ethtool_header_free(&rsp->header); ethtool_bitset_free(&rsp->hw); ethtool_bitset_free(&rsp->wanted); ethtool_bitset_free(&rsp->active); ethtool_bitset_free(&rsp->nochange); free(rsp); } int ethtool_features_set_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_features_set_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_FEATURES_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; 
parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_HW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.hw = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->hw; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_WANTED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.wanted = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->wanted; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_ACTIVE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.active = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->active; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_FEATURES_NOCHANGE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.nochange = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->nochange; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } struct ethtool_features_set_rsp * ethtool_features_set(struct ynl_sock *ys, struct ethtool_features_set_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_features_set_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEATURES_SET, 1); ys->req_policy = &ethtool_features_nest; yrs.yarg.rsp_policy = &ethtool_features_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_FEATURES_HEADER, &req->header); if (req->_present.hw) ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_HW, &req->hw); if (req->_present.wanted) ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_WANTED, &req->wanted); if (req->_present.active) ethtool_bitset_put(nlh, ETHTOOL_A_FEATURES_ACTIVE, &req->active); if (req->_present.nochange) ethtool_bitset_put(nlh, 
ETHTOOL_A_FEATURES_NOCHANGE, &req->nochange); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_features_set_rsp_parse; yrs.rsp_cmd = ETHTOOL_MSG_FEATURES_SET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_features_set_rsp_free(rsp); return NULL; } /* ============== ETHTOOL_MSG_PRIVFLAGS_GET ============== */ /* ETHTOOL_MSG_PRIVFLAGS_GET - do */ void ethtool_privflags_get_req_free(struct ethtool_privflags_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_privflags_get_rsp_free(struct ethtool_privflags_get_rsp *rsp) { ethtool_header_free(&rsp->header); ethtool_bitset_free(&rsp->flags); free(rsp); } int ethtool_privflags_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_privflags_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_PRIVFLAGS_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_PRIVFLAGS_FLAGS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.flags = 1; parg.rsp_policy = &ethtool_bitset_nest; parg.data = &dst->flags; if (ethtool_bitset_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } struct ethtool_privflags_get_rsp * ethtool_privflags_get(struct ynl_sock *ys, struct ethtool_privflags_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_privflags_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PRIVFLAGS_GET, 1); ys->req_policy = &ethtool_privflags_nest; yrs.yarg.rsp_policy = &ethtool_privflags_nest; if 
(req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PRIVFLAGS_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_privflags_get_rsp_parse; yrs.rsp_cmd = 14; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_privflags_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_PRIVFLAGS_GET - dump */ void ethtool_privflags_get_list_free(struct ethtool_privflags_get_list *rsp) { struct ethtool_privflags_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); ethtool_bitset_free(&rsp->obj.flags); free(rsp); } } struct ethtool_privflags_get_list * ethtool_privflags_get_dump(struct ynl_sock *ys, struct ethtool_privflags_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_privflags_get_list); yds.cb = ethtool_privflags_get_rsp_parse; yds.rsp_cmd = 14; yds.rsp_policy = &ethtool_privflags_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PRIVFLAGS_GET, 1); ys->req_policy = &ethtool_privflags_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PRIVFLAGS_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_privflags_get_list_free(yds.first); return NULL; } /* ETHTOOL_MSG_PRIVFLAGS_GET - notify */ void ethtool_privflags_get_ntf_free(struct ethtool_privflags_get_ntf *rsp) { ethtool_header_free(&rsp->obj.header); ethtool_bitset_free(&rsp->obj.flags); free(rsp); } /* ============== ETHTOOL_MSG_PRIVFLAGS_SET ============== */ /* ETHTOOL_MSG_PRIVFLAGS_SET - do */ void ethtool_privflags_set_req_free(struct ethtool_privflags_set_req *req) { ethtool_header_free(&req->header); ethtool_bitset_free(&req->flags); free(req); } int ethtool_privflags_set(struct ynl_sock *ys, struct ethtool_privflags_set_req *req) { struct nlmsghdr *nlh; int err; nlh = 
ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PRIVFLAGS_SET, 1); ys->req_policy = &ethtool_privflags_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PRIVFLAGS_HEADER, &req->header); if (req->_present.flags) ethtool_bitset_put(nlh, ETHTOOL_A_PRIVFLAGS_FLAGS, &req->flags); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_RINGS_GET ============== */ /* ETHTOOL_MSG_RINGS_GET - do */ void ethtool_rings_get_req_free(struct ethtool_rings_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_rings_get_rsp_free(struct ethtool_rings_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_rings_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_rings_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_RINGS_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_RINGS_RX_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_max = 1; dst->rx_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_RX_MINI_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_mini_max = 1; dst->rx_mini_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_RX_JUMBO_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_jumbo_max = 1; dst->rx_jumbo_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_TX_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_max = 1; dst->tx_max = mnl_attr_get_u32(attr); } else if (type == 
ETHTOOL_A_RINGS_RX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx = 1; dst->rx = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_RX_MINI) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_mini = 1; dst->rx_mini = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_RX_JUMBO) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_jumbo = 1; dst->rx_jumbo = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_TX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx = 1; dst->tx = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_RX_BUF_LEN) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_buf_len = 1; dst->rx_buf_len = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_TCP_DATA_SPLIT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tcp_data_split = 1; dst->tcp_data_split = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_RINGS_CQE_SIZE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.cqe_size = 1; dst->cqe_size = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_TX_PUSH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_push = 1; dst->tx_push = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_RINGS_RX_PUSH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_push = 1; dst->rx_push = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_push_buf_len = 1; dst->tx_push_buf_len = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_push_buf_len_max = 1; dst->tx_push_buf_len_max = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct ethtool_rings_get_rsp * ethtool_rings_get(struct ynl_sock *ys, struct 
ethtool_rings_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_rings_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_RINGS_GET, 1); ys->req_policy = &ethtool_rings_nest; yrs.yarg.rsp_policy = &ethtool_rings_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_RINGS_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_rings_get_rsp_parse; yrs.rsp_cmd = 16; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_rings_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_RINGS_GET - dump */ void ethtool_rings_get_list_free(struct ethtool_rings_get_list *rsp) { struct ethtool_rings_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp); } } struct ethtool_rings_get_list * ethtool_rings_get_dump(struct ynl_sock *ys, struct ethtool_rings_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_rings_get_list); yds.cb = ethtool_rings_get_rsp_parse; yds.rsp_cmd = 16; yds.rsp_policy = &ethtool_rings_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_RINGS_GET, 1); ys->req_policy = &ethtool_rings_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_RINGS_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_rings_get_list_free(yds.first); return NULL; } /* ETHTOOL_MSG_RINGS_GET - notify */ void ethtool_rings_get_ntf_free(struct ethtool_rings_get_ntf *rsp) { ethtool_header_free(&rsp->obj.header); free(rsp); } /* ============== ETHTOOL_MSG_RINGS_SET ============== */ /* ETHTOOL_MSG_RINGS_SET - do */ void ethtool_rings_set_req_free(struct ethtool_rings_set_req *req) { ethtool_header_free(&req->header); free(req); } int ethtool_rings_set(struct ynl_sock 
*ys, struct ethtool_rings_set_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_RINGS_SET, 1); ys->req_policy = &ethtool_rings_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_RINGS_HEADER, &req->header); if (req->_present.rx_max) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_MAX, req->rx_max); if (req->_present.rx_mini_max) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_MINI_MAX, req->rx_mini_max); if (req->_present.rx_jumbo_max) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_JUMBO_MAX, req->rx_jumbo_max); if (req->_present.tx_max) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_MAX, req->tx_max); if (req->_present.rx) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX, req->rx); if (req->_present.rx_mini) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_MINI, req->rx_mini); if (req->_present.rx_jumbo) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_JUMBO, req->rx_jumbo); if (req->_present.tx) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX, req->tx); if (req->_present.rx_buf_len) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_RX_BUF_LEN, req->rx_buf_len); if (req->_present.tcp_data_split) mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_TCP_DATA_SPLIT, req->tcp_data_split); if (req->_present.cqe_size) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_CQE_SIZE, req->cqe_size); if (req->_present.tx_push) mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_TX_PUSH, req->tx_push); if (req->_present.rx_push) mnl_attr_put_u8(nlh, ETHTOOL_A_RINGS_RX_PUSH, req->rx_push); if (req->_present.tx_push_buf_len) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, req->tx_push_buf_len); if (req->_present.tx_push_buf_len_max) mnl_attr_put_u32(nlh, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, req->tx_push_buf_len_max); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_CHANNELS_GET ============== */ /* ETHTOOL_MSG_CHANNELS_GET - do */ void ethtool_channels_get_req_free(struct ethtool_channels_get_req *req) { ethtool_header_free(&req->header); free(req); } void 
ethtool_channels_get_rsp_free(struct ethtool_channels_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_channels_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_channels_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_CHANNELS_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_CHANNELS_RX_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_max = 1; dst->rx_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_TX_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_max = 1; dst->tx_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_OTHER_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.other_max = 1; dst->other_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_COMBINED_MAX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.combined_max = 1; dst->combined_max = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_RX_COUNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_count = 1; dst->rx_count = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_TX_COUNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_count = 1; dst->tx_count = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_OTHER_COUNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.other_count = 1; dst->other_count = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_CHANNELS_COMBINED_COUNT) { 
if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.combined_count = 1; dst->combined_count = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct ethtool_channels_get_rsp * ethtool_channels_get(struct ynl_sock *ys, struct ethtool_channels_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_channels_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CHANNELS_GET, 1); ys->req_policy = &ethtool_channels_nest; yrs.yarg.rsp_policy = &ethtool_channels_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_CHANNELS_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_channels_get_rsp_parse; yrs.rsp_cmd = 18; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_channels_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_CHANNELS_GET - dump */ void ethtool_channels_get_list_free(struct ethtool_channels_get_list *rsp) { struct ethtool_channels_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp); } } struct ethtool_channels_get_list * ethtool_channels_get_dump(struct ynl_sock *ys, struct ethtool_channels_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_channels_get_list); yds.cb = ethtool_channels_get_rsp_parse; yds.rsp_cmd = 18; yds.rsp_policy = &ethtool_channels_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_CHANNELS_GET, 1); ys->req_policy = &ethtool_channels_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_CHANNELS_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_channels_get_list_free(yds.first); return NULL; } /* ETHTOOL_MSG_CHANNELS_GET - notify */ void ethtool_channels_get_ntf_free(struct 
ethtool_channels_get_ntf *rsp) { ethtool_header_free(&rsp->obj.header); free(rsp); } /* ============== ETHTOOL_MSG_CHANNELS_SET ============== */ /* ETHTOOL_MSG_CHANNELS_SET - do */ void ethtool_channels_set_req_free(struct ethtool_channels_set_req *req) { ethtool_header_free(&req->header); free(req); } int ethtool_channels_set(struct ynl_sock *ys, struct ethtool_channels_set_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CHANNELS_SET, 1); ys->req_policy = &ethtool_channels_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_CHANNELS_HEADER, &req->header); if (req->_present.rx_max) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_RX_MAX, req->rx_max); if (req->_present.tx_max) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_TX_MAX, req->tx_max); if (req->_present.other_max) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_OTHER_MAX, req->other_max); if (req->_present.combined_max) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_COMBINED_MAX, req->combined_max); if (req->_present.rx_count) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_RX_COUNT, req->rx_count); if (req->_present.tx_count) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_TX_COUNT, req->tx_count); if (req->_present.other_count) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_OTHER_COUNT, req->other_count); if (req->_present.combined_count) mnl_attr_put_u32(nlh, ETHTOOL_A_CHANNELS_COMBINED_COUNT, req->combined_count); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_COALESCE_GET ============== */ /* ETHTOOL_MSG_COALESCE_GET - do */ void ethtool_coalesce_get_req_free(struct ethtool_coalesce_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_coalesce_get_rsp_free(struct ethtool_coalesce_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_coalesce_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_coalesce_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr 
*attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_COALESCE_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_COALESCE_RX_USECS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_usecs = 1; dst->rx_usecs = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_max_frames = 1; dst->rx_max_frames = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_USECS_IRQ) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_usecs_irq = 1; dst->rx_usecs_irq = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_max_frames_irq = 1; dst->rx_max_frames_irq = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_TX_USECS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_usecs = 1; dst->tx_usecs = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_max_frames = 1; dst->tx_max_frames = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_TX_USECS_IRQ) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_usecs_irq = 1; dst->tx_usecs_irq = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_max_frames_irq = 1; dst->tx_max_frames_irq = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_STATS_BLOCK_USECS) { if 
(ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.stats_block_usecs = 1; dst->stats_block_usecs = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.use_adaptive_rx = 1; dst->use_adaptive_rx = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.use_adaptive_tx = 1; dst->use_adaptive_tx = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_COALESCE_PKT_RATE_LOW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.pkt_rate_low = 1; dst->pkt_rate_low = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_USECS_LOW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_usecs_low = 1; dst->rx_usecs_low = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_max_frames_low = 1; dst->rx_max_frames_low = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_TX_USECS_LOW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_usecs_low = 1; dst->tx_usecs_low = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_max_frames_low = 1; dst->tx_max_frames_low = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_PKT_RATE_HIGH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.pkt_rate_high = 1; dst->pkt_rate_high = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_USECS_HIGH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_usecs_high = 1; dst->rx_usecs_high = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; 
/* NOTE(review): chunk begins mid-way through ethtool_coalesce_get_rsp_parse();
 * the leading fragment below continues that function's attribute-decode chain.
 * Each branch validates the attribute, sets the matching _present bit, then
 * copies the scalar value into the response struct.
 */
			dst->_present.rx_max_frames_high = 1;
			dst->rx_max_frames_high = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_COALESCE_TX_USECS_HIGH) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_usecs_high = 1;
			dst->tx_usecs_high = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_max_frames_high = 1;
			dst->tx_max_frames_high = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.rate_sample_interval = 1;
			dst->rate_sample_interval = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_COALESCE_USE_CQE_MODE_TX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.use_cqe_mode_tx = 1;
			dst->use_cqe_mode_tx = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_COALESCE_USE_CQE_MODE_RX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.use_cqe_mode_rx = 1;
			dst->use_cqe_mode_rx = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_aggr_max_bytes = 1;
			dst->tx_aggr_max_bytes = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_aggr_max_frames = 1;
			dst->tx_aggr_max_frames = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_aggr_time_usecs = 1;
			dst->tx_aggr_time_usecs = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/* ETHTOOL_MSG_COALESCE_GET - do: send one request, parse the single reply.
 * Returns a heap-allocated response (caller frees with
 * ethtool_coalesce_get_rsp_free) or NULL on failure.
 */
struct ethtool_coalesce_get_rsp *
ethtool_coalesce_get(struct ynl_sock *ys, struct ethtool_coalesce_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_coalesce_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_COALESCE_GET, 1);
	ys->req_policy = &ethtool_coalesce_nest;
	yrs.yarg.rsp_policy = &ethtool_coalesce_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_COALESCE_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_coalesce_get_rsp_parse;
	yrs.rsp_cmd = 20;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_coalesce_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_COALESCE_GET - dump */
/* Free a YNL_LIST_END-terminated list of dump responses. */
void ethtool_coalesce_get_list_free(struct ethtool_coalesce_get_list *rsp)
{
	struct ethtool_coalesce_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		free(rsp);
	}
}

/* Dump coalesce state; returns the first list node or NULL on failure. */
struct ethtool_coalesce_get_list *
ethtool_coalesce_get_dump(struct ynl_sock *ys,
			  struct ethtool_coalesce_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_coalesce_get_list);
	yds.cb = ethtool_coalesce_get_rsp_parse;
	yds.rsp_cmd = 20;
	yds.rsp_policy = &ethtool_coalesce_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_COALESCE_GET, 1);
	ys->req_policy = &ethtool_coalesce_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_COALESCE_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_coalesce_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_COALESCE_GET - notify */
void ethtool_coalesce_get_ntf_free(struct ethtool_coalesce_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	free(rsp);
}

/* ============== ETHTOOL_MSG_COALESCE_SET ============== */
/* ETHTOOL_MSG_COALESCE_SET - do */
void ethtool_coalesce_set_req_free(struct ethtool_coalesce_set_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Build and send a coalesce-set request; only _present fields are emitted.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): function continues past this chunk boundary.
 */
int ethtool_coalesce_set(struct ynl_sock *ys,
			 struct ethtool_coalesce_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_COALESCE_SET, 1);
	ys->req_policy = &ethtool_coalesce_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_COALESCE_HEADER, &req->header);
	if (req->_present.rx_usecs)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS, req->rx_usecs);
	if (req->_present.rx_max_frames)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES, req->rx_max_frames);
	if (req->_present.rx_usecs_irq)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS_IRQ, req->rx_usecs_irq);
	if (req->_present.rx_max_frames_irq)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ, req->rx_max_frames_irq);
	if (req->_present.tx_usecs)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS, req->tx_usecs);
	if (req->_present.tx_max_frames)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES, req->tx_max_frames);
	if (req->_present.tx_usecs_irq)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS_IRQ, req->tx_usecs_irq);
	if (req->_present.tx_max_frames_irq)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ, req->tx_max_frames_irq);
	if (req->_present.stats_block_usecs)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_STATS_BLOCK_USECS, req->stats_block_usecs);
	if (req->_present.use_adaptive_rx)
		mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX, req->use_adaptive_rx);
	if (req->_present.use_adaptive_tx)
		mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX, req->use_adaptive_tx);
	if (req->_present.pkt_rate_low)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_PKT_RATE_LOW, req->pkt_rate_low);
	if (req->_present.rx_usecs_low)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS_LOW, req->rx_usecs_low);
	if (req->_present.rx_max_frames_low)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW, req->rx_max_frames_low);
	if (req->_present.tx_usecs_low)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS_LOW, req->tx_usecs_low);
	if (req->_present.tx_max_frames_low)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW, req->tx_max_frames_low);
	if (req->_present.pkt_rate_high)
		/* NOTE(review): chunk begins mid-way through ethtool_coalesce_set();
		 * the fragment below finishes emitting its optional attributes.
		 */
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_PKT_RATE_HIGH, req->pkt_rate_high);
	if (req->_present.rx_usecs_high)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_USECS_HIGH, req->rx_usecs_high);
	if (req->_present.rx_max_frames_high)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH, req->rx_max_frames_high);
	if (req->_present.tx_usecs_high)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_USECS_HIGH, req->tx_usecs_high);
	if (req->_present.tx_max_frames_high)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH, req->tx_max_frames_high);
	if (req->_present.rate_sample_interval)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL, req->rate_sample_interval);
	if (req->_present.use_cqe_mode_tx)
		mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_CQE_MODE_TX, req->use_cqe_mode_tx);
	if (req->_present.use_cqe_mode_rx)
		mnl_attr_put_u8(nlh, ETHTOOL_A_COALESCE_USE_CQE_MODE_RX, req->use_cqe_mode_rx);
	if (req->_present.tx_aggr_max_bytes)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, req->tx_aggr_max_bytes);
	if (req->_present.tx_aggr_max_frames)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, req->tx_aggr_max_frames);
	if (req->_present.tx_aggr_time_usecs)
		mnl_attr_put_u32(nlh, ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, req->tx_aggr_time_usecs);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_PAUSE_GET ============== */
/* ETHTOOL_MSG_PAUSE_GET - do */
void ethtool_pause_get_req_free(struct ethtool_pause_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

void ethtool_pause_get_rsp_free(struct ethtool_pause_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	ethtool_pause_stat_free(&rsp->stats);
	free(rsp);
}

/* netlink callback: decode ETHTOOL_A_PAUSE_* attributes into *dst,
 * setting the matching _present bit per attribute seen.
 */
int ethtool_pause_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_pause_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_PAUSE_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;

			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_PAUSE_AUTONEG) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.autoneg = 1;
			dst->autoneg = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_PAUSE_RX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.rx = 1;
			dst->rx = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_PAUSE_TX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx = 1;
			dst->tx = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_PAUSE_STATS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.stats = 1;

			parg.rsp_policy = &ethtool_pause_stat_nest;
			parg.data = &dst->stats;
			if (ethtool_pause_stat_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_PAUSE_STATS_SRC) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.stats_src = 1;
			dst->stats_src = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/* Send one pause-get request and parse the reply; caller frees the result
 * with ethtool_pause_get_rsp_free(). NULL on failure.
 */
struct ethtool_pause_get_rsp *
ethtool_pause_get(struct ynl_sock *ys, struct ethtool_pause_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_pause_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PAUSE_GET, 1);
	ys->req_policy = &ethtool_pause_nest;
	yrs.yarg.rsp_policy = &ethtool_pause_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_PAUSE_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_pause_get_rsp_parse;
	yrs.rsp_cmd = 22;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_pause_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_PAUSE_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void ethtool_pause_get_list_free(struct ethtool_pause_get_list *rsp)
{
	struct ethtool_pause_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_pause_stat_free(&rsp->obj.stats);
		free(rsp);
	}
}

struct ethtool_pause_get_list *
ethtool_pause_get_dump(struct ynl_sock *ys,
		       struct ethtool_pause_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_pause_get_list);
	yds.cb = ethtool_pause_get_rsp_parse;
	yds.rsp_cmd = 22;
	yds.rsp_policy = &ethtool_pause_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PAUSE_GET, 1);
	ys->req_policy = &ethtool_pause_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_PAUSE_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_pause_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_PAUSE_GET - notify */
void ethtool_pause_get_ntf_free(struct ethtool_pause_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	ethtool_pause_stat_free(&rsp->obj.stats);
	free(rsp);
}

/* ============== ETHTOOL_MSG_PAUSE_SET ============== */
/* ETHTOOL_MSG_PAUSE_SET - do */
void ethtool_pause_set_req_free(struct ethtool_pause_set_req *req)
{
	ethtool_header_free(&req->header);
	ethtool_pause_stat_free(&req->stats);
	free(req);
}

/* Build and send a pause-set request; only _present fields are emitted.
 * NOTE(review): function continues past this chunk boundary.
 */
int ethtool_pause_set(struct ynl_sock *ys, struct ethtool_pause_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PAUSE_SET, 1);
	ys->req_policy = &ethtool_pause_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_PAUSE_HEADER, &req->header);
	if (req->_present.autoneg)
		mnl_attr_put_u8(nlh, ETHTOOL_A_PAUSE_AUTONEG, req->autoneg);
	if (req->_present.rx)
		mnl_attr_put_u8(nlh, ETHTOOL_A_PAUSE_RX, req->rx);
	if (req->_present.tx)
		mnl_attr_put_u8(nlh, ETHTOOL_A_PAUSE_TX, req->tx);
	if (req->_present.stats)
		/* NOTE(review): chunk begins mid-way through ethtool_pause_set();
		 * the fragment below finishes emitting its optional attributes.
		 */
		ethtool_pause_stat_put(nlh, ETHTOOL_A_PAUSE_STATS, &req->stats);
	if (req->_present.stats_src)
		mnl_attr_put_u32(nlh, ETHTOOL_A_PAUSE_STATS_SRC, req->stats_src);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_EEE_GET ============== */
/* ETHTOOL_MSG_EEE_GET - do */
void ethtool_eee_get_req_free(struct ethtool_eee_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

void ethtool_eee_get_rsp_free(struct ethtool_eee_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	ethtool_bitset_free(&rsp->modes_ours);
	ethtool_bitset_free(&rsp->modes_peer);
	free(rsp);
}

/* netlink callback: decode ETHTOOL_A_EEE_* attributes into *dst,
 * setting the matching _present bit per attribute seen.
 */
int ethtool_eee_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ynl_parse_arg *yarg = data;
	struct ethtool_eee_get_rsp *dst;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_EEE_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;

			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_EEE_MODES_OURS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.modes_ours = 1;

			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->modes_ours;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_EEE_MODES_PEER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.modes_peer = 1;

			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->modes_peer;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_EEE_ACTIVE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.active = 1;
			dst->active = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_EEE_ENABLED) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.enabled = 1;
			dst->enabled = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_EEE_TX_LPI_ENABLED) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_lpi_enabled = 1;
			dst->tx_lpi_enabled = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_EEE_TX_LPI_TIMER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_lpi_timer = 1;
			dst->tx_lpi_timer = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/* Send one EEE-get request and parse the reply; caller frees the result
 * with ethtool_eee_get_rsp_free(). NULL on failure.
 */
struct ethtool_eee_get_rsp *
ethtool_eee_get(struct ynl_sock *ys, struct ethtool_eee_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_eee_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_EEE_GET, 1);
	ys->req_policy = &ethtool_eee_nest;
	yrs.yarg.rsp_policy = &ethtool_eee_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_EEE_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_eee_get_rsp_parse;
	yrs.rsp_cmd = 24;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_eee_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_EEE_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void ethtool_eee_get_list_free(struct ethtool_eee_get_list *rsp)
{
	struct ethtool_eee_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_bitset_free(&rsp->obj.modes_ours);
		ethtool_bitset_free(&rsp->obj.modes_peer);
		free(rsp);
	}
}

struct ethtool_eee_get_list *
ethtool_eee_get_dump(struct ynl_sock *ys, struct ethtool_eee_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_eee_get_list);
	yds.cb = ethtool_eee_get_rsp_parse;
	yds.rsp_cmd = 24;
	yds.rsp_policy = &ethtool_eee_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_EEE_GET, 1);
	ys->req_policy = &ethtool_eee_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_EEE_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_eee_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_EEE_GET - notify */
void ethtool_eee_get_ntf_free(struct ethtool_eee_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	ethtool_bitset_free(&rsp->obj.modes_ours);
	ethtool_bitset_free(&rsp->obj.modes_peer);
	free(rsp);
}

/* ============== ETHTOOL_MSG_EEE_SET ============== */
/* ETHTOOL_MSG_EEE_SET - do */
void ethtool_eee_set_req_free(struct ethtool_eee_set_req *req)
{
	ethtool_header_free(&req->header);
	ethtool_bitset_free(&req->modes_ours);
	ethtool_bitset_free(&req->modes_peer);
	free(req);
}

/* Build and send an EEE-set request; only _present fields are emitted. */
int ethtool_eee_set(struct ynl_sock *ys, struct ethtool_eee_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_EEE_SET, 1);
	ys->req_policy = &ethtool_eee_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_EEE_HEADER, &req->header);
	if (req->_present.modes_ours)
		ethtool_bitset_put(nlh, ETHTOOL_A_EEE_MODES_OURS, &req->modes_ours);
	if (req->_present.modes_peer)
		ethtool_bitset_put(nlh, ETHTOOL_A_EEE_MODES_PEER, &req->modes_peer);
	if (req->_present.active)
		mnl_attr_put_u8(nlh, ETHTOOL_A_EEE_ACTIVE, req->active);
	if (req->_present.enabled)
		mnl_attr_put_u8(nlh, ETHTOOL_A_EEE_ENABLED, req->enabled);
	if (req->_present.tx_lpi_enabled)
		mnl_attr_put_u8(nlh, ETHTOOL_A_EEE_TX_LPI_ENABLED, req->tx_lpi_enabled);
	if (req->_present.tx_lpi_timer)
		mnl_attr_put_u32(nlh, ETHTOOL_A_EEE_TX_LPI_TIMER, req->tx_lpi_timer);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_TSINFO_GET ============== */
/* ETHTOOL_MSG_TSINFO_GET - do */
void ethtool_tsinfo_get_req_free(struct ethtool_tsinfo_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* NOTE(review): function continues past this chunk boundary. */
void ethtool_tsinfo_get_rsp_free(struct ethtool_tsinfo_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	/* NOTE(review): chunk begins mid-way through ethtool_tsinfo_get_rsp_free();
	 * the fragment below releases the remaining nested members.
	 */
	ethtool_bitset_free(&rsp->timestamping);
	ethtool_bitset_free(&rsp->tx_types);
	ethtool_bitset_free(&rsp->rx_filters);
	free(rsp);
}

/* netlink callback: decode ETHTOOL_A_TSINFO_* attributes into *dst,
 * setting the matching _present bit per attribute seen.
 */
int ethtool_tsinfo_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_tsinfo_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_TSINFO_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;

			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_TSINFO_TIMESTAMPING) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.timestamping = 1;

			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->timestamping;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_TSINFO_TX_TYPES) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.tx_types = 1;

			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->tx_types;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_TSINFO_RX_FILTERS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.rx_filters = 1;

			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->rx_filters;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_TSINFO_PHC_INDEX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.phc_index = 1;
			dst->phc_index = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/* Send one tsinfo-get request and parse the reply; caller frees the result
 * with ethtool_tsinfo_get_rsp_free(). NULL on failure.
 */
struct ethtool_tsinfo_get_rsp *
ethtool_tsinfo_get(struct ynl_sock *ys, struct ethtool_tsinfo_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_tsinfo_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_TSINFO_GET, 1);
	ys->req_policy = &ethtool_tsinfo_nest;
	yrs.yarg.rsp_policy = &ethtool_tsinfo_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_TSINFO_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_tsinfo_get_rsp_parse;
	yrs.rsp_cmd = 26;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_tsinfo_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_TSINFO_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void ethtool_tsinfo_get_list_free(struct ethtool_tsinfo_get_list *rsp)
{
	struct ethtool_tsinfo_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_bitset_free(&rsp->obj.timestamping);
		ethtool_bitset_free(&rsp->obj.tx_types);
		ethtool_bitset_free(&rsp->obj.rx_filters);
		free(rsp);
	}
}

struct ethtool_tsinfo_get_list *
ethtool_tsinfo_get_dump(struct ynl_sock *ys,
			struct ethtool_tsinfo_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_tsinfo_get_list);
	yds.cb = ethtool_tsinfo_get_rsp_parse;
	yds.rsp_cmd = 26;
	yds.rsp_policy = &ethtool_tsinfo_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_TSINFO_GET, 1);
	ys->req_policy = &ethtool_tsinfo_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_TSINFO_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_tsinfo_get_list_free(yds.first);
	return NULL;
}

/* ============== ETHTOOL_MSG_CABLE_TEST_ACT ============== */
/* ETHTOOL_MSG_CABLE_TEST_ACT - do */
void ethtool_cable_test_act_req_free(struct ethtool_cable_test_act_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Trigger a cable test; fire-and-forget, 0 on success, -1 on failure. */
int ethtool_cable_test_act(struct ynl_sock *ys,
			   struct ethtool_cable_test_act_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CABLE_TEST_ACT, 1);
	ys->req_policy = &ethtool_cable_test_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_HEADER, &req->header);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_CABLE_TEST_TDR_ACT ============== */
/* ETHTOOL_MSG_CABLE_TEST_TDR_ACT - do */
void
ethtool_cable_test_tdr_act_req_free(struct ethtool_cable_test_tdr_act_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Trigger a TDR cable test; 0 on success, -1 on failure. */
int ethtool_cable_test_tdr_act(struct ynl_sock *ys,
			       struct ethtool_cable_test_tdr_act_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_CABLE_TEST_TDR_ACT, 1);
	ys->req_policy = &ethtool_cable_test_tdr_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_CABLE_TEST_TDR_HEADER, &req->header);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_TUNNEL_INFO_GET ============== */
/* ETHTOOL_MSG_TUNNEL_INFO_GET - do */
void ethtool_tunnel_info_get_req_free(struct ethtool_tunnel_info_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

void ethtool_tunnel_info_get_rsp_free(struct ethtool_tunnel_info_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	ethtool_tunnel_udp_free(&rsp->udp_ports);
	free(rsp);
}

/* netlink callback: decode ETHTOOL_A_TUNNEL_INFO_* attributes into *dst.
 * NOTE(review): function continues past this chunk boundary.
 */
int ethtool_tunnel_info_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_tunnel_info_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_TUNNEL_INFO_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;

			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_TUNNEL_INFO_UDP_PORTS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.udp_ports = 1;
			/* NOTE(review): chunk begins mid-way through
			 * ethtool_tunnel_info_get_rsp_parse(); the fragment below
			 * finishes decoding the UDP_PORTS nest.
			 */
			parg.rsp_policy = &ethtool_tunnel_udp_nest;
			parg.data = &dst->udp_ports;
			if (ethtool_tunnel_udp_parse(&parg, attr))
				return MNL_CB_ERROR;
		}
	}

	return MNL_CB_OK;
}

/* Send one tunnel-info request and parse the reply; caller frees the result
 * with ethtool_tunnel_info_get_rsp_free(). NULL on failure.
 */
struct ethtool_tunnel_info_get_rsp *
ethtool_tunnel_info_get(struct ynl_sock *ys,
			struct ethtool_tunnel_info_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_tunnel_info_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_TUNNEL_INFO_GET, 1);
	ys->req_policy = &ethtool_tunnel_info_nest;
	yrs.yarg.rsp_policy = &ethtool_tunnel_info_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_TUNNEL_INFO_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_tunnel_info_get_rsp_parse;
	yrs.rsp_cmd = 29;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_tunnel_info_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_TUNNEL_INFO_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void
ethtool_tunnel_info_get_list_free(struct ethtool_tunnel_info_get_list *rsp)
{
	struct ethtool_tunnel_info_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_tunnel_udp_free(&rsp->obj.udp_ports);
		free(rsp);
	}
}

struct ethtool_tunnel_info_get_list *
ethtool_tunnel_info_get_dump(struct ynl_sock *ys,
			     struct ethtool_tunnel_info_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_tunnel_info_get_list);
	yds.cb = ethtool_tunnel_info_get_rsp_parse;
	yds.rsp_cmd = 29;
	yds.rsp_policy = &ethtool_tunnel_info_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_TUNNEL_INFO_GET, 1);
	ys->req_policy = &ethtool_tunnel_info_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_TUNNEL_INFO_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_tunnel_info_get_list_free(yds.first);
	return NULL;
}

/* ============== ETHTOOL_MSG_FEC_GET ============== */
/* ETHTOOL_MSG_FEC_GET - do */
void ethtool_fec_get_req_free(struct ethtool_fec_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

void ethtool_fec_get_rsp_free(struct ethtool_fec_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	ethtool_bitset_free(&rsp->modes);
	ethtool_fec_stat_free(&rsp->stats);
	free(rsp);
}

/* netlink callback: decode ETHTOOL_A_FEC_* attributes into *dst,
 * setting the matching _present bit per attribute seen.
 */
int ethtool_fec_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ynl_parse_arg *yarg = data;
	struct ethtool_fec_get_rsp *dst;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_FEC_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;

			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_FEC_MODES) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.modes = 1;

			parg.rsp_policy = &ethtool_bitset_nest;
			parg.data = &dst->modes;
			if (ethtool_bitset_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_FEC_AUTO) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			/* "auto" is a keyword-adjacent name; generator appends '_' */
			dst->_present.auto_ = 1;
			dst->auto_ = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_FEC_ACTIVE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.active = 1;
			dst->active = mnl_attr_get_u32(attr);
		} else if (type == ETHTOOL_A_FEC_STATS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.stats = 1;

			parg.rsp_policy = &ethtool_fec_stat_nest;
			parg.data = &dst->stats;
			if (ethtool_fec_stat_parse(&parg, attr))
				return MNL_CB_ERROR;
		}
	}

	return MNL_CB_OK;
}

/* Send one FEC-get request and parse the reply; caller frees the result
 * with ethtool_fec_get_rsp_free(). NULL on failure.
 */
struct ethtool_fec_get_rsp *
ethtool_fec_get(struct ynl_sock *ys, struct ethtool_fec_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_fec_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEC_GET, 1);
	ys->req_policy = &ethtool_fec_nest;
	yrs.yarg.rsp_policy = &ethtool_fec_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_FEC_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_fec_get_rsp_parse;
	yrs.rsp_cmd = 30;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_fec_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_FEC_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void ethtool_fec_get_list_free(struct ethtool_fec_get_list *rsp)
{
	struct ethtool_fec_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		ethtool_bitset_free(&rsp->obj.modes);
		ethtool_fec_stat_free(&rsp->obj.stats);
		free(rsp);
	}
}

struct ethtool_fec_get_list *
ethtool_fec_get_dump(struct ynl_sock *ys, struct ethtool_fec_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_fec_get_list);
	yds.cb = ethtool_fec_get_rsp_parse;
	yds.rsp_cmd = 30;
	yds.rsp_policy = &ethtool_fec_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_FEC_GET, 1);
	ys->req_policy = &ethtool_fec_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_FEC_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_fec_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_FEC_GET - notify */
void ethtool_fec_get_ntf_free(struct ethtool_fec_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	ethtool_bitset_free(&rsp->obj.modes);
	ethtool_fec_stat_free(&rsp->obj.stats);
	free(rsp);
}

/* ============== ETHTOOL_MSG_FEC_SET ============== */
/* ETHTOOL_MSG_FEC_SET - do */
/* NOTE(review): function continues past this chunk boundary. */
void ethtool_fec_set_req_free(struct ethtool_fec_set_req *req)
{
	ethtool_header_free(&req->header);
ethtool_bitset_free(&req->modes); ethtool_fec_stat_free(&req->stats); free(req); } int ethtool_fec_set(struct ynl_sock *ys, struct ethtool_fec_set_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_FEC_SET, 1); ys->req_policy = &ethtool_fec_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_FEC_HEADER, &req->header); if (req->_present.modes) ethtool_bitset_put(nlh, ETHTOOL_A_FEC_MODES, &req->modes); if (req->_present.auto_) mnl_attr_put_u8(nlh, ETHTOOL_A_FEC_AUTO, req->auto_); if (req->_present.active) mnl_attr_put_u32(nlh, ETHTOOL_A_FEC_ACTIVE, req->active); if (req->_present.stats) ethtool_fec_stat_put(nlh, ETHTOOL_A_FEC_STATS, &req->stats); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_MODULE_EEPROM_GET ============== */ /* ETHTOOL_MSG_MODULE_EEPROM_GET - do */ void ethtool_module_eeprom_get_req_free(struct ethtool_module_eeprom_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_module_eeprom_get_rsp_free(struct ethtool_module_eeprom_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp->data); free(rsp); } int ethtool_module_eeprom_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_module_eeprom_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_MODULE_EEPROM_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_MODULE_EEPROM_OFFSET) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.offset = 1; dst->offset = mnl_attr_get_u32(attr); } else if (type == 
ETHTOOL_A_MODULE_EEPROM_LENGTH) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.length = 1; dst->length = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_MODULE_EEPROM_PAGE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.page = 1; dst->page = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MODULE_EEPROM_BANK) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.bank = 1; dst->bank = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.i2c_address = 1; dst->i2c_address = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MODULE_EEPROM_DATA) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.data_len = len; dst->data = malloc(len); memcpy(dst->data, mnl_attr_get_payload(attr), len); } } return MNL_CB_OK; } struct ethtool_module_eeprom_get_rsp * ethtool_module_eeprom_get(struct ynl_sock *ys, struct ethtool_module_eeprom_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_module_eeprom_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MODULE_EEPROM_GET, 1); ys->req_policy = &ethtool_module_eeprom_nest; yrs.yarg.rsp_policy = &ethtool_module_eeprom_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_MODULE_EEPROM_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_module_eeprom_get_rsp_parse; yrs.rsp_cmd = 32; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_module_eeprom_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_MODULE_EEPROM_GET - dump */ void ethtool_module_eeprom_get_list_free(struct ethtool_module_eeprom_get_list *rsp) { struct ethtool_module_eeprom_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = 
rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp->obj.data); free(rsp); } } struct ethtool_module_eeprom_get_list * ethtool_module_eeprom_get_dump(struct ynl_sock *ys, struct ethtool_module_eeprom_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_module_eeprom_get_list); yds.cb = ethtool_module_eeprom_get_rsp_parse; yds.rsp_cmd = 32; yds.rsp_policy = &ethtool_module_eeprom_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_MODULE_EEPROM_GET, 1); ys->req_policy = &ethtool_module_eeprom_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_MODULE_EEPROM_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_module_eeprom_get_list_free(yds.first); return NULL; } /* ============== ETHTOOL_MSG_PHC_VCLOCKS_GET ============== */ /* ETHTOOL_MSG_PHC_VCLOCKS_GET - do */ void ethtool_phc_vclocks_get_req_free(struct ethtool_phc_vclocks_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_phc_vclocks_get_rsp_free(struct ethtool_phc_vclocks_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_phc_vclocks_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_phc_vclocks_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_PHC_VCLOCKS_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_PHC_VCLOCKS_NUM) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.num = 1; dst->num = mnl_attr_get_u32(attr); } } return 
	/* NOTE(review): chunk begins at the tail of
	 * ethtool_phc_vclocks_get_rsp_parse(); the fragment below completes
	 * its return statement.
	 */
	MNL_CB_OK;
}

/* Send one phc-vclocks request and parse the reply; caller frees the result
 * with ethtool_phc_vclocks_get_rsp_free(). NULL on failure.
 */
struct ethtool_phc_vclocks_get_rsp *
ethtool_phc_vclocks_get(struct ynl_sock *ys,
			struct ethtool_phc_vclocks_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_phc_vclocks_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PHC_VCLOCKS_GET, 1);
	ys->req_policy = &ethtool_phc_vclocks_nest;
	yrs.yarg.rsp_policy = &ethtool_phc_vclocks_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_PHC_VCLOCKS_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_phc_vclocks_get_rsp_parse;
	yrs.rsp_cmd = 34;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_phc_vclocks_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_PHC_VCLOCKS_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void
ethtool_phc_vclocks_get_list_free(struct ethtool_phc_vclocks_get_list *rsp)
{
	struct ethtool_phc_vclocks_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		free(rsp);
	}
}

struct ethtool_phc_vclocks_get_list *
ethtool_phc_vclocks_get_dump(struct ynl_sock *ys,
			     struct ethtool_phc_vclocks_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_phc_vclocks_get_list);
	yds.cb = ethtool_phc_vclocks_get_rsp_parse;
	yds.rsp_cmd = 34;
	yds.rsp_policy = &ethtool_phc_vclocks_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PHC_VCLOCKS_GET, 1);
	ys->req_policy = &ethtool_phc_vclocks_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_PHC_VCLOCKS_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_phc_vclocks_get_list_free(yds.first);
	return NULL;
}

/* ============== ETHTOOL_MSG_MODULE_GET ============== */
/* ETHTOOL_MSG_MODULE_GET - do */
void ethtool_module_get_req_free(struct ethtool_module_get_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

void ethtool_module_get_rsp_free(struct ethtool_module_get_rsp *rsp)
{
	ethtool_header_free(&rsp->header);
	free(rsp);
}

/* netlink callback: decode ETHTOOL_A_MODULE_* attributes into *dst,
 * setting the matching _present bit per attribute seen.
 */
int ethtool_module_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ethtool_module_get_rsp *dst;
	struct ynl_parse_arg *yarg = data;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;

	dst = yarg->data;
	parg.ys = yarg->ys;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == ETHTOOL_A_MODULE_HEADER) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.header = 1;

			parg.rsp_policy = &ethtool_header_nest;
			parg.data = &dst->header;
			if (ethtool_header_parse(&parg, attr))
				return MNL_CB_ERROR;
		} else if (type == ETHTOOL_A_MODULE_POWER_MODE_POLICY) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.power_mode_policy = 1;
			dst->power_mode_policy = mnl_attr_get_u8(attr);
		} else if (type == ETHTOOL_A_MODULE_POWER_MODE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.power_mode = 1;
			dst->power_mode = mnl_attr_get_u8(attr);
		}
	}

	return MNL_CB_OK;
}

/* Send one module-get request and parse the reply; caller frees the result
 * with ethtool_module_get_rsp_free(). NULL on failure.
 */
struct ethtool_module_get_rsp *
ethtool_module_get(struct ynl_sock *ys, struct ethtool_module_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct ethtool_module_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MODULE_GET, 1);
	ys->req_policy = &ethtool_module_nest;
	yrs.yarg.rsp_policy = &ethtool_module_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_MODULE_HEADER, &req->header);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = ethtool_module_get_rsp_parse;
	yrs.rsp_cmd = 35;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	ethtool_module_get_rsp_free(rsp);
	return NULL;
}

/* ETHTOOL_MSG_MODULE_GET - dump */
/* Free a YNL_LIST_END-terminated dump list. */
void ethtool_module_get_list_free(struct ethtool_module_get_list *rsp)
{
	struct ethtool_module_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		ethtool_header_free(&rsp->obj.header);
		free(rsp);
	}
}

struct ethtool_module_get_list *
ethtool_module_get_dump(struct ynl_sock *ys,
			struct ethtool_module_get_req_dump *req)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct ethtool_module_get_list);
	yds.cb = ethtool_module_get_rsp_parse;
	yds.rsp_cmd = 35;
	yds.rsp_policy = &ethtool_module_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_MODULE_GET, 1);
	ys->req_policy = &ethtool_module_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_MODULE_HEADER, &req->header);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	ethtool_module_get_list_free(yds.first);
	return NULL;
}

/* ETHTOOL_MSG_MODULE_GET - notify */
void ethtool_module_get_ntf_free(struct ethtool_module_get_ntf *rsp)
{
	ethtool_header_free(&rsp->obj.header);
	free(rsp);
}

/* ============== ETHTOOL_MSG_MODULE_SET ============== */
/* ETHTOOL_MSG_MODULE_SET - do */
void ethtool_module_set_req_free(struct ethtool_module_set_req *req)
{
	ethtool_header_free(&req->header);
	free(req);
}

/* Build and send a module-set request; only _present fields are emitted. */
int ethtool_module_set(struct ynl_sock *ys, struct ethtool_module_set_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MODULE_SET, 1);
	ys->req_policy = &ethtool_module_nest;

	if (req->_present.header)
		ethtool_header_put(nlh, ETHTOOL_A_MODULE_HEADER, &req->header);
	if (req->_present.power_mode_policy)
		mnl_attr_put_u8(nlh, ETHTOOL_A_MODULE_POWER_MODE_POLICY, req->power_mode_policy);
	if (req->_present.power_mode)
		mnl_attr_put_u8(nlh, ETHTOOL_A_MODULE_POWER_MODE, req->power_mode);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;

	return 0;
}

/* ============== ETHTOOL_MSG_PSE_GET ============== */
/* ETHTOOL_MSG_PSE_GET - do */
/* NOTE(review): function continues past this chunk boundary. */
void ethtool_pse_get_req_free(struct ethtool_pse_get_req *req)
{
ethtool_header_free(&req->header); free(req); } void ethtool_pse_get_rsp_free(struct ethtool_pse_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_pse_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct ethtool_pse_get_rsp *dst; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_PSE_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_PODL_PSE_ADMIN_STATE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.admin_state = 1; dst->admin_state = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PODL_PSE_ADMIN_CONTROL) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.admin_control = 1; dst->admin_control = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PODL_PSE_PW_D_STATUS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.pw_d_status = 1; dst->pw_d_status = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct ethtool_pse_get_rsp * ethtool_pse_get(struct ynl_sock *ys, struct ethtool_pse_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_pse_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PSE_GET, 1); ys->req_policy = &ethtool_pse_nest; yrs.yarg.rsp_policy = &ethtool_pse_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PSE_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_pse_get_rsp_parse; yrs.rsp_cmd = 37; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_pse_get_rsp_free(rsp); return 
NULL; } /* ETHTOOL_MSG_PSE_GET - dump */ void ethtool_pse_get_list_free(struct ethtool_pse_get_list *rsp) { struct ethtool_pse_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp); } } struct ethtool_pse_get_list * ethtool_pse_get_dump(struct ynl_sock *ys, struct ethtool_pse_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_pse_get_list); yds.cb = ethtool_pse_get_rsp_parse; yds.rsp_cmd = 37; yds.rsp_policy = &ethtool_pse_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PSE_GET, 1); ys->req_policy = &ethtool_pse_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PSE_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_pse_get_list_free(yds.first); return NULL; } /* ============== ETHTOOL_MSG_PSE_SET ============== */ /* ETHTOOL_MSG_PSE_SET - do */ void ethtool_pse_set_req_free(struct ethtool_pse_set_req *req) { ethtool_header_free(&req->header); free(req); } int ethtool_pse_set(struct ynl_sock *ys, struct ethtool_pse_set_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PSE_SET, 1); ys->req_policy = &ethtool_pse_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PSE_HEADER, &req->header); if (req->_present.admin_state) mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_ADMIN_STATE, req->admin_state); if (req->_present.admin_control) mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, req->admin_control); if (req->_present.pw_d_status) mnl_attr_put_u32(nlh, ETHTOOL_A_PODL_PSE_PW_D_STATUS, req->pw_d_status); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_RSS_GET ============== */ /* ETHTOOL_MSG_RSS_GET - do */ void ethtool_rss_get_req_free(struct ethtool_rss_get_req *req) { 
ethtool_header_free(&req->header); free(req); } void ethtool_rss_get_rsp_free(struct ethtool_rss_get_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp->indir); free(rsp->hkey); free(rsp); } int ethtool_rss_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct ethtool_rss_get_rsp *dst; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_RSS_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_RSS_CONTEXT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.context = 1; dst->context = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RSS_HFUNC) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.hfunc = 1; dst->hfunc = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_RSS_INDIR) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.indir_len = len; dst->indir = malloc(len); memcpy(dst->indir, mnl_attr_get_payload(attr), len); } else if (type == ETHTOOL_A_RSS_HKEY) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = mnl_attr_get_payload_len(attr); dst->_present.hkey_len = len; dst->hkey = malloc(len); memcpy(dst->hkey, mnl_attr_get_payload(attr), len); } } return MNL_CB_OK; } struct ethtool_rss_get_rsp * ethtool_rss_get(struct ynl_sock *ys, struct ethtool_rss_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_rss_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_RSS_GET, 1); ys->req_policy = &ethtool_rss_nest; yrs.yarg.rsp_policy = 
&ethtool_rss_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_RSS_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_rss_get_rsp_parse; yrs.rsp_cmd = ETHTOOL_MSG_RSS_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_rss_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_RSS_GET - dump */ void ethtool_rss_get_list_free(struct ethtool_rss_get_list *rsp) { struct ethtool_rss_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp->obj.indir); free(rsp->obj.hkey); free(rsp); } } struct ethtool_rss_get_list * ethtool_rss_get_dump(struct ynl_sock *ys, struct ethtool_rss_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_rss_get_list); yds.cb = ethtool_rss_get_rsp_parse; yds.rsp_cmd = ETHTOOL_MSG_RSS_GET; yds.rsp_policy = &ethtool_rss_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_RSS_GET, 1); ys->req_policy = &ethtool_rss_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_RSS_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_rss_get_list_free(yds.first); return NULL; } /* ============== ETHTOOL_MSG_PLCA_GET_CFG ============== */ /* ETHTOOL_MSG_PLCA_GET_CFG - do */ void ethtool_plca_get_cfg_req_free(struct ethtool_plca_get_cfg_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_plca_get_cfg_rsp_free(struct ethtool_plca_get_cfg_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_plca_get_cfg_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_plca_get_cfg_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { 
unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_PLCA_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_PLCA_VERSION) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.version = 1; dst->version = mnl_attr_get_u16(attr); } else if (type == ETHTOOL_A_PLCA_ENABLED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.enabled = 1; dst->enabled = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_PLCA_STATUS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.status = 1; dst->status = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_PLCA_NODE_CNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.node_cnt = 1; dst->node_cnt = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_NODE_ID) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.node_id = 1; dst->node_id = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_TO_TMR) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.to_tmr = 1; dst->to_tmr = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_BURST_CNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.burst_cnt = 1; dst->burst_cnt = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_BURST_TMR) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.burst_tmr = 1; dst->burst_tmr = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct ethtool_plca_get_cfg_rsp * ethtool_plca_get_cfg(struct ynl_sock *ys, struct ethtool_plca_get_cfg_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_plca_get_cfg_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_CFG, 1); ys->req_policy = 
&ethtool_plca_nest; yrs.yarg.rsp_policy = &ethtool_plca_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_plca_get_cfg_rsp_parse; yrs.rsp_cmd = ETHTOOL_MSG_PLCA_GET_CFG; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_plca_get_cfg_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_PLCA_GET_CFG - dump */ void ethtool_plca_get_cfg_list_free(struct ethtool_plca_get_cfg_list *rsp) { struct ethtool_plca_get_cfg_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp); } } struct ethtool_plca_get_cfg_list * ethtool_plca_get_cfg_dump(struct ynl_sock *ys, struct ethtool_plca_get_cfg_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_plca_get_cfg_list); yds.cb = ethtool_plca_get_cfg_rsp_parse; yds.rsp_cmd = ETHTOOL_MSG_PLCA_GET_CFG; yds.rsp_policy = &ethtool_plca_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_CFG, 1); ys->req_policy = &ethtool_plca_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_plca_get_cfg_list_free(yds.first); return NULL; } /* ETHTOOL_MSG_PLCA_GET_CFG - notify */ void ethtool_plca_get_cfg_ntf_free(struct ethtool_plca_get_cfg_ntf *rsp) { ethtool_header_free(&rsp->obj.header); free(rsp); } /* ============== ETHTOOL_MSG_PLCA_SET_CFG ============== */ /* ETHTOOL_MSG_PLCA_SET_CFG - do */ void ethtool_plca_set_cfg_req_free(struct ethtool_plca_set_cfg_req *req) { ethtool_header_free(&req->header); free(req); } int ethtool_plca_set_cfg(struct ynl_sock *ys, struct ethtool_plca_set_cfg_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, 
ETHTOOL_MSG_PLCA_SET_CFG, 1); ys->req_policy = &ethtool_plca_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header); if (req->_present.version) mnl_attr_put_u16(nlh, ETHTOOL_A_PLCA_VERSION, req->version); if (req->_present.enabled) mnl_attr_put_u8(nlh, ETHTOOL_A_PLCA_ENABLED, req->enabled); if (req->_present.status) mnl_attr_put_u8(nlh, ETHTOOL_A_PLCA_STATUS, req->status); if (req->_present.node_cnt) mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_NODE_CNT, req->node_cnt); if (req->_present.node_id) mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_NODE_ID, req->node_id); if (req->_present.to_tmr) mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_TO_TMR, req->to_tmr); if (req->_present.burst_cnt) mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_BURST_CNT, req->burst_cnt); if (req->_present.burst_tmr) mnl_attr_put_u32(nlh, ETHTOOL_A_PLCA_BURST_TMR, req->burst_tmr); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ============== ETHTOOL_MSG_PLCA_GET_STATUS ============== */ /* ETHTOOL_MSG_PLCA_GET_STATUS - do */ void ethtool_plca_get_status_req_free(struct ethtool_plca_get_status_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_plca_get_status_rsp_free(struct ethtool_plca_get_status_rsp *rsp) { ethtool_header_free(&rsp->header); free(rsp); } int ethtool_plca_get_status_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_plca_get_status_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_PLCA_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_PLCA_VERSION) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; 
dst->_present.version = 1; dst->version = mnl_attr_get_u16(attr); } else if (type == ETHTOOL_A_PLCA_ENABLED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.enabled = 1; dst->enabled = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_PLCA_STATUS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.status = 1; dst->status = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_PLCA_NODE_CNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.node_cnt = 1; dst->node_cnt = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_NODE_ID) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.node_id = 1; dst->node_id = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_TO_TMR) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.to_tmr = 1; dst->to_tmr = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_BURST_CNT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.burst_cnt = 1; dst->burst_cnt = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_PLCA_BURST_TMR) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.burst_tmr = 1; dst->burst_tmr = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct ethtool_plca_get_status_rsp * ethtool_plca_get_status(struct ynl_sock *ys, struct ethtool_plca_get_status_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_plca_get_status_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_STATUS, 1); ys->req_policy = &ethtool_plca_nest; yrs.yarg.rsp_policy = &ethtool_plca_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_plca_get_status_rsp_parse; yrs.rsp_cmd = 40; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_plca_get_status_rsp_free(rsp); 
return NULL; } /* ETHTOOL_MSG_PLCA_GET_STATUS - dump */ void ethtool_plca_get_status_list_free(struct ethtool_plca_get_status_list *rsp) { struct ethtool_plca_get_status_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); free(rsp); } } struct ethtool_plca_get_status_list * ethtool_plca_get_status_dump(struct ynl_sock *ys, struct ethtool_plca_get_status_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_plca_get_status_list); yds.cb = ethtool_plca_get_status_rsp_parse; yds.rsp_cmd = 40; yds.rsp_policy = &ethtool_plca_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_PLCA_GET_STATUS, 1); ys->req_policy = &ethtool_plca_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_PLCA_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_plca_get_status_list_free(yds.first); return NULL; } /* ============== ETHTOOL_MSG_MM_GET ============== */ /* ETHTOOL_MSG_MM_GET - do */ void ethtool_mm_get_req_free(struct ethtool_mm_get_req *req) { ethtool_header_free(&req->header); free(req); } void ethtool_mm_get_rsp_free(struct ethtool_mm_get_rsp *rsp) { ethtool_header_free(&rsp->header); ethtool_mm_stat_free(&rsp->stats); free(rsp); } int ethtool_mm_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct ethtool_mm_get_rsp *dst; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_MM_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == 
ETHTOOL_A_MM_PMAC_ENABLED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.pmac_enabled = 1; dst->pmac_enabled = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MM_TX_ENABLED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_enabled = 1; dst->tx_enabled = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MM_TX_ACTIVE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_active = 1; dst->tx_active = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MM_TX_MIN_FRAG_SIZE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.tx_min_frag_size = 1; dst->tx_min_frag_size = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_MM_RX_MIN_FRAG_SIZE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.rx_min_frag_size = 1; dst->rx_min_frag_size = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_MM_VERIFY_ENABLED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.verify_enabled = 1; dst->verify_enabled = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_MM_VERIFY_TIME) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.verify_time = 1; dst->verify_time = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_MM_MAX_VERIFY_TIME) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.max_verify_time = 1; dst->max_verify_time = mnl_attr_get_u32(attr); } else if (type == ETHTOOL_A_MM_STATS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.stats = 1; parg.rsp_policy = &ethtool_mm_stat_nest; parg.data = &dst->stats; if (ethtool_mm_stat_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } struct ethtool_mm_get_rsp * ethtool_mm_get(struct ynl_sock *ys, struct ethtool_mm_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct ethtool_mm_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MM_GET, 1); 
ys->req_policy = &ethtool_mm_nest; yrs.yarg.rsp_policy = &ethtool_mm_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_MM_HEADER, &req->header); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = ethtool_mm_get_rsp_parse; yrs.rsp_cmd = ETHTOOL_MSG_MM_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: ethtool_mm_get_rsp_free(rsp); return NULL; } /* ETHTOOL_MSG_MM_GET - dump */ void ethtool_mm_get_list_free(struct ethtool_mm_get_list *rsp) { struct ethtool_mm_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; ethtool_header_free(&rsp->obj.header); ethtool_mm_stat_free(&rsp->obj.stats); free(rsp); } } struct ethtool_mm_get_list * ethtool_mm_get_dump(struct ynl_sock *ys, struct ethtool_mm_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct ethtool_mm_get_list); yds.cb = ethtool_mm_get_rsp_parse; yds.rsp_cmd = ETHTOOL_MSG_MM_GET; yds.rsp_policy = &ethtool_mm_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, ETHTOOL_MSG_MM_GET, 1); ys->req_policy = &ethtool_mm_nest; if (req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_MM_HEADER, &req->header); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: ethtool_mm_get_list_free(yds.first); return NULL; } /* ETHTOOL_MSG_MM_GET - notify */ void ethtool_mm_get_ntf_free(struct ethtool_mm_get_ntf *rsp) { ethtool_header_free(&rsp->obj.header); ethtool_mm_stat_free(&rsp->obj.stats); free(rsp); } /* ============== ETHTOOL_MSG_MM_SET ============== */ /* ETHTOOL_MSG_MM_SET - do */ void ethtool_mm_set_req_free(struct ethtool_mm_set_req *req) { ethtool_header_free(&req->header); free(req); } int ethtool_mm_set(struct ynl_sock *ys, struct ethtool_mm_set_req *req) { struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, ETHTOOL_MSG_MM_SET, 1); ys->req_policy = &ethtool_mm_nest; if 
(req->_present.header) ethtool_header_put(nlh, ETHTOOL_A_MM_HEADER, &req->header); if (req->_present.verify_enabled) mnl_attr_put_u8(nlh, ETHTOOL_A_MM_VERIFY_ENABLED, req->verify_enabled); if (req->_present.verify_time) mnl_attr_put_u32(nlh, ETHTOOL_A_MM_VERIFY_TIME, req->verify_time); if (req->_present.tx_enabled) mnl_attr_put_u8(nlh, ETHTOOL_A_MM_TX_ENABLED, req->tx_enabled); if (req->_present.pmac_enabled) mnl_attr_put_u8(nlh, ETHTOOL_A_MM_PMAC_ENABLED, req->pmac_enabled); if (req->_present.tx_min_frag_size) mnl_attr_put_u32(nlh, ETHTOOL_A_MM_TX_MIN_FRAG_SIZE, req->tx_min_frag_size); err = ynl_exec(ys, nlh, NULL); if (err < 0) return -1; return 0; } /* ETHTOOL_MSG_CABLE_TEST_NTF - event */ int ethtool_cable_test_ntf_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_cable_test_ntf_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_CABLE_TEST_NTF_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_CABLE_TEST_NTF_STATUS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.status = 1; dst->status = mnl_attr_get_u8(attr); } } return MNL_CB_OK; } void ethtool_cable_test_ntf_free(struct ethtool_cable_test_ntf *rsp) { ethtool_header_free(&rsp->obj.header); free(rsp); } /* ETHTOOL_MSG_CABLE_TEST_TDR_NTF - event */ int ethtool_cable_test_tdr_ntf_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ethtool_cable_test_tdr_ntf_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int 
type = mnl_attr_get_type(attr); if (type == ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.header = 1; parg.rsp_policy = &ethtool_header_nest; parg.data = &dst->header; if (ethtool_header_parse(&parg, attr)) return MNL_CB_ERROR; } else if (type == ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.status = 1; dst->status = mnl_attr_get_u8(attr); } else if (type == ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.nest = 1; parg.rsp_policy = &ethtool_cable_nest_nest; parg.data = &dst->nest; if (ethtool_cable_nest_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } void ethtool_cable_test_tdr_ntf_free(struct ethtool_cable_test_tdr_ntf *rsp) { ethtool_header_free(&rsp->obj.header); ethtool_cable_nest_free(&rsp->obj.nest); free(rsp); } static const struct ynl_ntf_info ethtool_ntf_info[] = { [ETHTOOL_MSG_LINKINFO_NTF] = { .alloc_sz = sizeof(struct ethtool_linkinfo_get_ntf), .cb = ethtool_linkinfo_get_rsp_parse, .policy = &ethtool_linkinfo_nest, .free = (void *)ethtool_linkinfo_get_ntf_free, }, [ETHTOOL_MSG_LINKMODES_NTF] = { .alloc_sz = sizeof(struct ethtool_linkmodes_get_ntf), .cb = ethtool_linkmodes_get_rsp_parse, .policy = &ethtool_linkmodes_nest, .free = (void *)ethtool_linkmodes_get_ntf_free, }, [ETHTOOL_MSG_DEBUG_NTF] = { .alloc_sz = sizeof(struct ethtool_debug_get_ntf), .cb = ethtool_debug_get_rsp_parse, .policy = &ethtool_debug_nest, .free = (void *)ethtool_debug_get_ntf_free, }, [ETHTOOL_MSG_WOL_NTF] = { .alloc_sz = sizeof(struct ethtool_wol_get_ntf), .cb = ethtool_wol_get_rsp_parse, .policy = &ethtool_wol_nest, .free = (void *)ethtool_wol_get_ntf_free, }, [ETHTOOL_MSG_FEATURES_NTF] = { .alloc_sz = sizeof(struct ethtool_features_get_ntf), .cb = ethtool_features_get_rsp_parse, .policy = &ethtool_features_nest, .free = (void *)ethtool_features_get_ntf_free, }, 
[ETHTOOL_MSG_PRIVFLAGS_NTF] = { .alloc_sz = sizeof(struct ethtool_privflags_get_ntf), .cb = ethtool_privflags_get_rsp_parse, .policy = &ethtool_privflags_nest, .free = (void *)ethtool_privflags_get_ntf_free, }, [ETHTOOL_MSG_RINGS_NTF] = { .alloc_sz = sizeof(struct ethtool_rings_get_ntf), .cb = ethtool_rings_get_rsp_parse, .policy = &ethtool_rings_nest, .free = (void *)ethtool_rings_get_ntf_free, }, [ETHTOOL_MSG_CHANNELS_NTF] = { .alloc_sz = sizeof(struct ethtool_channels_get_ntf), .cb = ethtool_channels_get_rsp_parse, .policy = &ethtool_channels_nest, .free = (void *)ethtool_channels_get_ntf_free, }, [ETHTOOL_MSG_COALESCE_NTF] = { .alloc_sz = sizeof(struct ethtool_coalesce_get_ntf), .cb = ethtool_coalesce_get_rsp_parse, .policy = &ethtool_coalesce_nest, .free = (void *)ethtool_coalesce_get_ntf_free, }, [ETHTOOL_MSG_PAUSE_NTF] = { .alloc_sz = sizeof(struct ethtool_pause_get_ntf), .cb = ethtool_pause_get_rsp_parse, .policy = &ethtool_pause_nest, .free = (void *)ethtool_pause_get_ntf_free, }, [ETHTOOL_MSG_EEE_NTF] = { .alloc_sz = sizeof(struct ethtool_eee_get_ntf), .cb = ethtool_eee_get_rsp_parse, .policy = &ethtool_eee_nest, .free = (void *)ethtool_eee_get_ntf_free, }, [ETHTOOL_MSG_CABLE_TEST_NTF] = { .alloc_sz = sizeof(struct ethtool_cable_test_ntf), .cb = ethtool_cable_test_ntf_rsp_parse, .policy = &ethtool_cable_test_ntf_nest, .free = (void *)ethtool_cable_test_ntf_free, }, [ETHTOOL_MSG_CABLE_TEST_TDR_NTF] = { .alloc_sz = sizeof(struct ethtool_cable_test_tdr_ntf), .cb = ethtool_cable_test_tdr_ntf_rsp_parse, .policy = &ethtool_cable_test_tdr_ntf_nest, .free = (void *)ethtool_cable_test_tdr_ntf_free, }, [ETHTOOL_MSG_FEC_NTF] = { .alloc_sz = sizeof(struct ethtool_fec_get_ntf), .cb = ethtool_fec_get_rsp_parse, .policy = &ethtool_fec_nest, .free = (void *)ethtool_fec_get_ntf_free, }, [ETHTOOL_MSG_MODULE_NTF] = { .alloc_sz = sizeof(struct ethtool_module_get_ntf), .cb = ethtool_module_get_rsp_parse, .policy = &ethtool_module_nest, .free = (void 
*)ethtool_module_get_ntf_free, }, [ETHTOOL_MSG_PLCA_NTF] = { .alloc_sz = sizeof(struct ethtool_plca_get_cfg_ntf), .cb = ethtool_plca_get_cfg_rsp_parse, .policy = &ethtool_plca_nest, .free = (void *)ethtool_plca_get_cfg_ntf_free, }, [ETHTOOL_MSG_MM_NTF] = { .alloc_sz = sizeof(struct ethtool_mm_get_ntf), .cb = ethtool_mm_get_rsp_parse, .policy = &ethtool_mm_nest, .free = (void *)ethtool_mm_get_ntf_free, }, }; const struct ynl_family ynl_ethtool_family = { .name = "ethtool", .ntf_info = ethtool_ntf_info, .ntf_info_size = MNL_ARRAY_SIZE(ethtool_ntf_info), };
linux-master
tools/net/ynl/generated/ethtool-user.c
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/netdev.yaml */
/* YNL-GEN user source */
/*
 * Userspace library for the "netdev" generic-netlink family, generated by
 * YNL from the netlink spec.  Provides request/response structs, attribute
 * policies, parse callbacks and do/dump entry points for NETDEV_CMD_DEV_GET,
 * plus notification plumbing for the dev add/del/change notifications.
 */

#include <stdlib.h>
#include <string.h>
#include "netdev-user.h"
#include "ynl.h"
#include <linux/netdev.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>

/* Enums */
/* Maps netdev genetlink command IDs to their spec names (sparse table). */
static const char * const netdev_op_strmap[] = {
	[NETDEV_CMD_DEV_GET] = "dev-get",
	[NETDEV_CMD_DEV_ADD_NTF] = "dev-add-ntf",
	[NETDEV_CMD_DEV_DEL_NTF] = "dev-del-ntf",
	[NETDEV_CMD_DEV_CHANGE_NTF] = "dev-change-ntf",
};

/* Bounds-checked lookup of a command name; NULL for out-of-range ops
 * (holes in the sparse table also yield NULL via the zero-initialized slots).
 */
const char *netdev_op_str(int op)
{
	if (op < 0 || op >= (int)MNL_ARRAY_SIZE(netdev_op_strmap))
		return NULL;
	return netdev_op_strmap[op];
}

/* Names for the individual bits of the xdp-act flags attribute. */
static const char * const netdev_xdp_act_strmap[] = {
	[0] = "basic",
	[1] = "redirect",
	[2] = "ndo-xmit",
	[3] = "xsk-zerocopy",
	[4] = "hw-offload",
	[5] = "rx-sg",
	[6] = "ndo-xmit-sg",
};

/* Name of a single xdp-act flag.  The argument is a one-bit mask:
 * ffs(value) - 1 converts the lowest set bit to a table index
 * (ffs() returns 0 for value == 0, giving -1 and thus NULL).
 */
const char *netdev_xdp_act_str(enum netdev_xdp_act value)
{
	value = ffs(value) - 1;
	if (value < 0 || value >= (int)MNL_ARRAY_SIZE(netdev_xdp_act_strmap))
		return NULL;
	return netdev_xdp_act_strmap[value];
}

/* Policies */
/* Per-attribute type policy used by ynl_attr_validate() for dev messages. */
struct ynl_policy_attr netdev_dev_policy[NETDEV_A_DEV_MAX + 1] = {
	[NETDEV_A_DEV_IFINDEX] = { .name = "ifindex", .type = YNL_PT_U32, },
	[NETDEV_A_DEV_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
	[NETDEV_A_DEV_XDP_FEATURES] = { .name = "xdp-features", .type = YNL_PT_U64, },
	[NETDEV_A_DEV_XDP_ZC_MAX_SEGS] = { .name = "xdp-zc-max-segs", .type = YNL_PT_U32, },
};

struct ynl_policy_nest netdev_dev_nest = {
	.max_attr = NETDEV_A_DEV_MAX,
	.table = netdev_dev_policy,
};

/* Common nested types */
/* ============== NETDEV_CMD_DEV_GET ============== */
/* NETDEV_CMD_DEV_GET - do */
/* Free a dev-get request (no heap-allocated members). */
void netdev_dev_get_req_free(struct netdev_dev_get_req *req)
{
	free(req);
}

/* Free a dev-get response (no heap-allocated members). */
void netdev_dev_get_rsp_free(struct netdev_dev_get_rsp *rsp)
{
	free(rsp);
}

/* Parse callback: walk the genetlink attributes of one message and fill
 * yarg->data (a struct netdev_dev_get_rsp).  Each recognized attribute is
 * validated against the policy, then copied out with its _present bit set.
 * Unknown attribute types are silently skipped.
 */
int netdev_dev_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ynl_parse_arg *yarg = data;
	struct netdev_dev_get_rsp *dst;
	const struct nlattr *attr;

	dst = yarg->data;

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == NETDEV_A_DEV_IFINDEX) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.ifindex = 1;
			dst->ifindex = mnl_attr_get_u32(attr);
		} else if (type == NETDEV_A_DEV_XDP_FEATURES) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.xdp_features = 1;
			dst->xdp_features = mnl_attr_get_u64(attr);
		} else if (type == NETDEV_A_DEV_XDP_ZC_MAX_SEGS) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.xdp_zc_max_segs = 1;
			dst->xdp_zc_max_segs = mnl_attr_get_u32(attr);
		}
	}

	return MNL_CB_OK;
}

/* Synchronous "do" request: build a NETDEV_CMD_DEV_GET message from req,
 * execute it, and return the parsed response.  Returns NULL on failure
 * (the partially filled rsp is freed).  Caller owns the returned rsp and
 * must release it with netdev_dev_get_rsp_free().
 * NOTE(review): the calloc() result is used without a NULL check — this
 * pattern recurs throughout the generated file.
 */
struct netdev_dev_get_rsp *
netdev_dev_get(struct ynl_sock *ys, struct netdev_dev_get_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct netdev_dev_get_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, NETDEV_CMD_DEV_GET, 1);
	ys->req_policy = &netdev_dev_nest;
	yrs.yarg.rsp_policy = &netdev_dev_nest;

	/* Only attributes flagged present in the request are emitted. */
	if (req->_present.ifindex)
		mnl_attr_put_u32(nlh, NETDEV_A_DEV_IFINDEX, req->ifindex);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = netdev_dev_get_rsp_parse;
	yrs.rsp_cmd = NETDEV_CMD_DEV_GET;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	netdev_dev_get_rsp_free(rsp);
	return NULL;
}

/* NETDEV_CMD_DEV_GET - dump */
/* Free a whole dump result list; the list is terminated by the
 * YNL_LIST_END sentinel rather than NULL.
 */
void netdev_dev_get_list_free(struct netdev_dev_get_list *rsp)
{
	struct netdev_dev_get_list *next = rsp;

	while ((void *)next != YNL_LIST_END) {
		rsp = next;
		next = rsp->next;

		free(rsp);
	}
}

/* Dump request: fetch all devices as a linked list of responses.
 * Returns NULL on failure (any partial list is freed).  Caller owns the
 * list and must release it with netdev_dev_get_list_free().
 */
struct netdev_dev_get_list *netdev_dev_get_dump(struct ynl_sock *ys)
{
	struct ynl_dump_state yds = {};
	struct nlmsghdr *nlh;
	int err;

	yds.ys = ys;
	yds.alloc_sz = sizeof(struct netdev_dev_get_list);
	yds.cb = netdev_dev_get_rsp_parse;
	yds.rsp_cmd = NETDEV_CMD_DEV_GET;
	yds.rsp_policy = &netdev_dev_nest;

	nlh = ynl_gemsg_start_dump(ys, ys->family_id, NETDEV_CMD_DEV_GET, 1);

	err = ynl_exec_dump(ys, nlh, &yds);
	if (err < 0)
		goto free_list;

	return yds.first;

free_list:
	netdev_dev_get_list_free(yds.first);
	return NULL;
}

/* NETDEV_CMD_DEV_GET - notify */
/* Free a dev notification object (no heap-allocated members). */
void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp)
{
	free(rsp);
}

/* Notification dispatch table: all three dev notifications reuse the
 * dev-get response layout, parser and policy.
 */
static const struct ynl_ntf_info netdev_ntf_info[] = {
	[NETDEV_CMD_DEV_ADD_NTF] = {
		.alloc_sz = sizeof(struct netdev_dev_get_ntf),
		.cb = netdev_dev_get_rsp_parse,
		.policy = &netdev_dev_nest,
		.free = (void *)netdev_dev_get_ntf_free,
	},
	[NETDEV_CMD_DEV_DEL_NTF] = {
		.alloc_sz = sizeof(struct netdev_dev_get_ntf),
		.cb = netdev_dev_get_rsp_parse,
		.policy = &netdev_dev_nest,
		.free = (void *)netdev_dev_get_ntf_free,
	},
	[NETDEV_CMD_DEV_CHANGE_NTF] = {
		.alloc_sz = sizeof(struct netdev_dev_get_ntf),
		.cb = netdev_dev_get_rsp_parse,
		.policy = &netdev_dev_nest,
		.free = (void *)netdev_dev_get_ntf_free,
	},
};

/* Family descriptor consumed by the ynl core (ynl_sock_create() etc.). */
const struct ynl_family ynl_netdev_family = {
	.name = "netdev",
	.ntf_info = netdev_ntf_info,
	.ntf_info_size = MNL_ARRAY_SIZE(netdev_ntf_info),
};
linux-master
tools/net/ynl/generated/netdev-user.c
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/handshake.yaml */
/* YNL-GEN user source */
/*
 * Userspace library for the "handshake" generic-netlink family, generated
 * by YNL from the netlink spec.  Covers HANDSHAKE_CMD_ACCEPT (do + notify)
 * and HANDSHAKE_CMD_DONE, plus the nested x509 object type.
 */

#include <stdlib.h>
#include <string.h>
#include "handshake-user.h"
#include "ynl.h"
#include <linux/handshake.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>

/* Enums */
/* Maps handshake genetlink command IDs to their spec names. */
static const char * const handshake_op_strmap[] = {
	[HANDSHAKE_CMD_READY] = "ready",
	[HANDSHAKE_CMD_ACCEPT] = "accept",
	[HANDSHAKE_CMD_DONE] = "done",
};

/* Bounds-checked lookup of a command name; NULL for out-of-range ops. */
const char *handshake_op_str(int op)
{
	if (op < 0 || op >= (int)MNL_ARRAY_SIZE(handshake_op_strmap))
		return NULL;
	return handshake_op_strmap[op];
}

static const char * const handshake_handler_class_strmap[] = {
	[0] = "none",
	[1] = "tlshd",
	[2] = "max",
};

/* Name of a handler-class enum value; NULL when out of range. */
const char *handshake_handler_class_str(enum handshake_handler_class value)
{
	if (value < 0 || value >= (int)MNL_ARRAY_SIZE(handshake_handler_class_strmap))
		return NULL;
	return handshake_handler_class_strmap[value];
}

static const char * const handshake_msg_type_strmap[] = {
	[0] = "unspec",
	[1] = "clienthello",
	[2] = "serverhello",
};

/* Name of a msg-type enum value; NULL when out of range. */
const char *handshake_msg_type_str(enum handshake_msg_type value)
{
	if (value < 0 || value >= (int)MNL_ARRAY_SIZE(handshake_msg_type_strmap))
		return NULL;
	return handshake_msg_type_strmap[value];
}

static const char * const handshake_auth_strmap[] = {
	[0] = "unspec",
	[1] = "unauth",
	[2] = "psk",
	[3] = "x509",
};

/* Name of an auth enum value; NULL when out of range. */
const char *handshake_auth_str(enum handshake_auth value)
{
	if (value < 0 || value >= (int)MNL_ARRAY_SIZE(handshake_auth_strmap))
		return NULL;
	return handshake_auth_strmap[value];
}

/* Policies */
/* Attribute policy for the nested x509 object. */
struct ynl_policy_attr handshake_x509_policy[HANDSHAKE_A_X509_MAX + 1] = {
	[HANDSHAKE_A_X509_CERT] = { .name = "cert", .type = YNL_PT_U32, },
	[HANDSHAKE_A_X509_PRIVKEY] = { .name = "privkey", .type = YNL_PT_U32, },
};

struct ynl_policy_nest handshake_x509_nest = {
	.max_attr = HANDSHAKE_A_X509_MAX,
	.table = handshake_x509_policy,
};

/* Attribute policy for accept requests/responses; "certificate" is a
 * nested x509 object and "peername" a NUL-terminated string.
 */
struct ynl_policy_attr handshake_accept_policy[HANDSHAKE_A_ACCEPT_MAX + 1] = {
	[HANDSHAKE_A_ACCEPT_SOCKFD] = { .name = "sockfd", .type = YNL_PT_U32, },
	[HANDSHAKE_A_ACCEPT_HANDLER_CLASS] = { .name = "handler-class", .type = YNL_PT_U32, },
	[HANDSHAKE_A_ACCEPT_MESSAGE_TYPE] = { .name = "message-type", .type = YNL_PT_U32, },
	[HANDSHAKE_A_ACCEPT_TIMEOUT] = { .name = "timeout", .type = YNL_PT_U32, },
	[HANDSHAKE_A_ACCEPT_AUTH_MODE] = { .name = "auth-mode", .type = YNL_PT_U32, },
	[HANDSHAKE_A_ACCEPT_PEER_IDENTITY] = { .name = "peer-identity", .type = YNL_PT_U32, },
	[HANDSHAKE_A_ACCEPT_CERTIFICATE] = { .name = "certificate", .type = YNL_PT_NEST, .nest = &handshake_x509_nest, },
	[HANDSHAKE_A_ACCEPT_PEERNAME] = { .name = "peername", .type = YNL_PT_NUL_STR, },
};

struct ynl_policy_nest handshake_accept_nest = {
	.max_attr = HANDSHAKE_A_ACCEPT_MAX,
	.table = handshake_accept_policy,
};

/* Attribute policy for done requests. */
struct ynl_policy_attr handshake_done_policy[HANDSHAKE_A_DONE_MAX + 1] = {
	[HANDSHAKE_A_DONE_STATUS] = { .name = "status", .type = YNL_PT_U32, },
	[HANDSHAKE_A_DONE_SOCKFD] = { .name = "sockfd", .type = YNL_PT_U32, },
	[HANDSHAKE_A_DONE_REMOTE_AUTH] = { .name = "remote-auth", .type = YNL_PT_U32, },
};

struct ynl_policy_nest handshake_done_nest = {
	.max_attr = HANDSHAKE_A_DONE_MAX,
	.table = handshake_done_policy,
};

/* Common nested types */
/* x509 objects have no heap-allocated members; nothing to release. */
void handshake_x509_free(struct handshake_x509 *obj)
{
}

/* Parse one nested x509 attribute set into yarg->data.  Returns 0 on
 * success, MNL_CB_ERROR on a validation failure.
 */
int handshake_x509_parse(struct ynl_parse_arg *yarg,
			 const struct nlattr *nested)
{
	struct handshake_x509 *dst = yarg->data;
	const struct nlattr *attr;

	mnl_attr_for_each_nested(attr, nested) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == HANDSHAKE_A_X509_CERT) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.cert = 1;
			dst->cert = mnl_attr_get_u32(attr);
		} else if (type == HANDSHAKE_A_X509_PRIVKEY) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.privkey = 1;
			dst->privkey = mnl_attr_get_u32(attr);
		}
	}

	return 0;
}

/* ============== HANDSHAKE_CMD_ACCEPT ============== */
/* HANDSHAKE_CMD_ACCEPT - do */
/* Free an accept request (no heap-allocated members). */
void handshake_accept_req_free(struct handshake_accept_req *req)
{
	free(req);
}

/* Free an accept response, including its peer-identity array,
 * certificate array (each element freed via handshake_x509_free())
 * and peername string.
 */
void handshake_accept_rsp_free(struct handshake_accept_rsp *rsp)
{
	unsigned int i;

	free(rsp->peer_identity);
	for (i = 0; i < rsp->n_certificate; i++)
		handshake_x509_free(&rsp->certificate[i]);
	free(rsp->certificate);
	free(rsp->peername);
	free(rsp);
}

/* Parse callback for accept responses.  Multi-instance attributes
 * (peer-identity, certificate) are handled with a two-pass scan:
 * the first loop over the message counts them (and fills the scalar
 * attributes), then arrays are allocated and second passes copy the
 * values in.  Scalars are validated against the policy and flagged in
 * dst->_present; peername is duplicated with an explicit NUL terminator.
 * NOTE(review): malloc()/calloc() results here are used without NULL
 * checks — a pattern throughout this generated file.
 */
int handshake_accept_rsp_parse(const struct nlmsghdr *nlh, void *data)
{
	struct ynl_parse_arg *yarg = data;
	struct handshake_accept_rsp *dst;
	unsigned int n_peer_identity = 0;
	unsigned int n_certificate = 0;
	const struct nlattr *attr;
	struct ynl_parse_arg parg;
	int i;

	dst = yarg->data;
	parg.ys = yarg->ys;

	/* Refuse to parse twice into the same destination. */
	if (dst->certificate)
		return ynl_error_parse(yarg, "attribute already present (accept.certificate)");
	if (dst->peer_identity)
		return ynl_error_parse(yarg, "attribute already present (accept.peer-identity)");

	mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
		unsigned int type = mnl_attr_get_type(attr);

		if (type == HANDSHAKE_A_ACCEPT_SOCKFD) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.sockfd = 1;
			dst->sockfd = mnl_attr_get_u32(attr);
		} else if (type == HANDSHAKE_A_ACCEPT_MESSAGE_TYPE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.message_type = 1;
			dst->message_type = mnl_attr_get_u32(attr);
		} else if (type == HANDSHAKE_A_ACCEPT_TIMEOUT) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.timeout = 1;
			dst->timeout = mnl_attr_get_u32(attr);
		} else if (type == HANDSHAKE_A_ACCEPT_AUTH_MODE) {
			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;
			dst->_present.auth_mode = 1;
			dst->auth_mode = mnl_attr_get_u32(attr);
		} else if (type == HANDSHAKE_A_ACCEPT_PEER_IDENTITY) {
			n_peer_identity++;	/* counted now, copied below */
		} else if (type == HANDSHAKE_A_ACCEPT_CERTIFICATE) {
			n_certificate++;	/* counted now, parsed below */
		} else if (type == HANDSHAKE_A_ACCEPT_PEERNAME) {
			unsigned int len;

			if (ynl_attr_validate(yarg, attr))
				return MNL_CB_ERROR;

			len = strnlen(mnl_attr_get_str(attr),
				      mnl_attr_get_payload_len(attr));
			dst->_present.peername_len = len;
			dst->peername = malloc(len + 1);
			memcpy(dst->peername, mnl_attr_get_str(attr), len);
			dst->peername[len] = 0;
		}
	}

	/* Second pass: parse each nested certificate into the array. */
	if (n_certificate) {
		dst->certificate = calloc(n_certificate, sizeof(*dst->certificate));
		dst->n_certificate = n_certificate;
		i = 0;
		parg.rsp_policy = &handshake_x509_nest;
		mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
			if (mnl_attr_get_type(attr) == HANDSHAKE_A_ACCEPT_CERTIFICATE) {
				parg.data = &dst->certificate[i];
				if (handshake_x509_parse(&parg, attr))
					return MNL_CB_ERROR;
				i++;
			}
		}
	}
	/* Second pass: copy each peer-identity value into the array. */
	if (n_peer_identity) {
		dst->peer_identity = calloc(n_peer_identity, sizeof(*dst->peer_identity));
		dst->n_peer_identity = n_peer_identity;
		i = 0;
		mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
			if (mnl_attr_get_type(attr) == HANDSHAKE_A_ACCEPT_PEER_IDENTITY) {
				dst->peer_identity[i] = mnl_attr_get_u32(attr);
				i++;
			}
		}
	}

	return MNL_CB_OK;
}

/* Synchronous "do" request: build a HANDSHAKE_CMD_ACCEPT message from req,
 * execute it, and return the parsed response.  Returns NULL on failure
 * (the partially filled rsp is freed).  Caller owns the returned rsp and
 * must release it with handshake_accept_rsp_free().
 */
struct handshake_accept_rsp *
handshake_accept(struct ynl_sock *ys, struct handshake_accept_req *req)
{
	struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
	struct handshake_accept_rsp *rsp;
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, HANDSHAKE_CMD_ACCEPT, 1);
	ys->req_policy = &handshake_accept_nest;
	yrs.yarg.rsp_policy = &handshake_accept_nest;

	if (req->_present.handler_class)
		mnl_attr_put_u32(nlh, HANDSHAKE_A_ACCEPT_HANDLER_CLASS, req->handler_class);

	rsp = calloc(1, sizeof(*rsp));
	yrs.yarg.data = rsp;
	yrs.cb = handshake_accept_rsp_parse;
	yrs.rsp_cmd = HANDSHAKE_CMD_ACCEPT;

	err = ynl_exec(ys, nlh, &yrs);
	if (err < 0)
		goto err_free;

	return rsp;

err_free:
	handshake_accept_rsp_free(rsp);
	return NULL;
}

/* HANDSHAKE_CMD_ACCEPT - notify */
/* Free an accept notification: same member cleanup as the response,
 * but the payload lives in rsp->obj.
 */
void handshake_accept_ntf_free(struct handshake_accept_ntf *rsp)
{
	unsigned int i;

	free(rsp->obj.peer_identity);
	for (i = 0; i < rsp->obj.n_certificate; i++)
		handshake_x509_free(&rsp->obj.certificate[i]);
	free(rsp->obj.certificate);
	free(rsp->obj.peername);
	free(rsp);
}

/* ============== HANDSHAKE_CMD_DONE ============== */
/* HANDSHAKE_CMD_DONE - do */
/* Free a done request, including its remote-auth array. */
void handshake_done_req_free(struct handshake_done_req *req)
{
	free(req->remote_auth);
	free(req);
}

/* Synchronous "do" request with no response payload: emit the present
 * scalar attributes and one remote-auth attribute per array element.
 * Returns 0 on success, -1 on failure.
 */
int handshake_done(struct ynl_sock *ys, struct handshake_done_req *req)
{
	struct nlmsghdr *nlh;
	int err;

	nlh = ynl_gemsg_start_req(ys, ys->family_id, HANDSHAKE_CMD_DONE, 1);
	ys->req_policy = &handshake_done_nest;

	if (req->_present.status)
		mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_STATUS, req->status);
	if (req->_present.sockfd)
		mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_SOCKFD, req->sockfd);
	for (unsigned int i = 0; i < req->n_remote_auth; i++)
		mnl_attr_put_u32(nlh, HANDSHAKE_A_DONE_REMOTE_AUTH, req->remote_auth[i]);

	err = ynl_exec(ys, nlh, NULL);
	if (err < 0)
		return -1;
	return 0;
}

/* Notification dispatch table: the "ready" notification reuses the
 * accept response layout, parser and policy.
 */
static const struct ynl_ntf_info handshake_ntf_info[] = {
	[HANDSHAKE_CMD_READY] = {
		.alloc_sz = sizeof(struct handshake_accept_ntf),
		.cb = handshake_accept_rsp_parse,
		.policy = &handshake_accept_nest,
		.free = (void *)handshake_accept_ntf_free,
	},
};

/* Family descriptor consumed by the ynl core (ynl_sock_create() etc.). */
const struct ynl_family ynl_handshake_family = {
	.name = "handshake",
	.ntf_info = handshake_ntf_info,
	.ntf_info_size = MNL_ARRAY_SIZE(handshake_ntf_info),
};
linux-master
tools/net/ynl/generated/handshake-user.c
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/devlink.yaml */ /* YNL-GEN user source */ #include <stdlib.h> #include <string.h> #include "devlink-user.h" #include "ynl.h" #include <linux/devlink.h> #include <libmnl/libmnl.h> #include <linux/genetlink.h> /* Enums */ static const char * const devlink_op_strmap[] = { [3] = "get", [7] = "port-get", [DEVLINK_CMD_SB_GET] = "sb-get", [DEVLINK_CMD_SB_POOL_GET] = "sb-pool-get", [DEVLINK_CMD_SB_PORT_POOL_GET] = "sb-port-pool-get", [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = "sb-tc-pool-bind-get", [DEVLINK_CMD_PARAM_GET] = "param-get", [DEVLINK_CMD_REGION_GET] = "region-get", [DEVLINK_CMD_INFO_GET] = "info-get", [DEVLINK_CMD_HEALTH_REPORTER_GET] = "health-reporter-get", [DEVLINK_CMD_TRAP_GET] = "trap-get", [DEVLINK_CMD_TRAP_GROUP_GET] = "trap-group-get", [DEVLINK_CMD_TRAP_POLICER_GET] = "trap-policer-get", [DEVLINK_CMD_RATE_GET] = "rate-get", [DEVLINK_CMD_LINECARD_GET] = "linecard-get", [DEVLINK_CMD_SELFTESTS_GET] = "selftests-get", }; const char *devlink_op_str(int op) { if (op < 0 || op >= (int)MNL_ARRAY_SIZE(devlink_op_strmap)) return NULL; return devlink_op_strmap[op]; } static const char * const devlink_sb_pool_type_strmap[] = { [0] = "ingress", [1] = "egress", }; const char *devlink_sb_pool_type_str(enum devlink_sb_pool_type value) { if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_sb_pool_type_strmap)) return NULL; return devlink_sb_pool_type_strmap[value]; } /* Policies */ struct ynl_policy_attr devlink_dl_info_version_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, }, }; struct ynl_policy_nest devlink_dl_info_version_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_dl_info_version_policy, }; struct ynl_policy_attr 
devlink_dl_reload_stats_entry_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RELOAD_STATS_LIMIT] = { .name = "reload-stats-limit", .type = YNL_PT_U8, }, [DEVLINK_ATTR_RELOAD_STATS_VALUE] = { .name = "reload-stats-value", .type = YNL_PT_U32, }, }; struct ynl_policy_nest devlink_dl_reload_stats_entry_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_dl_reload_stats_entry_policy, }; struct ynl_policy_attr devlink_dl_reload_act_stats_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RELOAD_STATS_ENTRY] = { .name = "reload-stats-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_entry_nest, }, }; struct ynl_policy_nest devlink_dl_reload_act_stats_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_dl_reload_act_stats_policy, }; struct ynl_policy_attr devlink_dl_reload_act_info_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RELOAD_ACTION] = { .name = "reload-action", .type = YNL_PT_U8, }, [DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, }, }; struct ynl_policy_nest devlink_dl_reload_act_info_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_dl_reload_act_info_policy, }; struct ynl_policy_attr devlink_dl_reload_stats_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, }, }; struct ynl_policy_nest devlink_dl_reload_stats_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_dl_reload_stats_policy, }; struct ynl_policy_attr devlink_dl_dev_stats_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, }, [DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, }, }; struct ynl_policy_nest devlink_dl_dev_stats_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_dl_dev_stats_policy, }; struct 
ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .name = "bus-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_DEV_NAME] = { .name = "dev-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_PORT_INDEX] = { .name = "port-index", .type = YNL_PT_U32, }, [DEVLINK_ATTR_SB_INDEX] = { .name = "sb-index", .type = YNL_PT_U32, }, [DEVLINK_ATTR_SB_POOL_INDEX] = { .name = "sb-pool-index", .type = YNL_PT_U16, }, [DEVLINK_ATTR_SB_POOL_TYPE] = { .name = "sb-pool-type", .type = YNL_PT_U8, }, [DEVLINK_ATTR_SB_TC_INDEX] = { .name = "sb-tc-index", .type = YNL_PT_U16, }, [DEVLINK_ATTR_PARAM_NAME] = { .name = "param-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_REGION_NAME] = { .name = "region-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_INFO_DRIVER_NAME] = { .name = "info-driver-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_INFO_SERIAL_NUMBER] = { .name = "info-serial-number", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_INFO_VERSION_FIXED] = { .name = "info-version-fixed", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, }, [DEVLINK_ATTR_INFO_VERSION_RUNNING] = { .name = "info-version-running", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, }, [DEVLINK_ATTR_INFO_VERSION_STORED] = { .name = "info-version-stored", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, }, [DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .name = "health-reporter-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_TRAP_NAME] = { .name = "trap-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .name = "trap-group-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_RELOAD_FAILED] = { .name = "reload-failed", .type = YNL_PT_U8, }, [DEVLINK_ATTR_TRAP_POLICER_ID] = { .name = "trap-policer-id", .type = YNL_PT_U32, }, [DEVLINK_ATTR_RELOAD_ACTION] = { .name = 
"reload-action", .type = YNL_PT_U8, }, [DEVLINK_ATTR_DEV_STATS] = { .name = "dev-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_dev_stats_nest, }, [DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, }, [DEVLINK_ATTR_RELOAD_STATS_ENTRY] = { .name = "reload-stats-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_entry_nest, }, [DEVLINK_ATTR_RELOAD_STATS_LIMIT] = { .name = "reload-stats-limit", .type = YNL_PT_U8, }, [DEVLINK_ATTR_RELOAD_STATS_VALUE] = { .name = "reload-stats-value", .type = YNL_PT_U32, }, [DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, }, [DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, }, [DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, }, [DEVLINK_ATTR_RATE_NODE_NAME] = { .name = "rate-node-name", .type = YNL_PT_NUL_STR, }, [DEVLINK_ATTR_LINECARD_INDEX] = { .name = "linecard-index", .type = YNL_PT_U32, }, }; struct ynl_policy_nest devlink_nest = { .max_attr = DEVLINK_ATTR_MAX, .table = devlink_policy, }; /* Common nested types */ void devlink_dl_info_version_free(struct devlink_dl_info_version *obj) { free(obj->info_version_name); free(obj->info_version_value); } int devlink_dl_info_version_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct devlink_dl_info_version *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_INFO_VERSION_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.info_version_name_len = len; dst->info_version_name = malloc(len + 1); memcpy(dst->info_version_name, 
mnl_attr_get_str(attr), len); dst->info_version_name[len] = 0; } else if (type == DEVLINK_ATTR_INFO_VERSION_VALUE) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.info_version_value_len = len; dst->info_version_value = malloc(len + 1); memcpy(dst->info_version_value, mnl_attr_get_str(attr), len); dst->info_version_value[len] = 0; } } return 0; } void devlink_dl_reload_stats_entry_free(struct devlink_dl_reload_stats_entry *obj) { } int devlink_dl_reload_stats_entry_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct devlink_dl_reload_stats_entry *dst = yarg->data; const struct nlattr *attr; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_RELOAD_STATS_LIMIT) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reload_stats_limit = 1; dst->reload_stats_limit = mnl_attr_get_u8(attr); } else if (type == DEVLINK_ATTR_RELOAD_STATS_VALUE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reload_stats_value = 1; dst->reload_stats_value = mnl_attr_get_u32(attr); } } return 0; } void devlink_dl_reload_act_stats_free(struct devlink_dl_reload_act_stats *obj) { unsigned int i; for (i = 0; i < obj->n_reload_stats_entry; i++) devlink_dl_reload_stats_entry_free(&obj->reload_stats_entry[i]); free(obj->reload_stats_entry); } int devlink_dl_reload_act_stats_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct devlink_dl_reload_act_stats *dst = yarg->data; unsigned int n_reload_stats_entry = 0; const struct nlattr *attr; struct ynl_parse_arg parg; int i; parg.ys = yarg->ys; if (dst->reload_stats_entry) return ynl_error_parse(yarg, "attribute already present (dl-reload-act-stats.reload-stats-entry)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_RELOAD_STATS_ENTRY) { 
n_reload_stats_entry++; } } if (n_reload_stats_entry) { dst->reload_stats_entry = calloc(n_reload_stats_entry, sizeof(*dst->reload_stats_entry)); dst->n_reload_stats_entry = n_reload_stats_entry; i = 0; parg.rsp_policy = &devlink_dl_reload_stats_entry_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RELOAD_STATS_ENTRY) { parg.data = &dst->reload_stats_entry[i]; if (devlink_dl_reload_stats_entry_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void devlink_dl_reload_act_info_free(struct devlink_dl_reload_act_info *obj) { unsigned int i; for (i = 0; i < obj->n_reload_action_stats; i++) devlink_dl_reload_act_stats_free(&obj->reload_action_stats[i]); free(obj->reload_action_stats); } int devlink_dl_reload_act_info_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct devlink_dl_reload_act_info *dst = yarg->data; unsigned int n_reload_action_stats = 0; const struct nlattr *attr; struct ynl_parse_arg parg; int i; parg.ys = yarg->ys; if (dst->reload_action_stats) return ynl_error_parse(yarg, "attribute already present (dl-reload-act-info.reload-action-stats)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_RELOAD_ACTION) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reload_action = 1; dst->reload_action = mnl_attr_get_u8(attr); } else if (type == DEVLINK_ATTR_RELOAD_ACTION_STATS) { n_reload_action_stats++; } } if (n_reload_action_stats) { dst->reload_action_stats = calloc(n_reload_action_stats, sizeof(*dst->reload_action_stats)); dst->n_reload_action_stats = n_reload_action_stats; i = 0; parg.rsp_policy = &devlink_dl_reload_act_stats_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RELOAD_ACTION_STATS) { parg.data = &dst->reload_action_stats[i]; if (devlink_dl_reload_act_stats_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void 
devlink_dl_reload_stats_free(struct devlink_dl_reload_stats *obj) { unsigned int i; for (i = 0; i < obj->n_reload_action_info; i++) devlink_dl_reload_act_info_free(&obj->reload_action_info[i]); free(obj->reload_action_info); } int devlink_dl_reload_stats_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct devlink_dl_reload_stats *dst = yarg->data; unsigned int n_reload_action_info = 0; const struct nlattr *attr; struct ynl_parse_arg parg; int i; parg.ys = yarg->ys; if (dst->reload_action_info) return ynl_error_parse(yarg, "attribute already present (dl-reload-stats.reload-action-info)"); mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_RELOAD_ACTION_INFO) { n_reload_action_info++; } } if (n_reload_action_info) { dst->reload_action_info = calloc(n_reload_action_info, sizeof(*dst->reload_action_info)); dst->n_reload_action_info = n_reload_action_info; i = 0; parg.rsp_policy = &devlink_dl_reload_act_info_nest; mnl_attr_for_each_nested(attr, nested) { if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RELOAD_ACTION_INFO) { parg.data = &dst->reload_action_info[i]; if (devlink_dl_reload_act_info_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return 0; } void devlink_dl_dev_stats_free(struct devlink_dl_dev_stats *obj) { devlink_dl_reload_stats_free(&obj->reload_stats); devlink_dl_reload_stats_free(&obj->remote_reload_stats); } int devlink_dl_dev_stats_parse(struct ynl_parse_arg *yarg, const struct nlattr *nested) { struct devlink_dl_dev_stats *dst = yarg->data; const struct nlattr *attr; struct ynl_parse_arg parg; parg.ys = yarg->ys; mnl_attr_for_each_nested(attr, nested) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_RELOAD_STATS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reload_stats = 1; parg.rsp_policy = &devlink_dl_reload_stats_nest; parg.data = &dst->reload_stats; if (devlink_dl_reload_stats_parse(&parg, attr)) return MNL_CB_ERROR; } 
else if (type == DEVLINK_ATTR_REMOTE_RELOAD_STATS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.remote_reload_stats = 1; parg.rsp_policy = &devlink_dl_reload_stats_nest; parg.data = &dst->remote_reload_stats; if (devlink_dl_reload_stats_parse(&parg, attr)) return MNL_CB_ERROR; } } return 0; } /* ============== DEVLINK_CMD_GET ============== */ /* DEVLINK_CMD_GET - do */ void devlink_get_req_free(struct devlink_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_get_rsp_free(struct devlink_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); devlink_dl_dev_stats_free(&rsp->dev_stats); free(rsp); } int devlink_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct devlink_get_rsp *dst; const struct nlattr *attr; struct ynl_parse_arg parg; dst = yarg->data; parg.ys = yarg->ys; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_RELOAD_FAILED) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reload_failed = 1; dst->reload_failed = mnl_attr_get_u8(attr); } else if (type == DEVLINK_ATTR_RELOAD_ACTION) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.reload_action = 1; dst->reload_action = 
mnl_attr_get_u8(attr); } else if (type == DEVLINK_ATTR_DEV_STATS) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.dev_stats = 1; parg.rsp_policy = &devlink_dl_dev_stats_nest; parg.data = &dst->dev_stats; if (devlink_dl_dev_stats_parse(&parg, attr)) return MNL_CB_ERROR; } } return MNL_CB_OK; } struct devlink_get_rsp * devlink_get(struct ynl_sock *ys, struct devlink_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_get_rsp_parse; yrs.rsp_cmd = 3; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_GET - dump */ void devlink_get_list_free(struct devlink_get_list *rsp) { struct devlink_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); devlink_dl_dev_stats_free(&rsp->obj.dev_stats); free(rsp); } } struct devlink_get_list *devlink_get_dump(struct ynl_sock *ys) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_get_list); yds.cb = devlink_get_rsp_parse; yds.rsp_cmd = 3; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_GET, 1); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_PORT_GET ============== */ /* DEVLINK_CMD_PORT_GET - do */ void devlink_port_get_req_free(struct 
devlink_port_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_port_get_rsp_free(struct devlink_port_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp); } int devlink_port_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct devlink_port_get_rsp *dst; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct devlink_port_get_rsp * devlink_port_get(struct ynl_sock *ys, struct devlink_port_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_port_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index); 
rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_port_get_rsp_parse; yrs.rsp_cmd = 7; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_port_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_PORT_GET - dump */ int devlink_port_get_rsp_dump_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_port_get_rsp_dump *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } void devlink_port_get_rsp_list_free(struct devlink_port_get_rsp_list *rsp) { struct devlink_port_get_rsp_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } } struct devlink_port_get_rsp_list * devlink_port_get_dump(struct ynl_sock *ys, struct devlink_port_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_port_get_rsp_list); yds.cb = devlink_port_get_rsp_dump_parse; yds.rsp_cmd = 7; 
yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_PORT_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_port_get_rsp_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_SB_GET ============== */ /* DEVLINK_CMD_SB_GET - do */ void devlink_sb_get_req_free(struct devlink_sb_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_sb_get_rsp_free(struct devlink_sb_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp); } int devlink_sb_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct devlink_sb_get_rsp *dst; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_SB_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_index = 1; dst->sb_index = mnl_attr_get_u32(attr); } } return MNL_CB_OK; } struct devlink_sb_get_rsp * devlink_sb_get(struct ynl_sock *ys, 
struct devlink_sb_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_sb_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.sb_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_sb_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_SB_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_sb_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_SB_GET - dump */ void devlink_sb_get_list_free(struct devlink_sb_get_list *rsp) { struct devlink_sb_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } } struct devlink_sb_get_list * devlink_sb_get_dump(struct ynl_sock *ys, struct devlink_sb_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_sb_get_list); yds.cb = devlink_sb_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_SB_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_sb_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_SB_POOL_GET ============== */ /* DEVLINK_CMD_SB_POOL_GET - do */ void devlink_sb_pool_get_req_free(struct 
devlink_sb_pool_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_sb_pool_get_rsp_free(struct devlink_sb_pool_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp); } int devlink_sb_pool_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_sb_pool_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_SB_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_index = 1; dst->sb_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_SB_POOL_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_pool_index = 1; dst->sb_pool_index = mnl_attr_get_u16(attr); } } return MNL_CB_OK; } struct devlink_sb_pool_get_rsp * devlink_sb_pool_get(struct ynl_sock *ys, struct devlink_sb_pool_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_sb_pool_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, 
req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.sb_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index); if (req->_present.sb_pool_index) mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_sb_pool_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_SB_POOL_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_sb_pool_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_SB_POOL_GET - dump */ void devlink_sb_pool_get_list_free(struct devlink_sb_pool_get_list *rsp) { struct devlink_sb_pool_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } } struct devlink_sb_pool_get_list * devlink_sb_pool_get_dump(struct ynl_sock *ys, struct devlink_sb_pool_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_sb_pool_get_list); yds.cb = devlink_sb_pool_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_SB_POOL_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_sb_pool_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_SB_PORT_POOL_GET ============== */ /* DEVLINK_CMD_SB_PORT_POOL_GET - do */ void devlink_sb_port_pool_get_req_free(struct devlink_sb_port_pool_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_sb_port_pool_get_rsp_free(struct devlink_sb_port_pool_get_rsp *rsp) { 
free(rsp->bus_name); free(rsp->dev_name); free(rsp); } int devlink_sb_port_pool_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_sb_port_pool_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_SB_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_index = 1; dst->sb_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_SB_POOL_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_pool_index = 1; dst->sb_pool_index = mnl_attr_get_u16(attr); } } return MNL_CB_OK; } struct devlink_sb_port_pool_get_rsp * devlink_sb_port_pool_get(struct ynl_sock *ys, struct devlink_sb_port_pool_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_sb_port_pool_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if 
(req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index); if (req->_present.sb_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index); if (req->_present.sb_pool_index) mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_sb_port_pool_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_sb_port_pool_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_SB_PORT_POOL_GET - dump */ void devlink_sb_port_pool_get_list_free(struct devlink_sb_port_pool_get_list *rsp) { struct devlink_sb_port_pool_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } } struct devlink_sb_port_pool_get_list * devlink_sb_port_pool_get_dump(struct ynl_sock *ys, struct devlink_sb_port_pool_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_sb_port_pool_get_list); yds.cb = devlink_sb_port_pool_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_sb_port_pool_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_SB_TC_POOL_BIND_GET ============== */ /* 
DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */ void devlink_sb_tc_pool_bind_get_req_free(struct devlink_sb_tc_pool_bind_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_sb_tc_pool_bind_get_rsp_free(struct devlink_sb_tc_pool_bind_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp); } int devlink_sb_tc_pool_bind_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_sb_tc_pool_bind_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_SB_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_index = 1; dst->sb_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_SB_POOL_TYPE) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_pool_type = 1; dst->sb_pool_type = mnl_attr_get_u8(attr); } else if (type == DEVLINK_ATTR_SB_TC_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.sb_tc_index = 1; dst->sb_tc_index = mnl_attr_get_u16(attr); } } 
return MNL_CB_OK; } struct devlink_sb_tc_pool_bind_get_rsp * devlink_sb_tc_pool_bind_get(struct ynl_sock *ys, struct devlink_sb_tc_pool_bind_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_sb_tc_pool_bind_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index); if (req->_present.sb_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index); if (req->_present.sb_pool_type) mnl_attr_put_u8(nlh, DEVLINK_ATTR_SB_POOL_TYPE, req->sb_pool_type); if (req->_present.sb_tc_index) mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_TC_INDEX, req->sb_tc_index); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_sb_tc_pool_bind_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_sb_tc_pool_bind_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_SB_TC_POOL_BIND_GET - dump */ void devlink_sb_tc_pool_bind_get_list_free(struct devlink_sb_tc_pool_bind_get_list *rsp) { struct devlink_sb_tc_pool_bind_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } } struct devlink_sb_tc_pool_bind_get_list * devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys, struct devlink_sb_tc_pool_bind_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_sb_tc_pool_bind_get_list); yds.cb = devlink_sb_tc_pool_bind_get_rsp_parse; yds.rsp_cmd = 
DEVLINK_CMD_SB_TC_POOL_BIND_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_sb_tc_pool_bind_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_PARAM_GET ============== */ /* DEVLINK_CMD_PARAM_GET - do */ void devlink_param_get_req_free(struct devlink_param_get_req *req) { free(req->bus_name); free(req->dev_name); free(req->param_name); free(req); } void devlink_param_get_rsp_free(struct devlink_param_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp->param_name); free(rsp); } int devlink_param_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_param_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PARAM_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = 
strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.param_name_len = len; dst->param_name = malloc(len + 1); memcpy(dst->param_name, mnl_attr_get_str(attr), len); dst->param_name[len] = 0; } } return MNL_CB_OK; } struct devlink_param_get_rsp * devlink_param_get(struct ynl_sock *ys, struct devlink_param_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_param_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PARAM_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.param_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_PARAM_NAME, req->param_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_param_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_PARAM_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_param_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_PARAM_GET - dump */ void devlink_param_get_list_free(struct devlink_param_get_list *rsp) { struct devlink_param_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp->obj.param_name); free(rsp); } } struct devlink_param_get_list * devlink_param_get_dump(struct ynl_sock *ys, struct devlink_param_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_param_get_list); yds.cb = devlink_param_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_PARAM_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_PARAM_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, 
DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_param_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_REGION_GET ============== */ /* DEVLINK_CMD_REGION_GET - do */ void devlink_region_get_req_free(struct devlink_region_get_req *req) { free(req->bus_name); free(req->dev_name); free(req->region_name); free(req); } void devlink_region_get_rsp_free(struct devlink_region_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp->region_name); free(rsp); } int devlink_region_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_region_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_REGION_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); 
dst->_present.region_name_len = len; dst->region_name = malloc(len + 1); memcpy(dst->region_name, mnl_attr_get_str(attr), len); dst->region_name[len] = 0; } } return MNL_CB_OK; } struct devlink_region_get_rsp * devlink_region_get(struct ynl_sock *ys, struct devlink_region_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_region_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_REGION_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index); if (req->_present.region_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_REGION_NAME, req->region_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_region_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_REGION_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_region_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_REGION_GET - dump */ void devlink_region_get_list_free(struct devlink_region_get_list *rsp) { struct devlink_region_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp->obj.region_name); free(rsp); } } struct devlink_region_get_list * devlink_region_get_dump(struct ynl_sock *ys, struct devlink_region_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_region_get_list); yds.cb = devlink_region_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_REGION_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_REGION_GET, 1); ys->req_policy = &devlink_nest; if 
(req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_region_get_list_free(yds.first); return NULL; } /* ============== DEVLINK_CMD_INFO_GET ============== */ /* DEVLINK_CMD_INFO_GET - do */ void devlink_info_get_req_free(struct devlink_info_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); } void devlink_info_get_rsp_free(struct devlink_info_get_rsp *rsp) { unsigned int i; free(rsp->bus_name); free(rsp->dev_name); free(rsp->info_driver_name); free(rsp->info_serial_number); for (i = 0; i < rsp->n_info_version_fixed; i++) devlink_dl_info_version_free(&rsp->info_version_fixed[i]); free(rsp->info_version_fixed); for (i = 0; i < rsp->n_info_version_running; i++) devlink_dl_info_version_free(&rsp->info_version_running[i]); free(rsp->info_version_running); for (i = 0; i < rsp->n_info_version_stored; i++) devlink_dl_info_version_free(&rsp->info_version_stored[i]); free(rsp->info_version_stored); free(rsp); } int devlink_info_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { unsigned int n_info_version_running = 0; unsigned int n_info_version_stored = 0; unsigned int n_info_version_fixed = 0; struct ynl_parse_arg *yarg = data; struct devlink_info_get_rsp *dst; const struct nlattr *attr; struct ynl_parse_arg parg; int i; dst = yarg->data; parg.ys = yarg->ys; if (dst->info_version_fixed) return ynl_error_parse(yarg, "attribute already present (devlink.info-version-fixed)"); if (dst->info_version_running) return ynl_error_parse(yarg, "attribute already present (devlink.info-version-running)"); if (dst->info_version_stored) return ynl_error_parse(yarg, "attribute already present (devlink.info-version-stored)"); mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == 
DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_INFO_DRIVER_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.info_driver_name_len = len; dst->info_driver_name = malloc(len + 1); memcpy(dst->info_driver_name, mnl_attr_get_str(attr), len); dst->info_driver_name[len] = 0; } else if (type == DEVLINK_ATTR_INFO_SERIAL_NUMBER) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.info_serial_number_len = len; dst->info_serial_number = malloc(len + 1); memcpy(dst->info_serial_number, mnl_attr_get_str(attr), len); dst->info_serial_number[len] = 0; } else if (type == DEVLINK_ATTR_INFO_VERSION_FIXED) { n_info_version_fixed++; } else if (type == DEVLINK_ATTR_INFO_VERSION_RUNNING) { n_info_version_running++; } else if (type == DEVLINK_ATTR_INFO_VERSION_STORED) { n_info_version_stored++; } } if (n_info_version_fixed) { dst->info_version_fixed = calloc(n_info_version_fixed, sizeof(*dst->info_version_fixed)); dst->n_info_version_fixed = n_info_version_fixed; i = 0; parg.rsp_policy = &devlink_dl_info_version_nest; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { if (mnl_attr_get_type(attr) == DEVLINK_ATTR_INFO_VERSION_FIXED) { 
parg.data = &dst->info_version_fixed[i]; if (devlink_dl_info_version_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } if (n_info_version_running) { dst->info_version_running = calloc(n_info_version_running, sizeof(*dst->info_version_running)); dst->n_info_version_running = n_info_version_running; i = 0; parg.rsp_policy = &devlink_dl_info_version_nest; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { if (mnl_attr_get_type(attr) == DEVLINK_ATTR_INFO_VERSION_RUNNING) { parg.data = &dst->info_version_running[i]; if (devlink_dl_info_version_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } if (n_info_version_stored) { dst->info_version_stored = calloc(n_info_version_stored, sizeof(*dst->info_version_stored)); dst->n_info_version_stored = n_info_version_stored; i = 0; parg.rsp_policy = &devlink_dl_info_version_nest; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { if (mnl_attr_get_type(attr) == DEVLINK_ATTR_INFO_VERSION_STORED) { parg.data = &dst->info_version_stored[i]; if (devlink_dl_info_version_parse(&parg, attr)) return MNL_CB_ERROR; i++; } } } return MNL_CB_OK; } struct devlink_info_get_rsp * devlink_info_get(struct ynl_sock *ys, struct devlink_info_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_info_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_INFO_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_info_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_INFO_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_info_get_rsp_free(rsp); return NULL; } /* DEVLINK_CMD_INFO_GET - dump */ void devlink_info_get_list_free(struct 
devlink_info_get_list *rsp) { struct devlink_info_get_list *next = rsp; /* walk the YNL_LIST_END-terminated list, freeing every owned string and nested array in each node */ while ((void *)next != YNL_LIST_END) { unsigned int i; rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp->obj.info_driver_name); free(rsp->obj.info_serial_number); for (i = 0; i < rsp->obj.n_info_version_fixed; i++) devlink_dl_info_version_free(&rsp->obj.info_version_fixed[i]); free(rsp->obj.info_version_fixed); for (i = 0; i < rsp->obj.n_info_version_running; i++) devlink_dl_info_version_free(&rsp->obj.info_version_running[i]); free(rsp->obj.info_version_running); for (i = 0; i < rsp->obj.n_info_version_stored; i++) devlink_dl_info_version_free(&rsp->obj.info_version_stored[i]); free(rsp->obj.info_version_stored); free(rsp); } }
/* Dump variant: returns a linked list of responses (YNL_LIST_END terminated);
 * caller frees it with devlink_info_get_list_free(). NULL on failure. */
struct devlink_info_get_list *devlink_info_get_dump(struct ynl_sock *ys) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_info_get_list); yds.cb = devlink_info_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_INFO_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_INFO_GET, 1); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_info_get_list_free(yds.first); return NULL; }
/* ============== DEVLINK_CMD_HEALTH_REPORTER_GET ============== */
/* DEVLINK_CMD_HEALTH_REPORTER_GET - do */
void devlink_health_reporter_get_req_free(struct devlink_health_reporter_get_req *req) { free(req->bus_name); free(req->dev_name); free(req->health_reporter_name); free(req); }
void devlink_health_reporter_get_rsp_free(struct devlink_health_reporter_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp->health_reporter_name); free(rsp); }
/* Netlink callback: copy recognised attributes out of the message into *dst
 * (strings are duplicated onto the heap and NUL terminated). */
int devlink_health_reporter_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_health_reporter_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct
genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_HEALTH_REPORTER_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.health_reporter_name_len = len; dst->health_reporter_name = malloc(len + 1); memcpy(dst->health_reporter_name, mnl_attr_get_str(attr), len); dst->health_reporter_name[len] = 0; } } return MNL_CB_OK; }
/* Do request: only attributes flagged in req->_present are emitted.
 * Caller frees the returned response; NULL on failure. */
struct devlink_health_reporter_get_rsp * devlink_health_reporter_get(struct ynl_sock *ys, struct devlink_health_reporter_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_health_reporter_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX,
req->port_index); if (req->_present.health_reporter_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_health_reporter_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_HEALTH_REPORTER_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_health_reporter_get_rsp_free(rsp); return NULL; }
/* DEVLINK_CMD_HEALTH_REPORTER_GET - dump */
void devlink_health_reporter_get_list_free(struct devlink_health_reporter_get_list *rsp) { struct devlink_health_reporter_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp->obj.health_reporter_name); free(rsp); } }
struct devlink_health_reporter_get_list * devlink_health_reporter_get_dump(struct ynl_sock *ys, struct devlink_health_reporter_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_health_reporter_get_list); yds.cb = devlink_health_reporter_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_HEALTH_REPORTER_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_health_reporter_get_list_free(yds.first); return NULL; }
/* ============== DEVLINK_CMD_TRAP_GET ============== */
/* DEVLINK_CMD_TRAP_GET - do */
void devlink_trap_get_req_free(struct devlink_trap_get_req *req) { free(req->bus_name); free(req->dev_name); free(req->trap_name); free(req); }
void devlink_trap_get_rsp_free(struct devlink_trap_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp->trap_name); free(rsp); }
/* Netlink callback: copy trap-get reply attributes into *dst; string
 * attributes are heap-duplicated and NUL terminated. */
int devlink_trap_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct devlink_trap_get_rsp *dst; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_TRAP_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.trap_name_len = len; dst->trap_name = malloc(len + 1); memcpy(dst->trap_name, mnl_attr_get_str(attr), len); dst->trap_name[len] = 0; } } return MNL_CB_OK; }
/* Do request: caller frees the returned response with
 * devlink_trap_get_rsp_free(); NULL on failure. */
struct devlink_trap_get_rsp * devlink_trap_get(struct ynl_sock *ys, struct devlink_trap_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_trap_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME,
req->dev_name); if (req->_present.trap_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_TRAP_NAME, req->trap_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_trap_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_TRAP_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_trap_get_rsp_free(rsp); return NULL; }
/* DEVLINK_CMD_TRAP_GET - dump */
void devlink_trap_get_list_free(struct devlink_trap_get_list *rsp) { struct devlink_trap_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp->obj.trap_name); free(rsp); } }
struct devlink_trap_get_list * devlink_trap_get_dump(struct ynl_sock *ys, struct devlink_trap_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_trap_get_list); yds.cb = devlink_trap_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_TRAP_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_trap_get_list_free(yds.first); return NULL; }
/* ============== DEVLINK_CMD_TRAP_GROUP_GET ============== */
/* DEVLINK_CMD_TRAP_GROUP_GET - do */
void devlink_trap_group_get_req_free(struct devlink_trap_group_get_req *req) { free(req->bus_name); free(req->dev_name); free(req->trap_group_name); free(req); }
void devlink_trap_group_get_rsp_free(struct devlink_trap_group_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp->trap_group_name); free(rsp); }
int devlink_trap_group_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_trap_group_get_rsp
*dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; /* copy recognised attributes into *dst; strings are heap-duplicated and NUL terminated */ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_TRAP_GROUP_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.trap_group_name_len = len; dst->trap_group_name = malloc(len + 1); memcpy(dst->trap_group_name, mnl_attr_get_str(attr), len); dst->trap_group_name[len] = 0; } } return MNL_CB_OK; }
/* Do request: caller frees the returned response with
 * devlink_trap_group_get_rsp_free(); NULL on failure. */
struct devlink_trap_group_get_rsp * devlink_trap_group_get(struct ynl_sock *ys, struct devlink_trap_group_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_trap_group_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.trap_group_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_TRAP_GROUP_NAME, req->trap_group_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb =
devlink_trap_group_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_trap_group_get_rsp_free(rsp); return NULL; }
/* DEVLINK_CMD_TRAP_GROUP_GET - dump */
void devlink_trap_group_get_list_free(struct devlink_trap_group_get_list *rsp) { struct devlink_trap_group_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp->obj.trap_group_name); free(rsp); } }
struct devlink_trap_group_get_list * devlink_trap_group_get_dump(struct ynl_sock *ys, struct devlink_trap_group_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_trap_group_get_list); yds.cb = devlink_trap_group_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_trap_group_get_list_free(yds.first); return NULL; }
/* ============== DEVLINK_CMD_TRAP_POLICER_GET ============== */
/* DEVLINK_CMD_TRAP_POLICER_GET - do */
void devlink_trap_policer_get_req_free(struct devlink_trap_policer_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); }
void devlink_trap_policer_get_rsp_free(struct devlink_trap_policer_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp); }
int devlink_trap_policer_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_trap_policer_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh,
sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_TRAP_POLICER_ID) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.trap_policer_id = 1; dst->trap_policer_id = mnl_attr_get_u32(attr); } } return MNL_CB_OK; }
/* Do request: caller frees the returned response with
 * devlink_trap_policer_get_rsp_free(); NULL on failure. */
struct devlink_trap_policer_get_rsp * devlink_trap_policer_get(struct ynl_sock *ys, struct devlink_trap_policer_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_trap_policer_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.trap_policer_id) mnl_attr_put_u32(nlh, DEVLINK_ATTR_TRAP_POLICER_ID, req->trap_policer_id); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_trap_policer_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_trap_policer_get_rsp_free(rsp); return NULL; }
/* DEVLINK_CMD_TRAP_POLICER_GET - dump */ void
devlink_trap_policer_get_list_free(struct devlink_trap_policer_get_list *rsp) { struct devlink_trap_policer_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } }
struct devlink_trap_policer_get_list * devlink_trap_policer_get_dump(struct ynl_sock *ys, struct devlink_trap_policer_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_trap_policer_get_list); yds.cb = devlink_trap_policer_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_trap_policer_get_list_free(yds.first); return NULL; }
/* ============== DEVLINK_CMD_RATE_GET ============== */
/* DEVLINK_CMD_RATE_GET - do */
void devlink_rate_get_req_free(struct devlink_rate_get_req *req) { free(req->bus_name); free(req->dev_name); free(req->rate_node_name); free(req); }
void devlink_rate_get_rsp_free(struct devlink_rate_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp->rate_node_name); free(rsp); }
/* Netlink callback: copy rate-get reply attributes into *dst; strings are
 * heap-duplicated and NUL terminated. */
int devlink_rate_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct devlink_rate_get_rsp *dst; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_PORT_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.port_index = 1; dst->port_index = mnl_attr_get_u32(attr); } else if (type == DEVLINK_ATTR_RATE_NODE_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.rate_node_name_len = len; dst->rate_node_name = malloc(len + 1); memcpy(dst->rate_node_name, mnl_attr_get_str(attr), len); dst->rate_node_name[len] = 0; } } return MNL_CB_OK; }
/* Do request: caller frees the returned response with
 * devlink_rate_get_rsp_free(); NULL on failure. */
struct devlink_rate_get_rsp * devlink_rate_get(struct ynl_sock *ys, struct devlink_rate_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_rate_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RATE_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.port_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index); if (req->_present.rate_node_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_NODE_NAME, req->rate_node_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_rate_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_RATE_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free:
dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1); memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } else if (type == DEVLINK_ATTR_LINECARD_INDEX) { if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; dst->_present.linecard_index = 1; dst->linecard_index = mnl_attr_get_u32(attr); } } return MNL_CB_OK; }
/* Do request: caller frees the returned response with
 * devlink_linecard_get_rsp_free(); NULL on failure. */
struct devlink_linecard_get_rsp * devlink_linecard_get(struct ynl_sock *ys, struct devlink_linecard_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_linecard_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); if (req->_present.linecard_index) mnl_attr_put_u32(nlh, DEVLINK_ATTR_LINECARD_INDEX, req->linecard_index); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_linecard_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_LINECARD_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_linecard_get_rsp_free(rsp); return NULL; }
/* DEVLINK_CMD_LINECARD_GET - dump */
void devlink_linecard_get_list_free(struct devlink_linecard_get_list *rsp) { struct devlink_linecard_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } }
struct devlink_linecard_get_list * devlink_linecard_get_dump(struct
ynl_sock *ys, struct devlink_linecard_get_req_dump *req) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_linecard_get_list); yds.cb = devlink_linecard_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_LINECARD_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1); ys->req_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_linecard_get_list_free(yds.first); return NULL; }
/* ============== DEVLINK_CMD_SELFTESTS_GET ============== */
/* DEVLINK_CMD_SELFTESTS_GET - do */
void devlink_selftests_get_req_free(struct devlink_selftests_get_req *req) { free(req->bus_name); free(req->dev_name); free(req); }
void devlink_selftests_get_rsp_free(struct devlink_selftests_get_rsp *rsp) { free(rsp->bus_name); free(rsp->dev_name); free(rsp); }
/* Netlink callback: copy selftests-get reply attributes into *dst; strings
 * are heap-duplicated and NUL terminated. */
int devlink_selftests_get_rsp_parse(const struct nlmsghdr *nlh, void *data) { struct devlink_selftests_get_rsp *dst; struct ynl_parse_arg *yarg = data; const struct nlattr *attr; dst = yarg->data; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { unsigned int type = mnl_attr_get_type(attr); if (type == DEVLINK_ATTR_BUS_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.bus_name_len = len; dst->bus_name = malloc(len + 1); memcpy(dst->bus_name, mnl_attr_get_str(attr), len); dst->bus_name[len] = 0; } else if (type == DEVLINK_ATTR_DEV_NAME) { unsigned int len; if (ynl_attr_validate(yarg, attr)) return MNL_CB_ERROR; len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr)); dst->_present.dev_name_len = len; dst->dev_name = malloc(len + 1);
memcpy(dst->dev_name, mnl_attr_get_str(attr), len); dst->dev_name[len] = 0; } } return MNL_CB_OK; }
/* Do request: caller frees the returned response with
 * devlink_selftests_get_rsp_free(); NULL on failure. */
struct devlink_selftests_get_rsp * devlink_selftests_get(struct ynl_sock *ys, struct devlink_selftests_get_req *req) { struct ynl_req_state yrs = { .yarg = { .ys = ys, }, }; struct devlink_selftests_get_rsp *rsp; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SELFTESTS_GET, 1); ys->req_policy = &devlink_nest; yrs.yarg.rsp_policy = &devlink_nest; if (req->_present.bus_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name); if (req->_present.dev_name_len) mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name); rsp = calloc(1, sizeof(*rsp)); yrs.yarg.data = rsp; yrs.cb = devlink_selftests_get_rsp_parse; yrs.rsp_cmd = DEVLINK_CMD_SELFTESTS_GET; err = ynl_exec(ys, nlh, &yrs); if (err < 0) goto err_free; return rsp; err_free: devlink_selftests_get_rsp_free(rsp); return NULL; }
/* DEVLINK_CMD_SELFTESTS_GET - dump */
void devlink_selftests_get_list_free(struct devlink_selftests_get_list *rsp) { struct devlink_selftests_get_list *next = rsp; while ((void *)next != YNL_LIST_END) { rsp = next; next = rsp->next; free(rsp->obj.bus_name); free(rsp->obj.dev_name); free(rsp); } }
struct devlink_selftests_get_list * devlink_selftests_get_dump(struct ynl_sock *ys) { struct ynl_dump_state yds = {}; struct nlmsghdr *nlh; int err; yds.ys = ys; yds.alloc_sz = sizeof(struct devlink_selftests_get_list); yds.cb = devlink_selftests_get_rsp_parse; yds.rsp_cmd = DEVLINK_CMD_SELFTESTS_GET; yds.rsp_policy = &devlink_nest; nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SELFTESTS_GET, 1); err = ynl_exec_dump(ys, nlh, &yds); if (err < 0) goto free_list; return yds.first; free_list: devlink_selftests_get_list_free(yds.first); return NULL; }
/* Family descriptor handed to ynl_sock_create() by library users. */
const struct ynl_family ynl_devlink_family = { .name = "devlink", };
/* linux-master */
/* tools/net/ynl/generated/devlink-user.c */
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <string.h> #include <ynl.h> #include <net/if.h> #include "ethtool-user.h" int main(int argc, char **argv) { struct ethtool_channels_get_req_dump creq = {}; struct ethtool_rings_get_req_dump rreq = {}; struct ethtool_channels_get_list *channels; struct ethtool_rings_get_list *rings; struct ynl_sock *ys; ys = ynl_sock_create(&ynl_ethtool_family, NULL); if (!ys) return 1; creq._present.header = 1; /* ethtool needs an empty nest, sigh */ channels = ethtool_channels_get_dump(ys, &creq); if (!channels) goto err_close; printf("Channels:\n"); ynl_dump_foreach(channels, dev) { printf(" %8s: ", dev->header.dev_name); if (dev->_present.rx_count) printf("rx %d ", dev->rx_count); if (dev->_present.tx_count) printf("tx %d ", dev->tx_count); if (dev->_present.combined_count) printf("combined %d ", dev->combined_count); printf("\n"); } ethtool_channels_get_list_free(channels); rreq._present.header = 1; /* ethtool needs an empty nest.. */ rings = ethtool_rings_get_dump(ys, &rreq); if (!rings) goto err_close; printf("Rings:\n"); ynl_dump_foreach(rings, dev) { printf(" %8s: ", dev->header.dev_name); if (dev->_present.rx) printf("rx %d ", dev->rx); if (dev->_present.tx) printf("tx %d ", dev->tx); printf("\n"); } ethtool_rings_get_list_free(rings); ynl_sock_destroy(ys); return 0; err_close: fprintf(stderr, "YNL (%d): %s\n", ys->err.code, ys->err.msg); ynl_sock_destroy(ys); return 2; }
/* linux-master */
/* tools/net/ynl/samples/ethtool.c */
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <string.h> #include <ynl.h> #include "devlink-user.h" int main(int argc, char **argv) { struct devlink_get_list *devs; struct ynl_sock *ys; ys = ynl_sock_create(&ynl_devlink_family, NULL); if (!ys) return 1; devs = devlink_get_dump(ys); if (!devs) goto err_close; ynl_dump_foreach(devs, d) { struct devlink_info_get_req *info_req; struct devlink_info_get_rsp *info_rsp; printf("%s/%s:\n", d->bus_name, d->dev_name); info_req = devlink_info_get_req_alloc(); devlink_info_get_req_set_bus_name(info_req, d->bus_name); devlink_info_get_req_set_dev_name(info_req, d->dev_name); info_rsp = devlink_info_get(ys, info_req); devlink_info_get_req_free(info_req); if (!info_rsp) goto err_free_devs; if (info_rsp->_present.info_driver_name_len) printf(" driver: %s\n", info_rsp->info_driver_name); if (info_rsp->n_info_version_running) printf(" running fw:\n"); for (unsigned i = 0; i < info_rsp->n_info_version_running; i++) printf(" %s: %s\n", info_rsp->info_version_running[i].info_version_name, info_rsp->info_version_running[i].info_version_value); printf(" ...\n"); devlink_info_get_rsp_free(info_rsp); } devlink_get_list_free(devs); ynl_sock_destroy(ys); return 0; err_free_devs: devlink_get_list_free(devs); err_close: fprintf(stderr, "YNL: %s\n", ys->err.msg); ynl_sock_destroy(ys); return 2; }
/* linux-master */
/* tools/net/ynl/samples/devlink.c */
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <string.h> #include <ynl.h> #include <net/if.h> #include "netdev-user.h" /* netdev genetlink family code sample * This sample shows off basics of the netdev family but also notification * handling, hence the somewhat odd UI. We subscribe to notifications first * then wait for ifc selection, so the socket may already accumulate * notifications as we wait. This allows us to test that YNL can handle * requests and notifications getting interleaved. */ static void netdev_print_device(struct netdev_dev_get_rsp *d, unsigned int op) { char ifname[IF_NAMESIZE]; const char *name; if (!d->_present.ifindex) return; name = if_indextoname(d->ifindex, ifname); if (name) printf("%8s", name); printf("[%d]\t", d->ifindex); if (!d->_present.xdp_features) return; printf("%llx:", d->xdp_features); for (int i = 0; d->xdp_features > 1U << i; i++) { if (d->xdp_features & (1U << i)) printf(" %s", netdev_xdp_act_str(1 << i)); } printf(" xdp-zc-max-segs=%u", d->xdp_zc_max_segs); name = netdev_op_str(op); if (name) printf(" (ntf: %s)", name); printf("\n"); } int main(int argc, char **argv) { struct netdev_dev_get_list *devs; struct ynl_ntf_base_type *ntf; struct ynl_error yerr; struct ynl_sock *ys; int ifindex = 0; if (argc > 1) ifindex = strtol(argv[1], NULL, 0); ys = ynl_sock_create(&ynl_netdev_family, &yerr); if (!ys) { fprintf(stderr, "YNL: %s\n", yerr.msg); return 1; } if (ynl_subscribe(ys, "mgmt")) goto err_close; printf("Select ifc ($ifindex; or 0 = dump; or -2 ntf check): "); scanf("%d", &ifindex); if (ifindex > 0) { struct netdev_dev_get_req *req; struct netdev_dev_get_rsp *d; req = netdev_dev_get_req_alloc(); netdev_dev_get_req_set_ifindex(req, ifindex); d = netdev_dev_get(ys, req); netdev_dev_get_req_free(req); if (!d) goto err_close; netdev_print_device(d, 0); netdev_dev_get_rsp_free(d); } else if (!ifindex) { devs = netdev_dev_get_dump(ys); if (!devs) goto err_close; ynl_dump_foreach(devs, d) netdev_print_device(d, 
0); netdev_dev_get_list_free(devs); } else if (ifindex == -2) { ynl_ntf_check(ys); } while ((ntf = ynl_ntf_dequeue(ys))) { netdev_print_device((struct netdev_dev_get_rsp *)&ntf->data, ntf->cmd); ynl_ntf_free(ntf); } ynl_sock_destroy(ys); return 0; err_close: fprintf(stderr, "YNL: %s\n", ys->err.msg); ynl_sock_destroy(ys); return 2; }
linux-master
tools/net/ynl/samples/netdev.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause #include <errno.h> #include <poll.h> #include <string.h> #include <stdlib.h> #include <linux/types.h> #include <libmnl/libmnl.h> #include <linux/genetlink.h> #include "ynl.h" #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(*arr)) #define __yerr_msg(yse, _msg...) \ ({ \ struct ynl_error *_yse = (yse); \ \ if (_yse) { \ snprintf(_yse->msg, sizeof(_yse->msg) - 1, _msg); \ _yse->msg[sizeof(_yse->msg) - 1] = 0; \ } \ }) #define __yerr_code(yse, _code...) \ ({ \ struct ynl_error *_yse = (yse); \ \ if (_yse) { \ _yse->code = _code; \ } \ }) #define __yerr(yse, _code, _msg...) \ ({ \ __yerr_msg(yse, _msg); \ __yerr_code(yse, _code); \ }) #define __perr(yse, _msg) __yerr(yse, errno, _msg) #define yerr_msg(_ys, _msg...) __yerr_msg(&(_ys)->err, _msg) #define yerr(_ys, _code, _msg...) __yerr(&(_ys)->err, _code, _msg) #define perr(_ys, _msg) __yerr(&(_ys)->err, errno, _msg) /* -- Netlink boiler plate */ static int ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type, char *str, int str_sz, int *n) { if (!policy) { if (*n < str_sz) *n += snprintf(str, str_sz, "!policy"); return 1; } if (type > policy->max_attr) { if (*n < str_sz) *n += snprintf(str, str_sz, "!oob"); return 1; } if (!policy->table[type].name) { if (*n < str_sz) *n += snprintf(str, str_sz, "!name"); return 1; } if (*n < str_sz) *n += snprintf(str, str_sz - *n, ".%s", policy->table[type].name); return 0; } static int ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off, struct ynl_policy_nest *policy, char *str, int str_sz, struct ynl_policy_nest **nest_pol) { unsigned int astart_off, aend_off; const struct nlattr *attr; unsigned int data_len; unsigned int type; bool found = false; int n = 0; if (!policy) { if (n < str_sz) n += snprintf(str, str_sz, "!policy"); return n; } data_len = end - start; mnl_attr_for_each_payload(start, data_len) { astart_off = (char *)attr - (char *)start; aend_off = astart_off + 
mnl_attr_get_payload_len(attr); if (aend_off <= off) continue; found = true; break; } if (!found) return 0; off -= astart_off; type = mnl_attr_get_type(attr); if (ynl_err_walk_report_one(policy, type, str, str_sz, &n)) return n; if (!off) { if (nest_pol) *nest_pol = policy->table[type].nest; return n; } if (!policy->table[type].nest) { if (n < str_sz) n += snprintf(str, str_sz, "!nest"); return n; } off -= sizeof(struct nlattr); start = mnl_attr_get_payload(attr); end = start + mnl_attr_get_payload_len(attr); return n + ynl_err_walk(ys, start, end, off, policy->table[type].nest, &str[n], str_sz - n, nest_pol); } #define NLMSGERR_ATTR_MISS_TYPE (NLMSGERR_ATTR_POLICY + 1) #define NLMSGERR_ATTR_MISS_NEST (NLMSGERR_ATTR_POLICY + 2) #define NLMSGERR_ATTR_MAX (NLMSGERR_ATTR_MAX + 2) static int ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh, unsigned int hlen) { const struct nlattr *tb[NLMSGERR_ATTR_MAX + 1] = {}; char miss_attr[sizeof(ys->err.msg)]; char bad_attr[sizeof(ys->err.msg)]; const struct nlattr *attr; const char *str = NULL; if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) return MNL_CB_OK; mnl_attr_for_each(attr, nlh, hlen) { unsigned int len, type; len = mnl_attr_get_payload_len(attr); type = mnl_attr_get_type(attr); if (type > NLMSGERR_ATTR_MAX) continue; tb[type] = attr; switch (type) { case NLMSGERR_ATTR_OFFS: case NLMSGERR_ATTR_MISS_TYPE: case NLMSGERR_ATTR_MISS_NEST: if (len != sizeof(__u32)) return MNL_CB_ERROR; break; case NLMSGERR_ATTR_MSG: str = mnl_attr_get_payload(attr); if (str[len - 1]) return MNL_CB_ERROR; break; default: break; } } bad_attr[0] = '\0'; miss_attr[0] = '\0'; if (tb[NLMSGERR_ATTR_OFFS]) { unsigned int n, off; void *start, *end; ys->err.attr_offs = mnl_attr_get_u32(tb[NLMSGERR_ATTR_OFFS]); n = snprintf(bad_attr, sizeof(bad_attr), "%sbad attribute: ", str ? 
" (" : ""); start = mnl_nlmsg_get_payload_offset(ys->nlh, sizeof(struct genlmsghdr)); end = mnl_nlmsg_get_payload_tail(ys->nlh); off = ys->err.attr_offs; off -= sizeof(struct nlmsghdr); off -= sizeof(struct genlmsghdr); n += ynl_err_walk(ys, start, end, off, ys->req_policy, &bad_attr[n], sizeof(bad_attr) - n, NULL); if (n >= sizeof(bad_attr)) n = sizeof(bad_attr) - 1; bad_attr[n] = '\0'; } if (tb[NLMSGERR_ATTR_MISS_TYPE]) { struct ynl_policy_nest *nest_pol = NULL; unsigned int n, off, type; void *start, *end; int n2; type = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_TYPE]); n = snprintf(miss_attr, sizeof(miss_attr), "%smissing attribute: ", bad_attr[0] ? ", " : (str ? " (" : "")); start = mnl_nlmsg_get_payload_offset(ys->nlh, sizeof(struct genlmsghdr)); end = mnl_nlmsg_get_payload_tail(ys->nlh); nest_pol = ys->req_policy; if (tb[NLMSGERR_ATTR_MISS_NEST]) { off = mnl_attr_get_u32(tb[NLMSGERR_ATTR_MISS_NEST]); off -= sizeof(struct nlmsghdr); off -= sizeof(struct genlmsghdr); n += ynl_err_walk(ys, start, end, off, ys->req_policy, &miss_attr[n], sizeof(miss_attr) - n, &nest_pol); } n2 = 0; ynl_err_walk_report_one(nest_pol, type, &miss_attr[n], sizeof(miss_attr) - n, &n2); n += n2; if (n >= sizeof(miss_attr)) n = sizeof(miss_attr) - 1; miss_attr[n] = '\0'; } /* Implicitly depend on ys->err.code already set */ if (str) yerr_msg(ys, "Kernel %s: '%s'%s%s%s", ys->err.code ? "error" : "warning", str, bad_attr, miss_attr, bad_attr[0] || miss_attr[0] ? ")" : ""); else if (bad_attr[0] || miss_attr[0]) yerr_msg(ys, "Kernel %s: %s%s", ys->err.code ? "error" : "warning", bad_attr, miss_attr); return MNL_CB_OK; } static int ynl_cb_error(const struct nlmsghdr *nlh, void *data) { const struct nlmsgerr *err = mnl_nlmsg_get_payload(nlh); struct ynl_parse_arg *yarg = data; unsigned int hlen; int code; code = err->error >= 0 ? 
err->error : -err->error; yarg->ys->err.code = code; errno = code; hlen = sizeof(*err); if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) hlen += mnl_nlmsg_get_payload_len(&err->msg); ynl_ext_ack_check(yarg->ys, nlh, hlen); return code ? MNL_CB_ERROR : MNL_CB_STOP; } static int ynl_cb_done(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; int err; err = *(int *)NLMSG_DATA(nlh); if (err < 0) { yarg->ys->err.code = -err; errno = -err; ynl_ext_ack_check(yarg->ys, nlh, sizeof(int)); return MNL_CB_ERROR; } return MNL_CB_STOP; } static int ynl_cb_noop(const struct nlmsghdr *nlh, void *data) { return MNL_CB_OK; } mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE] = { [NLMSG_NOOP] = ynl_cb_noop, [NLMSG_ERROR] = ynl_cb_error, [NLMSG_DONE] = ynl_cb_done, [NLMSG_OVERRUN] = ynl_cb_noop, }; /* Attribute validation */ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr) { struct ynl_policy_attr *policy; unsigned int type, len; unsigned char *data; data = mnl_attr_get_payload(attr); len = mnl_attr_get_payload_len(attr); type = mnl_attr_get_type(attr); if (type > yarg->rsp_policy->max_attr) { yerr(yarg->ys, YNL_ERROR_INTERNAL, "Internal error, validating unknown attribute"); return -1; } policy = &yarg->rsp_policy->table[type]; switch (policy->type) { case YNL_PT_REJECT: yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Rejected attribute (%s)", policy->name); return -1; case YNL_PT_IGNORE: break; case YNL_PT_U8: if (len == sizeof(__u8)) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (u8 %s)", policy->name); return -1; case YNL_PT_U16: if (len == sizeof(__u16)) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (u16 %s)", policy->name); return -1; case YNL_PT_U32: if (len == sizeof(__u32)) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (u32 %s)", policy->name); return -1; case YNL_PT_U64: if (len == sizeof(__u64)) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (u64 %s)", policy->name); return -1; 
case YNL_PT_FLAG: /* Let flags grow into real attrs, why not.. */ break; case YNL_PT_NEST: if (!len || len >= sizeof(*attr)) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (nest %s)", policy->name); return -1; case YNL_PT_BINARY: if (!policy->len || len == policy->len) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (binary %s)", policy->name); return -1; case YNL_PT_NUL_STR: if ((!policy->len || len <= policy->len) && !data[len - 1]) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (string %s)", policy->name); return -1; default: yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (unknown %s)", policy->name); return -1; } return 0; } /* Generic code */ static void ynl_err_reset(struct ynl_sock *ys) { ys->err.code = 0; ys->err.attr_offs = 0; ys->err.msg[0] = 0; } struct nlmsghdr *ynl_msg_start(struct ynl_sock *ys, __u32 id, __u16 flags) { struct nlmsghdr *nlh; ynl_err_reset(ys); nlh = ys->nlh = mnl_nlmsg_put_header(ys->tx_buf); nlh->nlmsg_type = id; nlh->nlmsg_flags = flags; nlh->nlmsg_seq = ++ys->seq; return nlh; } struct nlmsghdr * ynl_gemsg_start(struct ynl_sock *ys, __u32 id, __u16 flags, __u8 cmd, __u8 version) { struct genlmsghdr gehdr; struct nlmsghdr *nlh; void *data; nlh = ynl_msg_start(ys, id, flags); memset(&gehdr, 0, sizeof(gehdr)); gehdr.cmd = cmd; gehdr.version = version; data = mnl_nlmsg_put_extra_header(nlh, sizeof(gehdr)); memcpy(data, &gehdr, sizeof(gehdr)); return nlh; } void ynl_msg_start_req(struct ynl_sock *ys, __u32 id) { ynl_msg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK); } void ynl_msg_start_dump(struct ynl_sock *ys, __u32 id) { ynl_msg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP); } struct nlmsghdr * ynl_gemsg_start_req(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version) { return ynl_gemsg_start(ys, id, NLM_F_REQUEST | NLM_F_ACK, cmd, version); } struct nlmsghdr * ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version) { return ynl_gemsg_start(ys, id, 
NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP, cmd, version); } int ynl_recv_ack(struct ynl_sock *ys, int ret) { if (!ret) { yerr(ys, YNL_ERROR_EXPECT_ACK, "Expecting an ACK but nothing received"); return -1; } ret = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE); if (ret < 0) { perr(ys, "Socket receive failed"); return ret; } return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid, ynl_cb_null, ys); } int ynl_cb_null(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; yerr(yarg->ys, YNL_ERROR_UNEXPECT_MSG, "Received a message when none were expected"); return MNL_CB_ERROR; } /* Init/fini and genetlink boiler plate */ static int ynl_get_family_info_mcast(struct ynl_sock *ys, const struct nlattr *mcasts) { const struct nlattr *entry, *attr; unsigned int i; mnl_attr_for_each_nested(attr, mcasts) ys->n_mcast_groups++; if (!ys->n_mcast_groups) return 0; ys->mcast_groups = calloc(ys->n_mcast_groups, sizeof(*ys->mcast_groups)); if (!ys->mcast_groups) return MNL_CB_ERROR; i = 0; mnl_attr_for_each_nested(entry, mcasts) { mnl_attr_for_each_nested(attr, entry) { if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_ID) ys->mcast_groups[i].id = mnl_attr_get_u32(attr); if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GRP_NAME) { strncpy(ys->mcast_groups[i].name, mnl_attr_get_str(attr), GENL_NAMSIZ - 1); ys->mcast_groups[i].name[GENL_NAMSIZ - 1] = 0; } } } return 0; } static int ynl_get_family_info_cb(const struct nlmsghdr *nlh, void *data) { struct ynl_parse_arg *yarg = data; struct ynl_sock *ys = yarg->ys; const struct nlattr *attr; bool found_id = true; mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) { if (mnl_attr_get_type(attr) == CTRL_ATTR_MCAST_GROUPS) if (ynl_get_family_info_mcast(ys, attr)) return MNL_CB_ERROR; if (mnl_attr_get_type(attr) != CTRL_ATTR_FAMILY_ID) continue; if (mnl_attr_get_payload_len(attr) != sizeof(__u16)) { yerr(ys, YNL_ERROR_ATTR_INVALID, "Invalid family ID"); return MNL_CB_ERROR; } ys->family_id = 
mnl_attr_get_u16(attr); found_id = true; } if (!found_id) { yerr(ys, YNL_ERROR_ATTR_MISSING, "Family ID missing"); return MNL_CB_ERROR; } return MNL_CB_OK; } static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name) { struct ynl_parse_arg yarg = { .ys = ys, }; struct nlmsghdr *nlh; int err; nlh = ynl_gemsg_start_req(ys, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 1); mnl_attr_put_strz(nlh, CTRL_ATTR_FAMILY_NAME, family_name); err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len); if (err < 0) { perr(ys, "failed to request socket family info"); return err; } err = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE); if (err <= 0) { perr(ys, "failed to receive the socket family info"); return err; } err = mnl_cb_run2(ys->rx_buf, err, ys->seq, ys->portid, ynl_get_family_info_cb, &yarg, ynl_cb_array, ARRAY_SIZE(ynl_cb_array)); if (err < 0) { free(ys->mcast_groups); perr(ys, "failed to receive the socket family info - no such family?"); return err; } return ynl_recv_ack(ys, err); } struct ynl_sock * ynl_sock_create(const struct ynl_family *yf, struct ynl_error *yse) { struct ynl_sock *ys; int one = 1; ys = malloc(sizeof(*ys) + 2 * MNL_SOCKET_BUFFER_SIZE); if (!ys) return NULL; memset(ys, 0, sizeof(*ys)); ys->family = yf; ys->tx_buf = &ys->raw_buf[0]; ys->rx_buf = &ys->raw_buf[MNL_SOCKET_BUFFER_SIZE]; ys->ntf_last_next = &ys->ntf_first; ys->sock = mnl_socket_open(NETLINK_GENERIC); if (!ys->sock) { __perr(yse, "failed to create a netlink socket"); goto err_free_sock; } if (mnl_socket_setsockopt(ys->sock, NETLINK_CAP_ACK, &one, sizeof(one))) { __perr(yse, "failed to enable netlink ACK"); goto err_close_sock; } if (mnl_socket_setsockopt(ys->sock, NETLINK_EXT_ACK, &one, sizeof(one))) { __perr(yse, "failed to enable netlink ext ACK"); goto err_close_sock; } ys->seq = random(); ys->portid = mnl_socket_get_portid(ys->sock); if (ynl_sock_read_family(ys, yf->name)) { if (yse) memcpy(yse, &ys->err, sizeof(*yse)); goto err_close_sock; } return ys; 
err_close_sock: mnl_socket_close(ys->sock); err_free_sock: free(ys); return NULL; } void ynl_sock_destroy(struct ynl_sock *ys) { struct ynl_ntf_base_type *ntf; mnl_socket_close(ys->sock); while ((ntf = ynl_ntf_dequeue(ys))) ynl_ntf_free(ntf); free(ys->mcast_groups); free(ys); } /* YNL multicast handling */ void ynl_ntf_free(struct ynl_ntf_base_type *ntf) { ntf->free(ntf); } int ynl_subscribe(struct ynl_sock *ys, const char *grp_name) { unsigned int i; int err; for (i = 0; i < ys->n_mcast_groups; i++) if (!strcmp(ys->mcast_groups[i].name, grp_name)) break; if (i == ys->n_mcast_groups) { yerr(ys, ENOENT, "Multicast group '%s' not found", grp_name); return -1; } err = mnl_socket_setsockopt(ys->sock, NETLINK_ADD_MEMBERSHIP, &ys->mcast_groups[i].id, sizeof(ys->mcast_groups[i].id)); if (err < 0) { perr(ys, "Subscribing to multicast group failed"); return -1; } return 0; } int ynl_socket_get_fd(struct ynl_sock *ys) { return mnl_socket_get_fd(ys->sock); } struct ynl_ntf_base_type *ynl_ntf_dequeue(struct ynl_sock *ys) { struct ynl_ntf_base_type *ntf; if (!ynl_has_ntf(ys)) return NULL; ntf = ys->ntf_first; ys->ntf_first = ntf->next; if (ys->ntf_last_next == &ntf->next) ys->ntf_last_next = &ys->ntf_first; return ntf; } static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh) { struct ynl_parse_arg yarg = { .ys = ys, }; const struct ynl_ntf_info *info; struct ynl_ntf_base_type *rsp; struct genlmsghdr *gehdr; int ret; gehdr = mnl_nlmsg_get_payload(nlh); if (gehdr->cmd >= ys->family->ntf_info_size) return MNL_CB_ERROR; info = &ys->family->ntf_info[gehdr->cmd]; if (!info->cb) return MNL_CB_ERROR; rsp = calloc(1, info->alloc_sz); rsp->free = info->free; yarg.data = rsp->data; yarg.rsp_policy = info->policy; ret = info->cb(nlh, &yarg); if (ret <= MNL_CB_STOP) goto err_free; rsp->family = nlh->nlmsg_type; rsp->cmd = gehdr->cmd; *ys->ntf_last_next = rsp; ys->ntf_last_next = &rsp->next; return MNL_CB_OK; err_free: info->free(rsp); return MNL_CB_ERROR; } static int 
ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data) { return ynl_ntf_parse((struct ynl_sock *)data, nlh); } int ynl_ntf_check(struct ynl_sock *ys) { ssize_t len; int err; do { /* libmnl doesn't let us pass flags to the recv to make * it non-blocking so we need to poll() or peek() :| */ struct pollfd pfd = { }; pfd.fd = mnl_socket_get_fd(ys->sock); pfd.events = POLLIN; err = poll(&pfd, 1, 1); if (err < 1) return err; len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE); if (len < 0) return len; err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid, ynl_ntf_trampoline, ys, ynl_cb_array, NLMSG_MIN_TYPE); if (err < 0) return err; } while (err > 0); return 0; } /* YNL specific helpers used by the auto-generated code */ struct ynl_dump_list_type *YNL_LIST_END = (void *)(0xb4d123); void ynl_error_unknown_notification(struct ynl_sock *ys, __u8 cmd) { yerr(ys, YNL_ERROR_UNKNOWN_NTF, "Unknown notification message type '%d'", cmd); } int ynl_error_parse(struct ynl_parse_arg *yarg, const char *msg) { yerr(yarg->ys, YNL_ERROR_INV_RESP, "Error parsing response: %s", msg); return MNL_CB_ERROR; } static int ynl_check_alien(struct ynl_sock *ys, const struct nlmsghdr *nlh, __u32 rsp_cmd) { struct genlmsghdr *gehdr; if (mnl_nlmsg_get_payload_len(nlh) < sizeof(*gehdr)) { yerr(ys, YNL_ERROR_INV_RESP, "Kernel responded with truncated message"); return -1; } gehdr = mnl_nlmsg_get_payload(nlh); if (gehdr->cmd != rsp_cmd) return ynl_ntf_parse(ys, nlh); return 0; } static int ynl_req_trampoline(const struct nlmsghdr *nlh, void *data) { struct ynl_req_state *yrs = data; int ret; ret = ynl_check_alien(yrs->yarg.ys, nlh, yrs->rsp_cmd); if (ret) return ret < 0 ? 
MNL_CB_ERROR : MNL_CB_OK; return yrs->cb(nlh, &yrs->yarg); } int ynl_exec(struct ynl_sock *ys, struct nlmsghdr *req_nlh, struct ynl_req_state *yrs) { ssize_t len; int err; err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len); if (err < 0) return err; do { len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE); if (len < 0) return len; err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid, ynl_req_trampoline, yrs, ynl_cb_array, NLMSG_MIN_TYPE); if (err < 0) return err; } while (err > 0); return 0; } static int ynl_dump_trampoline(const struct nlmsghdr *nlh, void *data) { struct ynl_dump_state *ds = data; struct ynl_dump_list_type *obj; struct ynl_parse_arg yarg = {}; int ret; ret = ynl_check_alien(ds->ys, nlh, ds->rsp_cmd); if (ret) return ret < 0 ? MNL_CB_ERROR : MNL_CB_OK; obj = calloc(1, ds->alloc_sz); if (!obj) return MNL_CB_ERROR; if (!ds->first) ds->first = obj; if (ds->last) ds->last->next = obj; ds->last = obj; yarg.ys = ds->ys; yarg.rsp_policy = ds->rsp_policy; yarg.data = &obj->data; return ds->cb(nlh, &yarg); } static void *ynl_dump_end(struct ynl_dump_state *ds) { if (!ds->first) return YNL_LIST_END; ds->last->next = YNL_LIST_END; return ds->first; } int ynl_exec_dump(struct ynl_sock *ys, struct nlmsghdr *req_nlh, struct ynl_dump_state *yds) { ssize_t len; int err; err = mnl_socket_sendto(ys->sock, req_nlh, req_nlh->nlmsg_len); if (err < 0) return err; do { len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE); if (len < 0) goto err_close_list; err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid, ynl_dump_trampoline, yds, ynl_cb_array, NLMSG_MIN_TYPE); if (err < 0) goto err_close_list; } while (err > 0); yds->first = ynl_dump_end(yds); return 0; err_close_list: yds->first = ynl_dump_end(yds); return -1; }
linux-master
tools/net/ynl/lib/ynl.c
// SPDX-License-Identifier: GPL-2.0 /* Simple test of virtio code, entirely in userpsace. */ #define _GNU_SOURCE #include <sched.h> #include <err.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/virtio.h> #include <linux/vringh.h> #include <linux/virtio_ring.h> #include <linux/virtio_config.h> #include <linux/uaccess.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/wait.h> #include <fcntl.h> #define USER_MEM (1024*1024) void *__user_addr_min, *__user_addr_max; void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end; static u64 user_addr_offset; #define RINGSIZE 256 #define ALIGN 4096 static bool never_notify_host(struct virtqueue *vq) { abort(); } static void never_callback_guest(struct virtqueue *vq) { abort(); } static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r) { if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset) return false; if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset) return false; r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset; r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset; r->offset = user_addr_offset; return true; } /* We return single byte ranges. 
*/ static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r) { if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset) return false; if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset) return false; r->start = addr; r->end_incl = r->start; r->offset = user_addr_offset; return true; } struct guest_virtio_device { struct virtio_device vdev; int to_host_fd; unsigned long notifies; }; static bool parallel_notify_host(struct virtqueue *vq) { int rc; struct guest_virtio_device *gvdev; gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev); rc = write(gvdev->to_host_fd, "", 1); if (rc < 0) return false; gvdev->notifies++; return true; } static bool no_notify_host(struct virtqueue *vq) { return true; } #define NUM_XFERS (10000000) /* We aim for two "distant" cpus. */ static void find_cpus(unsigned int *first, unsigned int *last) { unsigned int i; *first = -1U; *last = 0; for (i = 0; i < 4096; i++) { cpu_set_t set; CPU_ZERO(&set); CPU_SET(i, &set); if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) { if (i < *first) *first = i; if (i > *last) *last = i; } } } /* Opencoded version for fast mode */ static inline int vringh_get_head(struct vringh *vrh, u16 *head) { u16 avail_idx, i; int err; err = get_user(avail_idx, &vrh->vring.avail->idx); if (err) return err; if (vrh->last_avail_idx == avail_idx) return 0; /* Only get avail ring entries after they have been exposed by guest. */ virtio_rmb(vrh->weak_barriers); i = vrh->last_avail_idx & (vrh->vring.num - 1); err = get_user(*head, &vrh->vring.avail->ring[i]); if (err) return err; vrh->last_avail_idx++; return 1; } static int parallel_test(u64 features, bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r), bool fast_vringh) { void *host_map, *guest_map; int fd, mapsize, to_guest[2], to_host[2]; unsigned long xfers = 0, notifies = 0, receives = 0; unsigned int first_cpu, last_cpu; cpu_set_t cpu_set; char buf[128]; /* Create real file to mmap. 
*/ fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600); if (fd < 0) err(1, "Opening /tmp/vringh_test-file"); /* Extra room at the end for some data, and indirects */ mapsize = vring_size(RINGSIZE, ALIGN) + RINGSIZE * 2 * sizeof(int) + RINGSIZE * 6 * sizeof(struct vring_desc); mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1); ftruncate(fd, mapsize); /* Parent and child use separate addresses, to check our mapping logic! */ host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); pipe(to_guest); pipe(to_host); CPU_ZERO(&cpu_set); find_cpus(&first_cpu, &last_cpu); printf("Using CPUS %u and %u\n", first_cpu, last_cpu); fflush(stdout); if (fork() != 0) { struct vringh vrh; int status, err, rlen = 0; char rbuf[5]; /* We are the host: never access guest addresses! */ munmap(guest_map, mapsize); __user_addr_min = host_map; __user_addr_max = __user_addr_min + mapsize; user_addr_offset = host_map - guest_map; assert(user_addr_offset); close(to_guest[0]); close(to_host[1]); vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN); vringh_init_user(&vrh, features, RINGSIZE, true, vrh.vring.desc, vrh.vring.avail, vrh.vring.used); CPU_SET(first_cpu, &cpu_set); if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set)) errx(1, "Could not set affinity to cpu %u", first_cpu); while (xfers < NUM_XFERS) { struct iovec host_riov[2], host_wiov[2]; struct vringh_iov riov, wiov; u16 head, written; if (fast_vringh) { for (;;) { err = vringh_get_head(&vrh, &head); if (err != 0) break; err = vringh_need_notify_user(&vrh); if (err < 0) errx(1, "vringh_need_notify_user: %i", err); if (err) { write(to_guest[1], "", 1); notifies++; } } if (err != 1) errx(1, "vringh_get_head"); written = 0; goto complete; } else { vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, 
&head); } if (err == 0) { err = vringh_need_notify_user(&vrh); if (err < 0) errx(1, "vringh_need_notify_user: %i", err); if (err) { write(to_guest[1], "", 1); notifies++; } if (!vringh_notify_enable_user(&vrh)) continue; /* Swallow all notifies at once. */ if (read(to_host[0], buf, sizeof(buf)) < 1) break; vringh_notify_disable_user(&vrh); receives++; continue; } if (err != 1) errx(1, "vringh_getdesc_user: %i", err); /* We simply copy bytes. */ if (riov.used) { rlen = vringh_iov_pull_user(&riov, rbuf, sizeof(rbuf)); if (rlen != 4) errx(1, "vringh_iov_pull_user: %i", rlen); assert(riov.i == riov.used); written = 0; } else { err = vringh_iov_push_user(&wiov, rbuf, rlen); if (err != rlen) errx(1, "vringh_iov_push_user: %i", err); assert(wiov.i == wiov.used); written = err; } complete: xfers++; err = vringh_complete_user(&vrh, head, written); if (err != 0) errx(1, "vringh_complete_user: %i", err); } err = vringh_need_notify_user(&vrh); if (err < 0) errx(1, "vringh_need_notify_user: %i", err); if (err) { write(to_guest[1], "", 1); notifies++; } wait(&status); if (!WIFEXITED(status)) errx(1, "Child died with signal %i?", WTERMSIG(status)); if (WEXITSTATUS(status) != 0) errx(1, "Child exited %i?", WEXITSTATUS(status)); printf("Host: notified %lu, pinged %lu\n", notifies, receives); return 0; } else { struct guest_virtio_device gvdev; struct virtqueue *vq; unsigned int *data; struct vring_desc *indirects; unsigned int finished = 0; /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */ data = guest_map + vring_size(RINGSIZE, ALIGN); indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int); /* We are the guest. 
*/ munmap(host_map, mapsize); close(to_guest[1]); close(to_host[0]); gvdev.vdev.features = features; INIT_LIST_HEAD(&gvdev.vdev.vqs); spin_lock_init(&gvdev.vdev.vqs_list_lock); gvdev.to_host_fd = to_host[1]; gvdev.notifies = 0; CPU_SET(first_cpu, &cpu_set); if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set)) err(1, "Could not set affinity to cpu %u", first_cpu); vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true, false, guest_map, fast_vringh ? no_notify_host : parallel_notify_host, never_callback_guest, "guest vq"); /* Don't kfree indirects. */ __kfree_ignore_start = indirects; __kfree_ignore_end = indirects + RINGSIZE * 6; while (xfers < NUM_XFERS) { struct scatterlist sg[4]; unsigned int num_sg, len; int *dbuf, err; bool output = !(xfers % 2); /* Consume bufs. */ while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) { if (len == 4) assert(*dbuf == finished - 1); else if (!fast_vringh) assert(*dbuf == finished); finished++; } /* Produce a buffer. */ dbuf = data + (xfers % (RINGSIZE + 1)); if (output) *dbuf = xfers; else *dbuf = -1; switch ((xfers / sizeof(*dbuf)) % 4) { case 0: /* Nasty three-element sg list. 
*/ sg_init_table(sg, num_sg = 3); sg_set_buf(&sg[0], (void *)dbuf, 1); sg_set_buf(&sg[1], (void *)dbuf + 1, 2); sg_set_buf(&sg[2], (void *)dbuf + 3, 1); break; case 1: sg_init_table(sg, num_sg = 2); sg_set_buf(&sg[0], (void *)dbuf, 1); sg_set_buf(&sg[1], (void *)dbuf + 1, 3); break; case 2: sg_init_table(sg, num_sg = 1); sg_set_buf(&sg[0], (void *)dbuf, 4); break; case 3: sg_init_table(sg, num_sg = 4); sg_set_buf(&sg[0], (void *)dbuf, 1); sg_set_buf(&sg[1], (void *)dbuf + 1, 1); sg_set_buf(&sg[2], (void *)dbuf + 2, 1); sg_set_buf(&sg[3], (void *)dbuf + 3, 1); break; } /* May allocate an indirect, so force it to allocate * user addr */ __kmalloc_fake = indirects + (xfers % RINGSIZE) * 4; if (output) err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf, GFP_KERNEL); else err = virtqueue_add_inbuf(vq, sg, num_sg, dbuf, GFP_KERNEL); if (err == -ENOSPC) { if (!virtqueue_enable_cb_delayed(vq)) continue; /* Swallow all notifies at once. */ if (read(to_guest[0], buf, sizeof(buf)) < 1) break; receives++; virtqueue_disable_cb(vq); continue; } if (err) errx(1, "virtqueue_add_in/outbuf: %i", err); xfers++; virtqueue_kick(vq); } /* Any extra? */ while (finished != xfers) { int *dbuf; unsigned int len; /* Consume bufs. 
*/ dbuf = virtqueue_get_buf(vq, &len); if (dbuf) { if (len == 4) assert(*dbuf == finished - 1); else assert(len == 0); finished++; continue; } if (!virtqueue_enable_cb_delayed(vq)) continue; if (read(to_guest[0], buf, sizeof(buf)) < 1) break; receives++; virtqueue_disable_cb(vq); } printf("Guest: notified %lu, pinged %lu\n", gvdev.notifies, receives); vring_del_virtqueue(vq); return 0; } } int main(int argc, char *argv[]) { struct virtio_device vdev; struct virtqueue *vq; struct vringh vrh; struct scatterlist guest_sg[RINGSIZE], *sgs[2]; struct iovec host_riov[2], host_wiov[2]; struct vringh_iov riov, wiov; struct vring_used_elem used[RINGSIZE]; char buf[28]; u16 head; int err; unsigned i; void *ret; bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r); bool fast_vringh = false, parallel = false; getrange = getrange_iov; vdev.features = 0; INIT_LIST_HEAD(&vdev.vqs); spin_lock_init(&vdev.vqs_list_lock); while (argv[1]) { if (strcmp(argv[1], "--indirect") == 0) __virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC); else if (strcmp(argv[1], "--eventidx") == 0) __virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX); else if (strcmp(argv[1], "--virtio-1") == 0) __virtio_set_bit(&vdev, VIRTIO_F_VERSION_1); else if (strcmp(argv[1], "--slow-range") == 0) getrange = getrange_slow; else if (strcmp(argv[1], "--fast-vringh") == 0) fast_vringh = true; else if (strcmp(argv[1], "--parallel") == 0) parallel = true; else errx(1, "Unknown arg %s", argv[1]); argv++; } if (parallel) return parallel_test(vdev.features, getrange, fast_vringh); if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0) abort(); __user_addr_max = __user_addr_min + USER_MEM; memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN)); /* Set up guest side. */ vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false, __user_addr_min, never_notify_host, never_callback_guest, "guest vq"); /* Set up host side. 
*/ vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN); vringh_init_user(&vrh, vdev.features, RINGSIZE, true, vrh.vring.desc, vrh.vring.avail, vrh.vring.used); /* No descriptor to get yet... */ err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); if (err != 0) errx(1, "vringh_getdesc_user: %i", err); /* Guest puts in a descriptor. */ memcpy(__user_addr_max - 1, "a", 1); sg_init_table(guest_sg, 1); sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1); sg_init_table(guest_sg+1, 1); sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2); sgs[0] = &guest_sg[0]; sgs[1] = &guest_sg[1]; /* May allocate an indirect, so force it to allocate user addr */ __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN); err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL); if (err) errx(1, "virtqueue_add_sgs: %i", err); __kmalloc_fake = NULL; /* Host retreives it. */ vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); if (err != 1) errx(1, "vringh_getdesc_user: %i", err); assert(riov.used == 1); assert(riov.iov[0].iov_base == __user_addr_max - 1); assert(riov.iov[0].iov_len == 1); if (getrange != getrange_slow) { assert(wiov.used == 1); assert(wiov.iov[0].iov_base == __user_addr_max - 3); assert(wiov.iov[0].iov_len == 2); } else { assert(wiov.used == 2); assert(wiov.iov[0].iov_base == __user_addr_max - 3); assert(wiov.iov[0].iov_len == 1); assert(wiov.iov[1].iov_base == __user_addr_max - 2); assert(wiov.iov[1].iov_len == 1); } err = vringh_iov_pull_user(&riov, buf, 5); if (err != 1) errx(1, "vringh_iov_pull_user: %i", err); assert(buf[0] == 'a'); assert(riov.i == 1); assert(vringh_iov_pull_user(&riov, buf, 5) == 0); memcpy(buf, "bcdef", 5); err = vringh_iov_push_user(&wiov, buf, 5); if (err != 2) errx(1, "vringh_iov_push_user: %i", err); assert(memcmp(__user_addr_max - 3, "bc", 2) == 0); assert(wiov.i == wiov.used); 
assert(vringh_iov_push_user(&wiov, buf, 5) == 0); /* Host is done. */ err = vringh_complete_user(&vrh, head, err); if (err != 0) errx(1, "vringh_complete_user: %i", err); /* Guest should see used token now. */ __kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN); __kfree_ignore_end = __kfree_ignore_start + 1; ret = virtqueue_get_buf(vq, &i); if (ret != &err) errx(1, "virtqueue_get_buf: %p", ret); assert(i == 2); /* Guest puts in a huge descriptor. */ sg_init_table(guest_sg, RINGSIZE); for (i = 0; i < RINGSIZE; i++) { sg_set_buf(&guest_sg[i], __user_addr_max - USER_MEM/4, USER_MEM/4); } /* Fill contents with recognisable garbage. */ for (i = 0; i < USER_MEM/4; i++) ((char *)__user_addr_max - USER_MEM/4)[i] = i; /* This will allocate an indirect, so force it to allocate user addr */ __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN); err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL); if (err) errx(1, "virtqueue_add_outbuf (large): %i", err); __kmalloc_fake = NULL; /* Host picks it up (allocates new iov). */ vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); if (err != 1) errx(1, "vringh_getdesc_user: %i", err); assert(riov.max_num & VRINGH_IOV_ALLOCATED); assert(riov.iov != host_riov); if (getrange != getrange_slow) assert(riov.used == RINGSIZE); else assert(riov.used == RINGSIZE * USER_MEM/4); assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED)); assert(wiov.used == 0); /* Pull data back out (in odd chunks), should be as expected. 
*/ for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) { err = vringh_iov_pull_user(&riov, buf, 3); if (err != 3 && i + err != RINGSIZE * USER_MEM/4) errx(1, "vringh_iov_pull_user large: %i", err); assert(buf[0] == (char)i); assert(err < 2 || buf[1] == (char)(i + 1)); assert(err < 3 || buf[2] == (char)(i + 2)); } assert(riov.i == riov.used); vringh_iov_cleanup(&riov); vringh_iov_cleanup(&wiov); /* Complete using multi interface, just because we can. */ used[0].id = head; used[0].len = 0; err = vringh_complete_multi_user(&vrh, used, 1); if (err) errx(1, "vringh_complete_multi_user(1): %i", err); /* Free up those descriptors. */ ret = virtqueue_get_buf(vq, &i); if (ret != &err) errx(1, "virtqueue_get_buf: %p", ret); /* Add lots of descriptors. */ sg_init_table(guest_sg, 1); sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1); for (i = 0; i < RINGSIZE; i++) { err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL); if (err) errx(1, "virtqueue_add_outbuf (multiple): %i", err); } /* Now get many, and consume them all at once. */ vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); for (i = 0; i < RINGSIZE; i++) { err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); if (err != 1) errx(1, "vringh_getdesc_user: %i", err); used[i].id = head; used[i].len = 0; } /* Make sure it wraps around ring, to test! */ assert(vrh.vring.used->idx % RINGSIZE != 0); err = vringh_complete_multi_user(&vrh, used, RINGSIZE); if (err) errx(1, "vringh_complete_multi_user: %i", err); /* Free those buffers. */ for (i = 0; i < RINGSIZE; i++) { unsigned len; assert(virtqueue_get_buf(vq, &len) != NULL); } /* Test weird (but legal!) indirect. */ if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) { char *data = __user_addr_max - USER_MEM/4; struct vring_desc *d = __user_addr_max - USER_MEM/2; struct vring vring; /* Force creation of direct, which we modify. 
*/ __virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC); vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false, __user_addr_min, never_notify_host, never_callback_guest, "guest vq"); sg_init_table(guest_sg, 4); sg_set_buf(&guest_sg[0], d, sizeof(*d)*2); sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1); sg_set_buf(&guest_sg[2], data + 6, 4); sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3); err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL); if (err) errx(1, "virtqueue_add_outbuf (indirect): %i", err); vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN); /* They're used in order, but double-check... */ assert(vring.desc[0].addr == (unsigned long)d); assert(vring.desc[1].addr == (unsigned long)(d+2)); assert(vring.desc[2].addr == (unsigned long)data + 6); assert(vring.desc[3].addr == (unsigned long)(d+3)); vring.desc[0].flags |= VRING_DESC_F_INDIRECT; vring.desc[1].flags |= VRING_DESC_F_INDIRECT; vring.desc[3].flags |= VRING_DESC_F_INDIRECT; /* First indirect */ d[0].addr = (unsigned long)data; d[0].len = 1; d[0].flags = VRING_DESC_F_NEXT; d[0].next = 1; d[1].addr = (unsigned long)data + 1; d[1].len = 2; d[1].flags = 0; /* Second indirect */ d[2].addr = (unsigned long)data + 3; d[2].len = 3; d[2].flags = 0; /* Third indirect */ d[3].addr = (unsigned long)data + 10; d[3].len = 5; d[3].flags = VRING_DESC_F_NEXT; d[3].next = 1; d[4].addr = (unsigned long)data + 15; d[4].len = 6; d[4].flags = VRING_DESC_F_NEXT; d[4].next = 2; d[5].addr = (unsigned long)data + 21; d[5].len = 7; d[5].flags = 0; /* Host picks it up (allocates new iov). 
*/ vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov)); vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov)); err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head); if (err != 1) errx(1, "vringh_getdesc_user: %i", err); if (head != 0) errx(1, "vringh_getdesc_user: head %i not 0", head); assert(riov.max_num & VRINGH_IOV_ALLOCATED); if (getrange != getrange_slow) assert(riov.used == 7); else assert(riov.used == 28); err = vringh_iov_pull_user(&riov, buf, 29); assert(err == 28); /* Data should be linear. */ for (i = 0; i < err; i++) assert(buf[i] == i); vringh_iov_cleanup(&riov); } /* Don't leak memory... */ vring_del_virtqueue(vq); free(__user_addr_min); return 0; }
linux-master
tools/virtio/vringh_test.c
// SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE #include <getopt.h> #include <limits.h> #include <string.h> #include <poll.h> #include <sys/eventfd.h> #include <stdlib.h> #include <assert.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <stdbool.h> #include <linux/virtio_types.h> #include <linux/vhost.h> #include <linux/virtio.h> #include <linux/virtio_ring.h> #include "../../drivers/vhost/test.h" #define RANDOM_BATCH -1 /* Unused */ void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end; struct vq_info { int kick; int call; int num; int idx; void *ring; /* copy used for control */ struct vring vring; struct virtqueue *vq; }; struct vdev_info { struct virtio_device vdev; int control; struct pollfd fds[1]; struct vq_info vqs[1]; int nvqs; void *buf; size_t buf_size; struct vhost_memory *mem; }; static const struct vhost_vring_file no_backend = { .fd = -1 }, backend = { .fd = 1 }; static const struct vhost_vring_state null_state = {}; bool vq_notify(struct virtqueue *vq) { struct vq_info *info = vq->priv; unsigned long long v = 1; int r; r = write(info->kick, &v, sizeof v); assert(r == sizeof v); return true; } void vq_callback(struct virtqueue *vq) { } void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info) { struct vhost_vring_state state = { .index = info->idx }; struct vhost_vring_file file = { .index = info->idx }; unsigned long long features = dev->vdev.features; struct vhost_vring_addr addr = { .index = info->idx, .desc_user_addr = (uint64_t)(unsigned long)info->vring.desc, .avail_user_addr = (uint64_t)(unsigned long)info->vring.avail, .used_user_addr = (uint64_t)(unsigned long)info->vring.used, }; int r; r = ioctl(dev->control, VHOST_SET_FEATURES, &features); assert(r >= 0); state.num = info->vring.num; r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state); assert(r >= 0); state.num = 0; r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state); assert(r >= 0); r = 
ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr); assert(r >= 0); file.fd = info->kick; r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file); assert(r >= 0); file.fd = info->call; r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file); assert(r >= 0); } static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev) { if (info->vq) vring_del_virtqueue(info->vq); memset(info->ring, 0, vring_size(num, 4096)); vring_init(&info->vring, num, info->ring, 4096); info->vq = vring_new_virtqueue(info->idx, num, 4096, vdev, true, false, info->ring, vq_notify, vq_callback, "test"); assert(info->vq); info->vq->priv = info; } static void vq_info_add(struct vdev_info *dev, int num) { struct vq_info *info = &dev->vqs[dev->nvqs]; int r; info->idx = dev->nvqs; info->kick = eventfd(0, EFD_NONBLOCK); info->call = eventfd(0, EFD_NONBLOCK); r = posix_memalign(&info->ring, 4096, vring_size(num, 4096)); assert(r >= 0); vq_reset(info, num, &dev->vdev); vhost_vq_setup(dev, info); dev->fds[info->idx].fd = info->call; dev->fds[info->idx].events = POLLIN; dev->nvqs++; } static void vdev_info_init(struct vdev_info* dev, unsigned long long features) { int r; memset(dev, 0, sizeof *dev); dev->vdev.features = features; INIT_LIST_HEAD(&dev->vdev.vqs); spin_lock_init(&dev->vdev.vqs_list_lock); dev->buf_size = 1024; dev->buf = malloc(dev->buf_size); assert(dev->buf); dev->control = open("/dev/vhost-test", O_RDWR); assert(dev->control >= 0); r = ioctl(dev->control, VHOST_SET_OWNER, NULL); assert(r >= 0); dev->mem = malloc(offsetof(struct vhost_memory, regions) + sizeof dev->mem->regions[0]); assert(dev->mem); memset(dev->mem, 0, offsetof(struct vhost_memory, regions) + sizeof dev->mem->regions[0]); dev->mem->nregions = 1; dev->mem->regions[0].guest_phys_addr = (long)dev->buf; dev->mem->regions[0].userspace_addr = (long)dev->buf; dev->mem->regions[0].memory_size = dev->buf_size; r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem); assert(r >= 0); } /* TODO: this is pretty bad: we get a 
cache line bounce * for the wait queue on poll and another one on read, * plus the read which is there just to clear the * current state. */ static void wait_for_interrupt(struct vdev_info *dev) { int i; unsigned long long val; poll(dev->fds, dev->nvqs, -1); for (i = 0; i < dev->nvqs; ++i) if (dev->fds[i].revents & POLLIN) { read(dev->fds[i].fd, &val, sizeof val); } } static void run_test(struct vdev_info *dev, struct vq_info *vq, bool delayed, int batch, int reset_n, int bufs) { struct scatterlist sl; long started = 0, completed = 0, next_reset = reset_n; long completed_before, started_before; int r, test = 1; unsigned int len; long long spurious = 0; const bool random_batch = batch == RANDOM_BATCH; r = ioctl(dev->control, VHOST_TEST_RUN, &test); assert(r >= 0); if (!reset_n) { next_reset = INT_MAX; } for (;;) { virtqueue_disable_cb(vq->vq); completed_before = completed; started_before = started; do { const bool reset = completed > next_reset; if (random_batch) batch = (random() % vq->vring.num) + 1; while (started < bufs && (started - completed) < batch) { sg_init_one(&sl, dev->buf, dev->buf_size); r = virtqueue_add_outbuf(vq->vq, &sl, 1, dev->buf + started, GFP_ATOMIC); if (unlikely(r != 0)) { if (r == -ENOSPC && started > started_before) r = 0; else r = -1; break; } ++started; if (unlikely(!virtqueue_kick(vq->vq))) { r = -1; break; } } if (started >= bufs) r = -1; if (reset) { r = ioctl(dev->control, VHOST_TEST_SET_BACKEND, &no_backend); assert(!r); } /* Flush out completed bufs if any */ while (virtqueue_get_buf(vq->vq, &len)) { ++completed; r = 0; } if (reset) { struct vhost_vring_state s = { .index = 0 }; vq_reset(vq, vq->vring.num, &dev->vdev); r = ioctl(dev->control, VHOST_GET_VRING_BASE, &s); assert(!r); s.num = 0; r = ioctl(dev->control, VHOST_SET_VRING_BASE, &null_state); assert(!r); r = ioctl(dev->control, VHOST_TEST_SET_BACKEND, &backend); assert(!r); started = completed; while (completed > next_reset) next_reset += completed; } } while (r == 0); if 
(completed == completed_before && started == started_before) ++spurious; assert(completed <= bufs); assert(started <= bufs); if (completed == bufs) break; if (delayed) { if (virtqueue_enable_cb_delayed(vq->vq)) wait_for_interrupt(dev); } else { if (virtqueue_enable_cb(vq->vq)) wait_for_interrupt(dev); } } test = 0; r = ioctl(dev->control, VHOST_TEST_RUN, &test); assert(r >= 0); fprintf(stderr, "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n", spurious, started, completed); } const char optstring[] = "h"; const struct option longopts[] = { { .name = "help", .val = 'h', }, { .name = "event-idx", .val = 'E', }, { .name = "no-event-idx", .val = 'e', }, { .name = "indirect", .val = 'I', }, { .name = "no-indirect", .val = 'i', }, { .name = "virtio-1", .val = '1', }, { .name = "no-virtio-1", .val = '0', }, { .name = "delayed-interrupt", .val = 'D', }, { .name = "no-delayed-interrupt", .val = 'd', }, { .name = "batch", .val = 'b', .has_arg = required_argument, }, { .name = "reset", .val = 'r', .has_arg = optional_argument, }, { } }; static void help(int status) { fprintf(stderr, "Usage: virtio_test [--help]" " [--no-indirect]" " [--no-event-idx]" " [--no-virtio-1]" " [--delayed-interrupt]" " [--batch=random/N]" " [--reset=N]" "\n"); exit(status); } int main(int argc, char **argv) { struct vdev_info dev; unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1); long batch = 1, reset = 0; int o; bool delayed = false; for (;;) { o = getopt_long(argc, argv, optstring, longopts, NULL); switch (o) { case -1: goto done; case '?': help(2); case 'e': features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX); break; case 'h': help(0); case 'i': features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC); break; case '0': features &= ~(1ULL << VIRTIO_F_VERSION_1); break; case 'D': delayed = true; break; case 'b': if (0 == strcmp(optarg, "random")) { batch = RANDOM_BATCH; } else { batch = strtol(optarg, NULL, 10); 
assert(batch > 0); assert(batch < (long)INT_MAX + 1); } break; case 'r': if (!optarg) { reset = 1; } else { reset = strtol(optarg, NULL, 10); assert(reset > 0); assert(reset < (long)INT_MAX + 1); } break; default: assert(0); break; } } done: vdev_info_init(&dev, features); vq_info_add(&dev, 256); run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000); return 0; }
linux-master
tools/virtio/virtio_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Controller of read/write threads for virtio-trace
 *
 * Copyright (C) 2012 Hitachi, Ltd.
 * Created by Yoshihiro Yunomae <[email protected]>
 *            Masami Hiramatsu <[email protected]>
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "trace-agent.h"

#define HOST_MSG_SIZE 256
#define EVENT_WAIT_MSEC 100

/* Last signal number seen by signal_handler(); 0 until a signal arrives. */
static volatile sig_atomic_t global_signal_val;
bool global_sig_receive; /* default false */
bool global_run_operation; /* default false*/

/* Handle SIGTERM/SIGINT/SIGQUIT to exit */
static void signal_handler(int sig)
{
	global_signal_val = sig;
}

/*
 * Open the control path read-only and return its fd.
 * Exits the process on failure (this agent treats setup errors as fatal).
 */
int rw_ctl_init(const char *ctl_path)
{
	int ctl_fd;

	ctl_fd = open(ctl_path, O_RDONLY);
	if (ctl_fd == -1) {
		pr_err("Cannot open ctl_fd\n");
		goto error;
	}

	return ctl_fd;

error:
	exit(EXIT_FAILURE);
}

/*
 * Poll ctl_fd until data is readable or a signal was received.
 * Returns >0 when the control fd is readable, -1 on signal; exits on
 * poll error. Polls with a short timeout so pending signals are noticed.
 */
static int wait_order(int ctl_fd)
{
	struct pollfd poll_fd;
	int ret = 0;

	while (!global_sig_receive) {
		poll_fd.fd = ctl_fd;
		poll_fd.events = POLLIN;

		ret = poll(&poll_fd, 1, EVENT_WAIT_MSEC);

		if (global_signal_val) {
			global_sig_receive = true;
			pr_info("Receive interrupt %d\n", global_signal_val);

			/* Wakes rw-threads when they are sleeping */
			if (!global_run_operation)
				pthread_cond_broadcast(&cond_wakeup);

			ret = -1;
			break;
		}

		if (ret < 0) {
			pr_err("Polling error\n");
			goto error;
		}

		if (ret)
			break;
	}

	return ret;

error:
	exit(EXIT_FAILURE);
}

/*
 * control read/write threads by handling global_run_operation
 */
void *rw_ctl_loop(int ctl_fd)
{
	ssize_t rlen;
	char buf[HOST_MSG_SIZE];
	int ret;

	/* Setup signal handlers */
	signal(SIGTERM, signal_handler);
	signal(SIGINT, signal_handler);
	signal(SIGQUIT, signal_handler);

	while (!global_sig_receive) {
		ret = wait_order(ctl_fd);
		if (ret < 0)
			break;

		rlen = read(ctl_fd, buf, sizeof(buf));
		if (rlen < 0) {
			pr_err("read data error in ctl thread\n");
			goto error;
		}

		/* rlen == 2: a single command character plus a terminator */
		if (rlen == 2 && buf[0] == '1') {
			/*
			 * If host writes '1' to a control path,
			 * this controller wakes all read/write threads.
			 */
			global_run_operation = true;
			pthread_cond_broadcast(&cond_wakeup);
			pr_debug("Wake up all read/write threads\n");
		} else if (rlen == 2 && buf[0] == '0') {
			/*
			 * If host writes '0' to a control path, read/write
			 * threads will wait for notification from Host.
			 */
			global_run_operation = false;
			pr_debug("Stop all read/write threads\n");
		} else
			pr_info("Invalid host notification: %s\n", buf);
	}

	return NULL;

error:
	exit(EXIT_FAILURE);
}
linux-master
tools/virtio/virtio-trace/trace-agent-ctl.c
// SPDX-License-Identifier: GPL-2.0-only /* * Guest agent for virtio-trace * * Copyright (C) 2012 Hitachi, Ltd. * Created by Yoshihiro Yunomae <[email protected]> * Masami Hiramatsu <[email protected]> */ #define _GNU_SOURCE #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include "trace-agent.h" #define PAGE_SIZE (sysconf(_SC_PAGE_SIZE)) #define PIPE_DEF_BUFS 16 #define PIPE_MIN_SIZE (PAGE_SIZE*PIPE_DEF_BUFS) #define PIPE_MAX_SIZE (1024*1024) #define TRACEFS "/sys/kernel/tracing" #define DEBUGFS "/sys/kernel/debug/tracing" #define READ_PATH_FMT "%s/per_cpu/cpu%d/trace_pipe_raw" #define WRITE_PATH_FMT "/dev/virtio-ports/trace-path-cpu%d" #define CTL_PATH "/dev/virtio-ports/agent-ctl-path" pthread_mutex_t mutex_notify = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond_wakeup = PTHREAD_COND_INITIALIZER; static int get_total_cpus(void) { int nr_cpus = (int)sysconf(_SC_NPROCESSORS_CONF); if (nr_cpus <= 0) { pr_err("Could not read cpus\n"); goto error; } else if (nr_cpus > MAX_CPUS) { pr_err("Exceed max cpus(%d)\n", (int)MAX_CPUS); goto error; } return nr_cpus; error: exit(EXIT_FAILURE); } static void *agent_info_new(void) { struct agent_info *s; int i; s = zalloc(sizeof(struct agent_info)); if (s == NULL) { pr_err("agent_info zalloc error\n"); exit(EXIT_FAILURE); } s->pipe_size = PIPE_INIT; s->use_stdout = false; s->cpus = get_total_cpus(); s->ctl_fd = -1; /* read/write threads init */ for (i = 0; i < s->cpus; i++) s->rw_ti[i] = rw_thread_info_new(); return s; } static unsigned long parse_size(const char *arg) { unsigned long value, round; char *ptr; value = strtoul(arg, &ptr, 10); switch (*ptr) { case 'K': case 'k': value <<= 10; break; case 'M': case 'm': value <<= 20; break; default: break; } if (value > PIPE_MAX_SIZE) { pr_err("Pipe size must be less than 1MB\n"); goto error; } else if (value < PIPE_MIN_SIZE) { pr_err("Pipe size must be over 64KB\n"); goto error; } /* Align buffer size with page unit */ round = value & (PAGE_SIZE - 1); 
value = value - round; return value; error: return 0; } static void usage(char const *prg) { pr_err("usage: %s [-h] [-o] [-s <size of pipe>]\n", prg); } static const char *make_path(int cpu_num, bool this_is_write_path) { int ret; char *buf; buf = zalloc(PATH_MAX); if (buf == NULL) { pr_err("Could not allocate buffer\n"); goto error; } if (this_is_write_path) /* write(output) path */ ret = snprintf(buf, PATH_MAX, WRITE_PATH_FMT, cpu_num); else { /* read(input) path */ ret = snprintf(buf, PATH_MAX, READ_PATH_FMT, TRACEFS, cpu_num); if (ret > 0 && access(buf, F_OK) != 0) ret = snprintf(buf, PATH_MAX, READ_PATH_FMT, DEBUGFS, cpu_num); } if (ret <= 0) { pr_err("Failed to generate %s path(CPU#%d):%d\n", this_is_write_path ? "read" : "write", cpu_num, ret); goto error; } return buf; error: free(buf); return NULL; } static const char *make_input_path(int cpu_num) { return make_path(cpu_num, false); } static const char *make_output_path(int cpu_num) { return make_path(cpu_num, true); } static void *agent_info_init(struct agent_info *s) { int cpu; const char *in_path = NULL; const char *out_path = NULL; /* init read/write threads */ for (cpu = 0; cpu < s->cpus; cpu++) { /* set read(input) path per read/write thread */ in_path = make_input_path(cpu); if (in_path == NULL) goto error; /* set write(output) path per read/write thread*/ if (!s->use_stdout) { out_path = make_output_path(cpu); if (out_path == NULL) goto error; } else /* stdout mode */ pr_debug("stdout mode\n"); rw_thread_init(cpu, in_path, out_path, s->use_stdout, s->pipe_size, s->rw_ti[cpu]); } /* init controller of read/write threads */ s->ctl_fd = rw_ctl_init((const char *)CTL_PATH); return NULL; error: exit(EXIT_FAILURE); } static void *parse_args(int argc, char *argv[], struct agent_info *s) { int cmd; unsigned long size; while ((cmd = getopt(argc, argv, "hos:")) != -1) { switch (cmd) { /* stdout mode */ case 'o': s->use_stdout = true; break; /* size of pipe */ case 's': size = parse_size(optarg); if (size == 
0) goto error; s->pipe_size = size; break; case 'h': default: usage(argv[0]); goto error; } } agent_info_init(s); return NULL; error: exit(EXIT_FAILURE); } static void agent_main_loop(struct agent_info *s) { int cpu; pthread_t rw_thread_per_cpu[MAX_CPUS]; /* Start all read/write threads */ for (cpu = 0; cpu < s->cpus; cpu++) rw_thread_per_cpu[cpu] = rw_thread_run(s->rw_ti[cpu]); rw_ctl_loop(s->ctl_fd); /* Finish all read/write threads */ for (cpu = 0; cpu < s->cpus; cpu++) { int ret; ret = pthread_join(rw_thread_per_cpu[cpu], NULL); if (ret != 0) { pr_err("pthread_join() error:%d (cpu %d)\n", ret, cpu); exit(EXIT_FAILURE); } } } static void agent_info_free(struct agent_info *s) { int i; close(s->ctl_fd); for (i = 0; i < s->cpus; i++) { close(s->rw_ti[i]->in_fd); close(s->rw_ti[i]->out_fd); close(s->rw_ti[i]->read_pipe); close(s->rw_ti[i]->write_pipe); free(s->rw_ti[i]); } free(s); } int main(int argc, char *argv[]) { struct agent_info *s = NULL; s = agent_info_new(); parse_args(argc, argv, s); agent_main_loop(s); agent_info_free(s); return 0; }
linux-master
tools/virtio/virtio-trace/trace-agent.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Read/write thread of a guest agent for virtio-trace
 *
 * Copyright (C) 2012 Hitachi, Ltd.
 * Created by Yoshihiro Yunomae <[email protected]>
 *            Masami Hiramatsu <[email protected]>
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "trace-agent.h"

#define READ_WAIT_USEC	100000

/* Allocate one rw_thread_info with all fds marked invalid (-1). */
void *rw_thread_info_new(void)
{
	struct rw_thread_info *rw_ti;

	rw_ti = zalloc(sizeof(struct rw_thread_info));
	if (rw_ti == NULL) {
		pr_err("rw_thread_info zalloc error\n");
		exit(EXIT_FAILURE);
	}

	rw_ti->cpu_num = -1;
	rw_ti->in_fd = -1;
	rw_ti->out_fd = -1;
	rw_ti->read_pipe = -1;
	rw_ti->write_pipe = -1;
	rw_ti->pipe_size = PIPE_INIT;

	return rw_ti;
}

/*
 * Open the per-CPU input (trace_pipe_raw) and output (virtio-serial or
 * stdout) fds and create the intermediate non-blocking pipe, resized to
 * pipe_size. Exits the process on any failure.
 *
 * Note the fd naming: read_pipe is the pipe's WRITE end (data_pipe[1],
 * target of the read-side splice below) and write_pipe is the pipe's
 * READ end (data_pipe[0], source of the write-side splice).
 */
void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
			bool stdout_flag, unsigned long pipe_size,
			struct rw_thread_info *rw_ti)
{
	int data_pipe[2];

	rw_ti->cpu_num = cpu;

	/* set read(input) fd */
	rw_ti->in_fd = open(in_path, O_RDONLY);
	if (rw_ti->in_fd == -1) {
		pr_err("Could not open in_fd (CPU:%d)\n", cpu);
		goto error;
	}

	/* set write(output) fd */
	if (!stdout_flag) {
		/* virtio-serial output mode */
		rw_ti->out_fd = open(out_path, O_WRONLY);
		if (rw_ti->out_fd == -1) {
			pr_err("Could not open out_fd (CPU:%d)\n", cpu);
			goto error;
		}
	} else
		/* stdout mode */
		rw_ti->out_fd = STDOUT_FILENO;

	if (pipe2(data_pipe, O_NONBLOCK) < 0) {
		pr_err("Could not create pipe in rw-thread(%d)\n", cpu);
		goto error;
	}

	/*
	 * Size of pipe is 64kB in default based on fs/pipe.c.
	 * To read/write trace data speedy, pipe size is changed.
	 * (F_SETPIPE_SZ applies to the pipe as a whole, so setting it
	 * on data_pipe[0] is sufficient.)
	 */
	if (fcntl(*data_pipe, F_SETPIPE_SZ, pipe_size) < 0) {
		pr_err("Could not change pipe size in rw-thread(%d)\n", cpu);
		goto error;
	}

	rw_ti->read_pipe = data_pipe[1];
	rw_ti->write_pipe = data_pipe[0];
	rw_ti->pipe_size = pipe_size;

	return NULL;

error:
	exit(EXIT_FAILURE);
}

/* Bind a thread to a cpu */
static void bind_cpu(int cpu_num)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu_num, &mask);

	/* bind my thread to cpu_num by assigning zero to the first argument */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
		pr_err("Could not set CPU#%d affinity\n", (int)cpu_num);
}

/*
 * Thread body: splice trace data from trace_pipe_raw into the pipe, then
 * from the pipe to the output fd, until a signal sets global_sig_receive.
 * Sleeps while global_run_operation is false (woken via cond_wakeup).
 */
static void *rw_thread_main(void *thread_info)
{
	ssize_t rlen, wlen;
	ssize_t ret;
	struct rw_thread_info *ts = (struct rw_thread_info *)thread_info;

	bind_cpu(ts->cpu_num);

	while (1) {
		/* Wait for a read order of trace data by Host OS */
		if (!global_run_operation) {
			pthread_mutex_lock(&mutex_notify);
			pthread_cond_wait(&cond_wakeup, &mutex_notify);
			pthread_mutex_unlock(&mutex_notify);
		}

		if (global_sig_receive)
			break;

		/*
		 * Each thread read trace_pipe_raw of each cpu bounding the
		 * thread, so contention of multi-threads does not occur.
		 */
		rlen = splice(ts->in_fd, NULL, ts->read_pipe, NULL,
				ts->pipe_size, SPLICE_F_MOVE | SPLICE_F_MORE);

		if (rlen < 0) {
			pr_err("Splice_read in rw-thread(%d)\n", ts->cpu_num);
			goto error;
		} else if (rlen == 0) {
			/*
			 * If trace data do not exist or are unreadable not
			 * for exceeding the page size, splice_read returns
			 * NULL. Then, this waits for being filled the data in a
			 * ring-buffer.
			 */
			usleep(READ_WAIT_USEC);
			pr_debug("Read retry(cpu:%d)\n", ts->cpu_num);
			continue;
		}

		wlen = 0;

		do {
			/* Drain everything just spliced into the pipe. */
			ret = splice(ts->write_pipe, NULL, ts->out_fd, NULL,
					rlen - wlen,
					SPLICE_F_MOVE | SPLICE_F_MORE);

			if (ret < 0) {
				pr_err("Splice_write in rw-thread(%d)\n",
						ts->cpu_num);
				goto error;
			} else if (ret == 0)
				/*
				 * When host reader is not in time for reading
				 * trace data, guest will be stopped. This is
				 * because char dev in QEMU is not supported
				 * non-blocking mode. Then, writer might be
				 * sleep in that case.
				 * This sleep will be removed by supporting
				 * non-blocking mode.
				 */
				sleep(1);
			wlen += ret;
		} while (wlen < rlen);
	}

	return NULL;

error:
	exit(EXIT_FAILURE);
}

/* Spawn the rw thread for one CPU; exits the process on failure. */
pthread_t rw_thread_run(struct rw_thread_info *rw_ti)
{
	int ret;
	pthread_t rw_thread_per_cpu;

	ret = pthread_create(&rw_thread_per_cpu, NULL, rw_thread_main, rw_ti);
	if (ret != 0) {
		pr_err("Could not create a rw thread(%d)\n", rw_ti->cpu_num);
		exit(EXIT_FAILURE);
	}

	return rw_thread_per_cpu;
}
linux-master
tools/virtio/virtio-trace/trace-agent-rw.c
#include "test.c"
linux-master
tools/virtio/vhost_test/vhost_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Partial implementation of virtio 0.9. event index is used for signalling,
 * unconditionally. Design roughly follows linux kernel implementation in order
 * to be able to judge its performance.
 */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <linux/virtio_ring.h>

/* Per-descriptor opaque cookie returned to the guest on completion. */
struct data {
	void *data;
} *data;

struct vring ring;

/* enabling the below activates experimental ring polling code
 * (which skips index reads on consumer in favor of looking at
 * high bits of ring id ^ 0x8000).
 */
/* #ifdef RING_POLL */
/* enabling the below activates experimental in-order code
 * (which skips ring updates and reads and writes len in descriptor).
 */
/* #ifdef INORDER */

#if defined(RING_POLL) && defined(INORDER)
#error "RING_POLL and INORDER are mutually exclusive"
#endif

/* how much padding is needed to avoid false cache sharing */
#define HOST_GUEST_PADDING 0x80

/* Guest (producer) private state, padded to its own cache lines. */
struct guest {
	unsigned short avail_idx;
	unsigned short last_used_idx;
	unsigned short num_free;
	unsigned short kicked_avail_idx;
#ifndef INORDER
	unsigned short free_head;
#else
	unsigned short reserved_free_head;
#endif
	unsigned char reserved[HOST_GUEST_PADDING - 10];
} guest;

/* Host (consumer) private state, padded to its own cache lines. */
struct host {
	/* we do not need to track last avail index
	 * unless we have more than one in flight.
	 */
	unsigned short used_idx;
	unsigned short called_used_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;

/* implemented by ring */
/*
 * Allocate and zero the vring plus the cookie array; chain all
 * descriptors into the initial free list (non-INORDER builds).
 */
void alloc_ring(void)
{
	int ret;
	int i;
	void *p;

	ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
	if (ret) {
		perror("Unable to allocate ring buffer.\n");
		exit(3);
	}
	memset(p, 0, vring_size(ring_size, 0x1000));
	vring_init(&ring, ring_size, p, 0x1000);

	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
#ifndef INORDER
	/* Put everything in free lists. */
	guest.free_head = 0;
#endif
	for (i = 0; i < ring_size - 1; i++)
		ring.desc[i].next = i + 1;
	host.used_idx = 0;
	host.called_used_idx = -1;
	guest.num_free = ring_size;
	data = malloc(ring_size * sizeof *data);
	if (!data) {
		perror("Unable to allocate data buffer.\n");
		exit(3);
	}
	memset(data, 0, ring_size * sizeof *data);
}

/* guest side */
/*
 * Post one buffer to the ring. Returns 0 on success, -1 when no
 * descriptors are free. datap is returned by get_buf() on completion.
 */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	unsigned head;
#ifndef INORDER
	unsigned avail;
#endif
	struct vring_desc *desc;

	if (!guest.num_free)
		return -1;

#ifdef INORDER
	head = (ring_size - 1) & (guest.avail_idx++);
#else
	head = guest.free_head;
#endif
	guest.num_free--;

	desc = ring.desc;
	desc[head].flags = VRING_DESC_F_NEXT;
	desc[head].addr = (unsigned long)(void *)buf;
	desc[head].len = len;
	/* We do it like this to simulate the way
	 * we'd have to flip it if we had multiple
	 * descriptors.
	 */
	desc[head].flags &= ~VRING_DESC_F_NEXT;
#ifndef INORDER
	guest.free_head = desc[head].next;
#endif

	data[head].data = datap;

#ifdef RING_POLL
	/* Barrier A (for pairing) */
	smp_release();
	avail = guest.avail_idx++;
	/* High bits of the ring entry encode the wrap counter ^ 0x8000
	 * so the consumer can poll without reading avail->idx. */
	ring.avail->ring[avail & (ring_size - 1)] =
		(head | (avail & ~(ring_size - 1))) ^ 0x8000;
#else
#ifndef INORDER
	/* Barrier A (for pairing) */
	smp_release();
	avail = (ring_size - 1) & (guest.avail_idx++);
	ring.avail->ring[avail] = head;
#endif
	/* Barrier A (for pairing) */
	smp_release();
#endif
	ring.avail->idx = guest.avail_idx;
	return 0;
}

/*
 * Reap one completed buffer from the used ring. Returns the datap cookie
 * passed to add_inbuf(), or NULL when nothing has completed.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	unsigned head;
	unsigned index;
	void *datap;

#ifdef RING_POLL
	head = (ring_size - 1) & guest.last_used_idx;
	index = ring.used->ring[head].id;
	if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
	index &= ring_size - 1;
#else
	if (ring.used->idx == guest.last_used_idx)
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
#ifdef INORDER
	head = (ring_size - 1) & guest.last_used_idx;
	index = head;
#else
	head = (ring_size - 1) & guest.last_used_idx;
	index = ring.used->ring[head].id;
#endif
#endif
#ifdef INORDER
	*lenp = ring.desc[index].len;
#else
	*lenp = ring.used->ring[head].len;
#endif
	datap = data[index].data;
	*bufp = (void*)(unsigned long)ring.desc[index].addr;
	data[index].data = NULL;
#ifndef INORDER
	/* Return the descriptor to the free list. */
	ring.desc[index].next = guest.free_head;
	guest.free_head = index;
#endif
	guest.num_free++;
	guest.last_used_idx++;
	return datap;
}

/* True when no completions are pending for the guest. */
bool used_empty()
{
	unsigned short last_used_idx = guest.last_used_idx;
#ifdef RING_POLL
	unsigned short head = last_used_idx & (ring_size - 1);
	unsigned index = ring.used->ring[head].id;

	return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
#else
	return ring.used->idx == last_used_idx;
#endif
}

void disable_call()
{
	/* Doing nothing to disable calls might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

/* Re-arm the call (interrupt) event index; returns used_empty() so the
 * caller can re-check for completions that raced with the re-arm. */
bool enable_call()
{
	vring_used_event(&ring) = guest.last_used_idx;
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
	return used_empty();
}

/* Notify the host if it asked for a kick via the avail event index. */
void kick_available(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier C (for pairing) */
	smp_mb();
	need = vring_need_event(vring_avail_event(&ring),
				guest.avail_idx,
				guest.kicked_avail_idx);

	guest.kicked_avail_idx = guest.avail_idx;
	if (need)
		kick();
}

/* host side */
void disable_kick()
{
	/* Doing nothing to disable kicks might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

/* Re-arm the kick event index; returns avail_empty() so the caller can
 * re-check for buffers that raced with the re-arm. */
bool enable_kick()
{
	vring_avail_event(&ring) = host.used_idx;
	/* Barrier C (for pairing) */
	smp_mb();
	return avail_empty();
}

/* True when the guest has posted no new buffers for the host. */
bool avail_empty()
{
	unsigned head = host.used_idx;
#ifdef RING_POLL
	unsigned index = ring.avail->ring[head & (ring_size - 1)];

	return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
#else
	return head == ring.avail->idx;
#endif
}

/*
 * Consume one available buffer on the host side and publish it as used
 * (with len shortened by 1, simulating the device writing less than asked).
 * Returns false when the avail ring is empty.
 */
bool use_buf(unsigned *lenp, void **bufp)
{
	unsigned used_idx = host.used_idx;
	struct vring_desc *desc;
	unsigned head;

#ifdef RING_POLL
	head = ring.avail->ring[used_idx & (ring_size - 1)];
	if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
		return false;
	/* Barrier A (for pairing) */
	smp_acquire();

	used_idx &= ring_size - 1;
	desc = &ring.desc[head & (ring_size - 1)];
#else
	if (used_idx == ring.avail->idx)
		return false;
	/* Barrier A (for pairing) */
	smp_acquire();

	used_idx &= ring_size - 1;
#ifdef INORDER
	head = used_idx;
#else
	head = ring.avail->ring[used_idx];
#endif
	desc = &ring.desc[head];
#endif

	*lenp = desc->len;
	*bufp = (void *)(unsigned long)desc->addr;

#ifdef INORDER
	desc->len = desc->len - 1;
#else
	/* now update used ring */
	ring.used->ring[used_idx].id = head;
	ring.used->ring[used_idx].len = desc->len - 1;
#endif
	/* Barrier B (for pairing) */
	smp_release();
	host.used_idx++;
	ring.used->idx = host.used_idx;
	return true;
}

/* Interrupt the guest if it asked for a call via the used event index. */
void call_used(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier D (for pairing) */
	smp_mb();
	need = vring_need_event(vring_used_event(&ring),
				host.used_idx,
				host.called_used_idx);

	host.called_used_idx = host.used_idx;
	if (need)
		call();
}
linux-master
tools/virtio/ringtest/virtio_ring_0_9.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Command line processing and common functions for ring benchmarking.
 */
#define _GNU_SOURCE
#include <getopt.h>
#include <pthread.h>
#include <assert.h>
#include <sched.h>
#include "main.h"
#include <sys/eventfd.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <limits.h>

/* Benchmark parameters, overridable from the command line (see longopts). */
int runcycles = 10000000;
int max_outstanding = INT_MAX;
int batch = 1;
int param = 0;

bool do_sleep = false;
bool do_relax = false;
bool do_exit = true;

unsigned ring_size = 256;

/* eventfds used to emulate guest->host kicks and host->guest calls. */
static int kickfd = -1;
static int callfd = -1;

/* Signal the peer through an eventfd; vmexit()/vmentry() bracket the cost. */
void notify(int fd)
{
	unsigned long long v = 1;
	int r;

	vmexit();
	r = write(fd, &v, sizeof v);
	assert(r == sizeof v);
	vmentry();
}

/* Block until the peer signals the given eventfd. */
void wait_for_notify(int fd)
{
	unsigned long long v = 1;
	int r;

	vmexit();
	r = read(fd, &v, sizeof v);
	assert(r == sizeof v);
	vmentry();
}

void kick(void)
{
	notify(kickfd);
}

void wait_for_kick(void)
{
	wait_for_notify(kickfd);
}

void call(void)
{
	notify(callfd);
}

void wait_for_call(void)
{
	wait_for_notify(callfd);
}

/* Pin the calling thread to the CPU named in arg (NULL = no pinning). */
void set_affinity(const char *arg)
{
	cpu_set_t cpuset;
	int ret;
	pthread_t self;
	long int cpu;
	char *endptr;

	if (!arg)
		return;

	cpu = strtol(arg, &endptr, 0);
	assert(!*endptr);

	assert(cpu >= 0 && cpu < CPU_SETSIZE);

	self = pthread_self();
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	ret = pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset);
	assert(!ret);
}

/* Busy-poll until at least one completed buffer is available. */
void poll_used(void)
{
	while (used_empty())
		busy_wait();
}

/*
 * Guest-side benchmark loop: keep up to max_outstanding buffers in
 * flight, kicking the host every `batch` submissions when sleeping mode
 * is enabled, and reclaim completions until `runcycles` buffers finish.
 */
static void __attribute__((__flatten__)) run_guest(void)
{
	int completed_before;
	int completed = 0;
	int started = 0;
	int bufs = runcycles;
	int spurious = 0;
	int r;
	unsigned len;
	void *buf;
	int tokick = batch;

	for (;;) {
		if (do_sleep)
			disable_call();
		completed_before = completed;
		do {
			if (started < bufs &&
			    started - completed < max_outstanding) {
				r = add_inbuf(0, "Buffer\n", "Hello, world!");
				if (__builtin_expect(r == 0, true)) {
					++started;
					if (!--tokick) {
						tokick = batch;
						if (do_sleep)
							kick_available();
					}
				}
			} else
				r = -1;

			/* Flush out completed bufs if any */
			if (get_buf(&len, &buf)) {
				++completed;
				if (__builtin_expect(completed == bufs, false))
					return;
				r = 0;
			}
		} while (r == 0);
		if (completed == completed_before)
			++spurious;
		assert(completed <= bufs);
		assert(started <= bufs);
		if (do_sleep) {
			/* enable_call() re-checks to close the arm/complete race. */
			if (used_empty() && enable_call())
				wait_for_call();
		} else {
			poll_used();
		}
	}
}

/* Busy-poll until the guest has posted at least one buffer. */
void poll_avail(void)
{
	while (avail_empty())
		busy_wait();
}

/*
 * Host-side benchmark loop: consume posted buffers (signalling the
 * guest when sleeping mode is enabled) until `runcycles` are done.
 */
static void __attribute__((__flatten__)) run_host(void)
{
	int completed_before;
	int completed = 0;
	int spurious = 0;
	int bufs = runcycles;
	unsigned len;
	void *buf;

	for (;;) {
		if (do_sleep) {
			if (avail_empty() && enable_kick())
				wait_for_kick();
		} else {
			poll_avail();
		}
		if (do_sleep)
			disable_kick();
		completed_before = completed;
		while (__builtin_expect(use_buf(&len, &buf), true)) {
			if (do_sleep)
				call_used();
			++completed;
			if (__builtin_expect(completed == bufs, false))
				return;
		}
		if (completed == completed_before)
			++spurious;
		assert(completed <= bufs);
		if (completed == bufs)
			break;
	}
}

/* pthread entry point for the guest thread; arg is the CPU to pin to. */
void *start_guest(void *arg)
{
	set_affinity(arg);
	run_guest();
	pthread_exit(NULL);
}

/* pthread entry point for the host thread; arg is the CPU to pin to. */
void *start_host(void *arg)
{
	set_affinity(arg);
	run_host();
	pthread_exit(NULL);
}

static const char optstring[] = "";
static const struct option longopts[] = {
	{
		.name = "help",
		.has_arg = no_argument,
		.val = 'h',
	},
	{
		.name = "host-affinity",
		.has_arg = required_argument,
		.val = 'H',
	},
	{
		.name = "guest-affinity",
		.has_arg = required_argument,
		.val = 'G',
	},
	{
		.name = "ring-size",
		.has_arg = required_argument,
		.val = 'R',
	},
	{
		.name = "run-cycles",
		.has_arg = required_argument,
		.val = 'C',
	},
	{
		.name = "outstanding",
		.has_arg = required_argument,
		.val = 'o',
	},
	{
		.name = "batch",
		.has_arg = required_argument,
		.val = 'b',
	},
	{
		.name = "param",
		.has_arg = required_argument,
		.val = 'p',
	},
	{
		.name = "sleep",
		.has_arg = no_argument,
		.val = 's',
	},
	{
		.name = "relax",
		.has_arg = no_argument,
		.val = 'x',
	},
	{
		.name = "exit",
		.has_arg = no_argument,
		.val = 'e',
	},
	{
	}
};

/* Print usage with the current defaults to stderr. */
static void help(void)
{
	fprintf(stderr, "Usage: <test> [--help]"
		" [--host-affinity H]"
		" [--guest-affinity G]"
		" [--ring-size R (default: %d)]"
		" [--run-cycles C (default: %d)]"
		" [--batch b]"
		" [--outstanding o]"
		" [--param p]"
		" [--sleep]"
		" [--relax]"
		" [--exit]"
		"\n",
		ring_size, runcycles);
}

int main(int argc, char **argv)
{
	int ret;
	pthread_t host, guest;
	void *tret;
	char *host_arg = NULL;
	char *guest_arg = NULL;
	char *endptr;
	long int c;

	kickfd = eventfd(0, 0);
	assert(kickfd >= 0);
	callfd = eventfd(0, 0);
	assert(callfd >= 0);

	for (;;) {
		int o = getopt_long(argc, argv, optstring, longopts, NULL);
		/* NOTE(review): there is no explicit case 'h', so --help falls
		 * through to default: and exits with status 4, not 0 — confirm
		 * this is intended.
		 */
		switch (o) {
		case -1:
			goto done;
		case '?':
			help();
			exit(2);
		case 'H':
			host_arg = optarg;
			break;
		case 'G':
			guest_arg = optarg;
			break;
		case 'R':
			/* Ring size must be a power of two. */
			ring_size = strtol(optarg, &endptr, 0);
			assert(ring_size && !(ring_size & (ring_size - 1)));
			assert(!*endptr);
			break;
		case 'C':
			c = strtol(optarg, &endptr, 0);
			assert(!*endptr);
			assert(c > 0 && c < INT_MAX);
			runcycles = c;
			break;
		case 'o':
			c = strtol(optarg, &endptr, 0);
			assert(!*endptr);
			assert(c > 0 && c < INT_MAX);
			max_outstanding = c;
			break;
		case 'p':
			c = strtol(optarg, &endptr, 0);
			assert(!*endptr);
			assert(c > 0 && c < INT_MAX);
			param = c;
			break;
		case 'b':
			c = strtol(optarg, &endptr, 0);
			assert(!*endptr);
			assert(c > 0 && c < INT_MAX);
			batch = c;
			break;
		case 's':
			do_sleep = true;
			break;
		case 'x':
			do_relax = true;
			break;
		case 'e':
			do_exit = true;
			break;
		default:
			help();
			exit(4);
			break;
		}
	}

	/* does nothing here, used to make sure all smp APIs compile */
	smp_acquire();
	smp_release();
	smp_mb();
done:

	if (batch > max_outstanding)
		batch = max_outstanding;

	if (optind < argc) {
		help();
		exit(4);
	}
	alloc_ring();

	ret = pthread_create(&host, NULL, start_host, host_arg);
	assert(!ret);
	ret = pthread_create(&guest, NULL, start_guest, guest_arg);
	assert(!ret);

	ret = pthread_join(guest, &tret);
	assert(!ret);
	ret = pthread_join(host, &tret);
	assert(!ret);
	return 0;
}
linux-master
tools/virtio/ringtest/main.c
// SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE #include "main.h" #include <assert.h> /* stub implementation: useful for measuring overhead */ void alloc_ring(void) { } /* guest side */ int add_inbuf(unsigned len, void *buf, void *datap) { return 0; } /* * skb_array API provides no way for producer to find out whether a given * buffer was consumed. Our tests merely require that a successful get_buf * implies that add_inbuf succeed in the past, and that add_inbuf will succeed, * fake it accordingly. */ void *get_buf(unsigned *lenp, void **bufp) { return "Buffer"; } bool used_empty() { return false; } void disable_call() { assert(0); } bool enable_call() { assert(0); } void kick_available(void) { assert(0); } /* host side */ void disable_kick() { assert(0); } bool enable_kick() { assert(0); } bool avail_empty() { return false; } bool use_buf(unsigned *lenp, void **bufp) { return true; } void call_used(void) { assert(0); }
linux-master
tools/virtio/ringtest/noring.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <[email protected]>
 *
 * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
 * signalling, unconditionally.
 */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* Next - Where next entry will be written.
 * Prev - "Next" value when event triggered previously.
 * Event - Peer requested event after writing this entry.
 */
static inline bool need_event(unsigned short event,
			      unsigned short next,
			      unsigned short prev)
{
	return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
}

/* Design:
 * Guest adds descriptors with unique index values and DESC_HW in flags.
 * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
 * Flags are always set last.
 */
#define DESC_HW 0x1

struct desc {
	unsigned short flags;
	unsigned short index;
	unsigned len;
	unsigned long long addr;
};

/* how much padding is needed to avoid false cache sharing */
#define HOST_GUEST_PADDING 0x80

/* Mostly read */
struct event {
	unsigned short kick_index;
	unsigned char reserved0[HOST_GUEST_PADDING - 2];
	unsigned short call_index;
	unsigned char reserved1[HOST_GUEST_PADDING - 2];
};

struct data {
	void *buf; /* descriptor is writeable, we can't get buf from there */
	void *data;
} *data;

struct desc *ring;
struct event *event;

struct guest {
	unsigned avail_idx;
	unsigned last_used_idx;
	unsigned num_free;
	unsigned kicked_avail_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 12];
} guest;

struct host {
	/* we do not need to track last avail index
	 * unless we have more than one in flight.
	 */
	unsigned used_idx;
	unsigned called_used_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;

/* implemented by ring */

/* Allocate the page-aligned descriptor ring, the event block and the
 * per-descriptor data array; exits the process on allocation failure.
 */
void alloc_ring(void)
{
	int ret;
	int i;

	ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
	if (ret) {
		perror("Unable to allocate ring buffer.\n");
		exit(3);
	}
	event = calloc(1, sizeof(*event));
	if (!event) {
		perror("Unable to allocate event buffer.\n");
		exit(3);
	}
	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
	host.used_idx = 0;
	host.called_used_idx = -1;
	for (i = 0; i < ring_size; ++i) {
		struct desc desc = {
			.index = i,
		};
		ring[i] = desc;
	}
	guest.num_free = ring_size;
	data = calloc(ring_size, sizeof(*data));
	if (!data) {
		perror("Unable to allocate data buffer.\n");
		exit(3);
	}
}

/* guest side */

/* Post one buffer: fill the descriptor, then hand it to the host by
 * setting DESC_HW last (after Barrier A).  Returns -1 when the ring
 * is full, 0 on success.
 */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	unsigned head, index;

	if (!guest.num_free)
		return -1;

	guest.num_free--;
	head = (ring_size - 1) & (guest.avail_idx++);

	/* Start with a write. On MESI architectures this helps
	 * avoid a shared state with consumer that is polling this descriptor.
	 */
	ring[head].addr = (unsigned long)(void*)buf;
	ring[head].len = len;
	/* read below might bypass write above. That is OK because it's just an
	 * optimization. If this happens, we will get the cache line in a
	 * shared state which is unfortunate, but probably not worth it to
	 * add an explicit full barrier to avoid this.
	 */
	barrier();
	index = ring[head].index;
	data[index].buf = buf;
	data[index].data = datap;
	/* Barrier A (for pairing) */
	smp_release();
	ring[head].flags = DESC_HW;

	return 0;
}

/* Reclaim the next completed descriptor (DESC_HW cleared by the host);
 * returns the datap passed to add_inbuf, or NULL if none is ready.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;
	unsigned index;
	void *datap;

	if (ring[head].flags & DESC_HW)
		return NULL;
	/* Barrier B (for pairing) */
	smp_acquire();
	*lenp = ring[head].len;
	index = ring[head].index & (ring_size - 1);
	datap = data[index].data;
	*bufp = data[index].buf;
	data[index].buf = NULL;
	data[index].data = NULL;
	guest.num_free++;
	guest.last_used_idx++;
	return datap;
}

/* True while the next descriptor is still owned by the host. */
bool used_empty()
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;

	return (ring[head].flags & DESC_HW);
}

void disable_call()
{
	/* Doing nothing to disable calls might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

/* Re-arm host->guest calls via call_index; returns used_empty() so the
 * caller can close the arm/complete race.
 */
bool enable_call()
{
	event->call_index = guest.last_used_idx;
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
	return used_empty();
}

/* Kick the host if need_event() says it requested a notification. */
void kick_available(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier C (for pairing) */
	smp_mb();
	need = need_event(event->kick_index,
			   guest.avail_idx,
			   guest.kicked_avail_idx);

	guest.kicked_avail_idx = guest.avail_idx;
	if (need)
		kick();
}

/* host side */
void disable_kick()
{
	/* Doing nothing to disable kicks might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

/* Re-arm guest->host kicks via kick_index; returns avail_empty() so the
 * caller can close the arm/post race.
 */
bool enable_kick()
{
	event->kick_index = host.used_idx;
	/* Barrier C (for pairing) */
	smp_mb();
	return avail_empty();
}

/* True while the next descriptor has not been handed to the host. */
bool avail_empty()
{
	unsigned head = (ring_size - 1) & host.used_idx;

	return !(ring[head].flags & DESC_HW);
}

/* Host: consume the next DESC_HW descriptor in place (decrement len as
 * the visible "work"), then release it by clearing flags after Barrier B.
 */
bool use_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & host.used_idx;

	if (!(ring[head].flags & DESC_HW))
		return false;

	/* make sure length read below is not speculated */
	/* Barrier A (for pairing) */
	smp_acquire();

	/* simple in-order completion: we don't need
	 * to touch index at all. This also means we
	 * can just modify the descriptor in-place.
	 */
	ring[head].len--;
	/* Make sure len is valid before flags.
	 * Note: alternative is to write len and flags in one access -
	 * possible on 64 bit architectures but wmb is free on Intel anyway
	 * so I have no way to test whether it's a gain.
	 */
	/* Barrier B (for pairing) */
	smp_release();
	ring[head].flags = 0;
	host.used_idx++;
	return true;
}

/* Call (interrupt) the guest if need_event() says it asked for one. */
void call_used(void)
{
	bool need;

	/* Flush in previous flags write */
	/* Barrier D (for pairing) */
	smp_mb();

	need = need_event(event->call_index,
			  host.used_idx,
			  host.called_used_idx);

	host.called_used_idx = host.used_idx;

	if (need)
		call();
}
linux-master
tools/virtio/ringtest/ring.c
#define INORDER 1 #include "virtio_ring_0_9.c"
linux-master
tools/virtio/ringtest/virtio_ring_inorder.c
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

/*
 * Minimal userspace shims for the kernel APIs that ptr_ring.h expects,
 * so the unmodified kernel header can be included below.
 */
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX

typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;
#define __GFP_ZERO 0x1

/* kmalloc emulation: cache-line aligned allocation, optional zeroing. */
static void *kmalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;

	if (gfp & __GFP_ZERO)
		memset(p, 0, size);
	return p;
}

static inline void *kzalloc(unsigned size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/* Array allocation with overflow check on n * size. */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kmalloc(n * size, flags);
}

static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

#define kvmalloc_array kmalloc_array
#define kvfree kfree

/* Kernel spinlock API mapped onto pthread spinlocks; the bh/irq variants
 * are equivalent here since there are no interrupts in userspace.
 */
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

/* Producer-side counters used to fake completion tracking (see get_buf). */
static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */

/* Produce one pointer into the ring; 0 on success, negative when full. */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * ptr_ring API provides no way for producer to find out whether a given
 * buffer was consumed. Our tests merely require that a successful get_buf
 * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
 * fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

/* Event suppression is unsupported by this backend - must not be called. */
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return __ptr_ring_empty(&array);
}

/* Consume one pointer; true when something was consumed. */
bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
linux-master
tools/virtio/ringtest/ptr_ring.c
#define RING_POLL 1 #include "virtio_ring_0_9.c"
linux-master
tools/virtio/ringtest/virtio_ring_poll.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * An implementation of host to guest copy functionality for Linux.
 *
 * Copyright (C) 2014, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <[email protected]>
 */

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <linux/hyperv.h>
#include <linux/limits.h>
#include <syslog.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <getopt.h>

/* State of the file currently being copied from the host. */
static int target_fd;
static char target_fname[PATH_MAX];
static unsigned long long filesize;

/*
 * Handle START_FILE_COPY: build the target path, create missing path
 * components when CREATE_PATH is set, honour OVER_WRITE, and open the
 * target file.  Returns 0 or an HV_* error code; on error target_fname
 * is cleared so a later cancel won't unlink an unrelated file.
 */
static int hv_start_fcopy(struct hv_start_fcopy *smsg)
{
	int error = HV_E_FAIL;
	char *q, *p;

	filesize = 0;
	p = (char *)smsg->path_name;
	snprintf(target_fname, sizeof(target_fname), "%s/%s",
		 (char *)smsg->path_name, (char *)smsg->file_name);

	syslog(LOG_INFO, "Target file name: %s", target_fname);
	/*
	 * Check to see if the path is already in place; if not,
	 * create if required.
	 */
	while ((q = strchr(p, '/')) != NULL) {
		if (q == p) {
			p++;
			continue;
		}
		/* Temporarily terminate at this component to test/create it. */
		*q = '\0';
		if (access((char *)smsg->path_name, F_OK)) {
			if (smsg->copy_flags & CREATE_PATH) {
				if (mkdir((char *)smsg->path_name, 0755)) {
					syslog(LOG_ERR, "Failed to create %s",
					       (char *)smsg->path_name);
					goto done;
				}
			} else {
				syslog(LOG_ERR, "Invalid path: %s",
				       (char *)smsg->path_name);
				goto done;
			}
		}
		p = q + 1;
		*q = '/';
	}

	if (!access(target_fname, F_OK)) {
		syslog(LOG_INFO, "File: %s exists", target_fname);
		if (!(smsg->copy_flags & OVER_WRITE)) {
			error = HV_ERROR_ALREADY_EXISTS;
			goto done;
		}
	}

	target_fd = open(target_fname,
			 O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, 0744);
	if (target_fd == -1) {
		syslog(LOG_INFO, "Open Failed: %s", strerror(errno));
		goto done;
	}

	error = 0;
done:
	if (error)
		target_fname[0] = '\0';
	return error;
}

/*
 * Handle WRITE_TO_FILE: write one chunk at the offset the host supplied.
 * Returns 0, HV_ERROR_DISK_FULL, or HV_E_FAIL.
 */
static int hv_copy_data(struct hv_do_fcopy *cpmsg)
{
	ssize_t bytes_written;
	int ret = 0;

	bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size,
			       cpmsg->offset);

	filesize += cpmsg->size;
	/* NOTE(review): a short write without an errno update would be
	 * misclassified by the errno switch below — presumed acceptable for
	 * this protocol; confirm.
	 */
	if (bytes_written != cpmsg->size) {
		switch (errno) {
		case ENOSPC:
			ret = HV_ERROR_DISK_FULL;
			break;
		default:
			ret = HV_E_FAIL;
			break;
		}
		syslog(LOG_ERR,
		       "pwrite failed to write %llu bytes: %ld (%s)",
		       filesize, (long)bytes_written, strerror(errno));
	}

	return ret;
}

/*
 * Reset target_fname to "" in the two below functions for hibernation: if
 * the fcopy operation is aborted by hibernation, the daemon should remove the
 * partially-copied file; to achieve this, the hv_utils driver always fakes a
 * CANCEL_FCOPY message upon suspend, and later when the VM resumes back,
 * the daemon calls hv_copy_cancel() to remove the file; if a file is copied
 * successfully before suspend, hv_copy_finished() must reset target_fname to
 * avoid that the file can be incorrectly removed upon resume, since the faked
 * CANCEL_FCOPY message is spurious in this case.
 */
static int hv_copy_finished(void)
{
	close(target_fd);
	target_fname[0] = '\0';
	return 0;
}

/* Cancel: close and unlink any partially-copied file. */
static int hv_copy_cancel(void)
{
	close(target_fd);
	if (strlen(target_fname) > 0) {
		unlink(target_fname);
		target_fname[0] = '\0';
	}
	return 0;
}

void print_usage(char *argv[])
{
	fprintf(stderr, "Usage: %s [options]\n"
		"Options are:\n"
		" -n, --no-daemon stay in foreground, don't daemonize\n"
		" -h, --help print this help\n", argv[0]);
}

/*
 * Daemon main loop: register with /dev/vmbus/hv_fcopy, then read one
 * message at a time, dispatch it, and write the result code back.  On
 * read/write failure the device is closed and re-opened (see the
 * hibernation comment above).
 */
int main(int argc, char *argv[])
{
	int fcopy_fd = -1;
	int error;
	int daemonize = 1, long_index = 0, opt;
	int version = FCOPY_CURRENT_VERSION;
	union {
		struct hv_fcopy_hdr hdr;
		struct hv_start_fcopy start;
		struct hv_do_fcopy copy;
		__u32 kernel_modver;
	} buffer = { };
	int in_handshake;

	static struct option long_options[] = {
		{"help", no_argument, 0, 'h' },
		{"no-daemon", no_argument, 0, 'n' },
		{0, 0, 0, 0 }
	};

	while ((opt = getopt_long(argc, argv, "hn", long_options,
				  &long_index)) != -1) {
		switch (opt) {
		case 'n':
			daemonize = 0;
			break;
		case 'h':
		default:
			print_usage(argv);
			exit(EXIT_FAILURE);
		}
	}

	if (daemonize && daemon(1, 0)) {
		syslog(LOG_ERR, "daemon() failed; error: %s", strerror(errno));
		exit(EXIT_FAILURE);
	}

	openlog("HV_FCOPY", 0, LOG_USER);
	syslog(LOG_INFO, "starting; pid is:%d", getpid());

reopen_fcopy_fd:
	if (fcopy_fd != -1)
		close(fcopy_fd);
	/* Remove any possible partially-copied file on error */
	hv_copy_cancel();
	in_handshake = 1;
	fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR);

	if (fcopy_fd < 0) {
		syslog(LOG_ERR, "open /dev/vmbus/hv_fcopy failed; error: %d %s",
		       errno, strerror(errno));
		exit(EXIT_FAILURE);
	}

	/*
	 * Register with the kernel.
	 */
	if ((write(fcopy_fd, &version, sizeof(int))) != sizeof(int)) {
		syslog(LOG_ERR, "Registration failed: %s", strerror(errno));
		exit(EXIT_FAILURE);
	}

	while (1) {
		/*
		 * In this loop we process fcopy messages after the
		 * handshake is complete.
		 */
		ssize_t len;

		len = pread(fcopy_fd, &buffer, sizeof(buffer), 0);
		if (len < 0) {
			syslog(LOG_ERR, "pread failed: %s", strerror(errno));
			goto reopen_fcopy_fd;
		}

		if (in_handshake) {
			/* First message is the kernel module version only. */
			if (len != sizeof(buffer.kernel_modver)) {
				syslog(LOG_ERR, "invalid version negotiation");
				exit(EXIT_FAILURE);
			}
			in_handshake = 0;
			syslog(LOG_INFO, "kernel module version: %u",
			       buffer.kernel_modver);
			continue;
		}

		switch (buffer.hdr.operation) {
		case START_FILE_COPY:
			error = hv_start_fcopy(&buffer.start);
			break;
		case WRITE_TO_FILE:
			error = hv_copy_data(&buffer.copy);
			break;
		case COMPLETE_FCOPY:
			error = hv_copy_finished();
			break;
		case CANCEL_FCOPY:
			error = hv_copy_cancel();
			break;
		default:
			error = HV_E_FAIL;
			syslog(LOG_ERR, "Unknown operation: %d",
			       buffer.hdr.operation);
		}

		/*
		 * pwrite() may return an error due to the faked CANCEL_FCOPY
		 * message upon hibernation. Ignore the error by resetting the
		 * dev file, i.e. closing and re-opening it.
		 */
		if (pwrite(fcopy_fd, &error, sizeof(int), 0) != sizeof(int)) {
			syslog(LOG_ERR, "pwrite failed: %s", strerror(errno));
			goto reopen_fcopy_fd;
		}
	}
}
tools/hv/hv_fcopy_daemon.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * An implementation of the host initiated guest snapshot for Hyper-V.
 *
 * Copyright (C) 2013, Microsoft, Inc.
 * Author : K. Y. Srinivasan <[email protected]>
 */

#include <sys/types.h>
#include <sys/poll.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include <stdio.h>
#include <mntent.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/hyperv.h>
#include <syslog.h>
#include <getopt.h>
#include <stdbool.h>
#include <dirent.h>

/* True while at least one filesystem is frozen by this daemon. */
static bool fs_frozen;

/*
 * Issue the FIFREEZE/FITHAW ioctl on the filesystem mounted at @dir.
 * Returns 0 on success (including the benign repeated-freeze/thaw cases
 * described below), nonzero on failure.
 *
 * Don't use syslog() in the function since that can cause write to disk.
 */
static int vss_do_freeze(char *dir, unsigned int cmd)
{
	int ret, fd = open(dir, O_RDONLY);

	if (fd < 0)
		return 1;

	ret = ioctl(fd, cmd, 0);

	/*
	 * If a partition is mounted more than once, only the first
	 * FREEZE/THAW can succeed and the later ones will get
	 * EBUSY/EINVAL respectively: there could be 2 cases:
	 * 1) a user may mount the same partition to different directories
	 *    by mistake or on purpose;
	 * 2) The subvolume of btrfs appears to have the same partition
	 *    mounted more than once.
	 */
	if (ret) {
		if ((cmd == FIFREEZE && errno == EBUSY) ||
		    (cmd == FITHAW && errno == EINVAL)) {
			close(fd);
			return 0;
		}
	}

	close(fd);
	return !!ret;
}

/*
 * Return true if the block device described by the sysfs directory
 * @blkname (/sys/dev/block/<maj>:<min>) is a loop device, or is stacked
 * (via its "slaves" directory) on top of one. Recurses through slaves.
 * Exits the process on allocation failure.
 */
static bool is_dev_loop(const char *blkname)
{
	char *buffer;
	DIR *dir;
	struct dirent *entry;
	bool ret = false;

	buffer = malloc(PATH_MAX);
	if (!buffer) {
		syslog(LOG_ERR, "Can't allocate memory!");
		exit(1);
	}

	/* A loop device exposes a "loop" subdirectory in sysfs. */
	snprintf(buffer, PATH_MAX, "%s/loop", blkname);
	if (!access(buffer, R_OK | X_OK)) {
		ret = true;
		goto free_buffer;
	} else if (errno != ENOENT) {
		syslog(LOG_ERR, "Can't access: %s; error:%d %s!",
		       buffer, errno, strerror(errno));
	}

	/* Otherwise, check every device this one is stacked on. */
	snprintf(buffer, PATH_MAX, "%s/slaves", blkname);
	dir = opendir(buffer);
	if (!dir) {
		if (errno != ENOENT)
			syslog(LOG_ERR, "Can't opendir: %s; error:%d %s!",
			       buffer, errno, strerror(errno));
		goto free_buffer;
	}

	while ((entry = readdir(dir)) != NULL) {
		if (strcmp(entry->d_name, ".") == 0 ||
		    strcmp(entry->d_name, "..") == 0)
			continue;

		snprintf(buffer, PATH_MAX, "%s/slaves/%s", blkname,
			 entry->d_name);
		if (is_dev_loop(buffer)) {
			ret = true;
			break;
		}
	}
	closedir(dir);
free_buffer:
	free(buffer);
	return ret;
}

/*
 * Freeze (VSS_OP_FREEZE) or thaw (VSS_OP_THAW) every eligible filesystem
 * listed in /proc/mounts. Loop-backed, read-only, vfat and non-/dev/
 * mounts are skipped; "/" is always processed last so the freeze of the
 * root filesystem cannot block the walk itself. On a freeze failure all
 * filesystems are thawed again before reporting the error.
 * Returns 0 on success, nonzero on failure.
 */
static int vss_operate(int operation)
{
	char match[] = "/dev/";
	FILE *mounts;
	struct mntent *ent;
	struct stat sb;
	char errdir[1024] = {0};
	/*
	 * Large enough for "/sys/dev/block/<major>:<minor>" with the full
	 * 32-bit dev_t split (major up to 12 bits, minor up to 20 bits);
	 * the previous 23-byte buffer could overflow for large numbers.
	 */
	char blkdir[64];
	unsigned int cmd;
	int error = 0, root_seen = 0, save_errno = 0;

	switch (operation) {
	case VSS_OP_FREEZE:
		cmd = FIFREEZE;
		break;
	case VSS_OP_THAW:
		cmd = FITHAW;
		break;
	default:
		return -1;
	}

	mounts = setmntent("/proc/mounts", "r");
	if (mounts == NULL)
		return -1;

	while ((ent = getmntent(mounts))) {
		if (strncmp(ent->mnt_fsname, match, strlen(match)))
			continue;
		if (stat(ent->mnt_fsname, &sb)) {
			syslog(LOG_ERR, "Can't stat: %s; error:%d %s!",
			       ent->mnt_fsname, errno, strerror(errno));
		} else {
			/* major()/minor() are unsigned; bound the write. */
			snprintf(blkdir, sizeof(blkdir),
				 "/sys/dev/block/%u:%u",
				 major(sb.st_rdev), minor(sb.st_rdev));
			if (is_dev_loop(blkdir))
				continue;
		}
		if (hasmntopt(ent, MNTOPT_RO) != NULL)
			continue;
		if (strcmp(ent->mnt_type, "vfat") == 0)
			continue;
		if (strcmp(ent->mnt_dir, "/") == 0) {
			root_seen = 1;
			continue;
		}
		error |= vss_do_freeze(ent->mnt_dir, cmd);
		if (operation == VSS_OP_FREEZE) {
			if (error)
				goto err;
			fs_frozen = true;
		}
	}

	endmntent(mounts);

	if (root_seen) {
		error |= vss_do_freeze("/", cmd);
		if (operation == VSS_OP_FREEZE) {
			if (error)
				goto err;
			fs_frozen = true;
		}
	}

	if (operation == VSS_OP_THAW && !error)
		fs_frozen = false;

	goto out;
err:
	save_errno = errno;
	if (ent) {
		strncpy(errdir, ent->mnt_dir, sizeof(errdir)-1);
		endmntent(mounts);
	}
	vss_operate(VSS_OP_THAW);
	fs_frozen = false;
	/* Call syslog after we thaw all filesystems */
	if (ent)
		syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s",
		       errdir, save_errno, strerror(save_errno));
	else
		syslog(LOG_ERR, "FREEZE of / failed; error:%d %s",
		       save_errno, strerror(save_errno));
out:
	return error;
}

/* Print command-line usage to stderr. */
void print_usage(char *argv[])
{
	fprintf(stderr, "Usage: %s [options]\n"
		"Options are:\n"
		" -n, --no-daemon stay in foreground, don't daemonize\n"
		" -h, --help print this help\n", argv[0]);
}

/*
 * Daemon entry point: parse options, optionally daemonize, register with
 * the hv_vss kernel driver, then loop servicing host FREEZE/THAW/
 * HOT_BACKUP requests from /dev/vmbus/hv_vss.
 */
int main(int argc, char *argv[])
{
	int vss_fd = -1, len;
	int error;
	struct pollfd pfd;
	int op;
	struct hv_vss_msg vss_msg[1];
	int daemonize = 1, long_index = 0, opt;
	int in_handshake;
	__u32 kernel_modver;

	static struct option long_options[] = {
		{"help", no_argument, 0, 'h' },
		{"no-daemon", no_argument, 0, 'n' },
		{0, 0, 0, 0 }
	};

	while ((opt = getopt_long(argc, argv, "hn", long_options,
				  &long_index)) != -1) {
		switch (opt) {
		case 'n':
			daemonize = 0;
			break;
		case 'h':
			print_usage(argv);
			exit(0);
		default:
			print_usage(argv);
			exit(EXIT_FAILURE);
		}
	}

	if (daemonize && daemon(1, 0))
		return 1;

	openlog("Hyper-V VSS", 0, LOG_USER);
	syslog(LOG_INFO, "VSS starting; pid is:%d", getpid());

reopen_vss_fd:
	/*
	 * Re-entered after read/write failures (e.g. across hibernation).
	 * If any filesystem is still frozen, thaw it before restarting.
	 */
	if (vss_fd != -1)
		close(vss_fd);
	if (fs_frozen) {
		if (vss_operate(VSS_OP_THAW) || fs_frozen) {
			syslog(LOG_ERR, "failed to thaw file system: err=%d",
			       errno);
			exit(EXIT_FAILURE);
		}
	}

	in_handshake = 1;
	vss_fd = open("/dev/vmbus/hv_vss", O_RDWR);
	if (vss_fd < 0) {
		syslog(LOG_ERR, "open /dev/vmbus/hv_vss failed; error: %d %s",
		       errno, strerror(errno));
		exit(EXIT_FAILURE);
	}

	/*
	 * Register ourselves with the kernel.
	 */
	vss_msg->vss_hdr.operation = VSS_OP_REGISTER1;

	len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
	if (len < 0) {
		syslog(LOG_ERR, "registration to kernel failed; error: %d %s",
		       errno, strerror(errno));
		close(vss_fd);
		exit(EXIT_FAILURE);
	}

	pfd.fd = vss_fd;

	while (1) {
		pfd.events = POLLIN;
		pfd.revents = 0;

		if (poll(&pfd, 1, -1) < 0) {
			syslog(LOG_ERR, "poll failed; error:%d %s",
			       errno, strerror(errno));
			if (errno == EINVAL) {
				close(vss_fd);
				exit(EXIT_FAILURE);
			}
			else
				continue;
		}

		len = read(vss_fd, vss_msg, sizeof(struct hv_vss_msg));

		if (in_handshake) {
			/* First message carries only the module version. */
			if (len != sizeof(kernel_modver)) {
				syslog(LOG_ERR, "invalid version negotiation");
				exit(EXIT_FAILURE);
			}
			kernel_modver = *(__u32 *)vss_msg;
			in_handshake = 0;
			/* kernel_modver is unsigned; log it with %u. */
			syslog(LOG_INFO, "VSS: kernel module version: %u",
			       kernel_modver);
			continue;
		}

		if (len != sizeof(struct hv_vss_msg)) {
			syslog(LOG_ERR, "read failed; error:%d %s",
			       errno, strerror(errno));
			goto reopen_vss_fd;
		}

		op = vss_msg->vss_hdr.operation;
		error = HV_S_OK;

		switch (op) {
		case VSS_OP_FREEZE:
		case VSS_OP_THAW:
			error = vss_operate(op);
			syslog(LOG_INFO, "VSS: op=%s: %s\n",
			       op == VSS_OP_FREEZE ? "FREEZE" : "THAW",
			       error ? "failed" : "succeeded");

			if (error) {
				error = HV_E_FAIL;
				syslog(LOG_ERR, "op=%d failed!", op);
				syslog(LOG_ERR, "report it with these files:");
				syslog(LOG_ERR, "/etc/fstab and /proc/mounts");
			}
			break;
		case VSS_OP_HOT_BACKUP:
			syslog(LOG_INFO, "VSS: op=CHECK HOT BACKUP\n");
			break;
		default:
			syslog(LOG_ERR, "Illegal op:%d\n", op);
		}

		/*
		 * The write() may return an error due to the faked VSS_OP_THAW
		 * message upon hibernation. Ignore the error by resetting the
		 * dev file, i.e. closing and re-opening it.
		 */
		vss_msg->error = error;
		len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
		if (len != sizeof(struct hv_vss_msg)) {
			syslog(LOG_ERR, "write failed; error: %d %s",
			       errno, strerror(errno));
			goto reopen_vss_fd;
		}
	}

	close(vss_fd);
	exit(0);
}
linux-master
tools/hv/hv_vss_daemon.c
/* * An implementation of key value pair (KVP) functionality for Linux. * * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <sys/poll.h> #include <sys/utsname.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <ctype.h> #include <errno.h> #include <arpa/inet.h> #include <linux/hyperv.h> #include <ifaddrs.h> #include <netdb.h> #include <syslog.h> #include <sys/stat.h> #include <fcntl.h> #include <dirent.h> #include <net/if.h> #include <limits.h> #include <getopt.h> /* * KVP protocol: The user mode component first registers with the * kernel component. Subsequently, the kernel component requests, data * for the specified keys. In response to this message the user mode component * fills in the value corresponding to the specified key. We overload the * sequence field in the cn_msg header to define our KVP message types. * * We use this infrastructure for also supporting queries from user mode * application for state that may be maintained in the KVP kernel component. 
* */ enum key_index { FullyQualifiedDomainName = 0, IntegrationServicesVersion, /*This key is serviced in the kernel*/ NetworkAddressIPv4, NetworkAddressIPv6, OSBuildNumber, OSName, OSMajorVersion, OSMinorVersion, OSVersion, ProcessorArchitecture }; enum { IPADDR = 0, NETMASK, GATEWAY, DNS }; static int in_hand_shake; static char *os_name = ""; static char *os_major = ""; static char *os_minor = ""; static char *processor_arch; static char *os_build; static char *os_version; static char *lic_version = "Unknown version"; static char full_domain_name[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; static struct utsname uts_buf; /* * The location of the interface configuration file. */ #define KVP_CONFIG_LOC "/var/lib/hyperv" #ifndef KVP_SCRIPTS_PATH #define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/" #endif #define KVP_NET_DIR "/sys/class/net/" #define MAX_FILE_NAME 100 #define ENTRIES_PER_BLOCK 50 struct kvp_record { char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; char value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; }; struct kvp_file_state { int fd; int num_blocks; struct kvp_record *records; int num_records; char fname[MAX_FILE_NAME]; }; static struct kvp_file_state kvp_file_info[KVP_POOL_COUNT]; static void kvp_acquire_lock(int pool) { struct flock fl = {F_WRLCK, SEEK_SET, 0, 0, 0}; fl.l_pid = getpid(); if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { syslog(LOG_ERR, "Failed to acquire the lock pool: %d; error: %d %s", pool, errno, strerror(errno)); exit(EXIT_FAILURE); } } static void kvp_release_lock(int pool) { struct flock fl = {F_UNLCK, SEEK_SET, 0, 0, 0}; fl.l_pid = getpid(); if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { syslog(LOG_ERR, "Failed to release the lock pool: %d; error: %d %s", pool, errno, strerror(errno)); exit(EXIT_FAILURE); } } static void kvp_update_file(int pool) { FILE *filep; /* * We are going to write our in-memory registry out to * disk; acquire the lock first. 
*/ kvp_acquire_lock(pool); filep = fopen(kvp_file_info[pool].fname, "we"); if (!filep) { syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool, errno, strerror(errno)); kvp_release_lock(pool); exit(EXIT_FAILURE); } fwrite(kvp_file_info[pool].records, sizeof(struct kvp_record), kvp_file_info[pool].num_records, filep); if (ferror(filep) || fclose(filep)) { kvp_release_lock(pool); syslog(LOG_ERR, "Failed to write file, pool: %d", pool); exit(EXIT_FAILURE); } kvp_release_lock(pool); } static void kvp_update_mem_state(int pool) { FILE *filep; size_t records_read = 0; struct kvp_record *record = kvp_file_info[pool].records; struct kvp_record *readp; int num_blocks = kvp_file_info[pool].num_blocks; int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; kvp_acquire_lock(pool); filep = fopen(kvp_file_info[pool].fname, "re"); if (!filep) { syslog(LOG_ERR, "Failed to open file, pool: %d; error: %d %s", pool, errno, strerror(errno)); kvp_release_lock(pool); exit(EXIT_FAILURE); } for (;;) { readp = &record[records_read]; records_read += fread(readp, sizeof(struct kvp_record), ENTRIES_PER_BLOCK * num_blocks - records_read, filep); if (ferror(filep)) { syslog(LOG_ERR, "Failed to read file, pool: %d; error: %d %s", pool, errno, strerror(errno)); kvp_release_lock(pool); exit(EXIT_FAILURE); } if (!feof(filep)) { /* * We have more data to read. 
*/ num_blocks++; record = realloc(record, alloc_unit * num_blocks); if (record == NULL) { syslog(LOG_ERR, "malloc failed"); kvp_release_lock(pool); exit(EXIT_FAILURE); } continue; } break; } kvp_file_info[pool].num_blocks = num_blocks; kvp_file_info[pool].records = record; kvp_file_info[pool].num_records = records_read; fclose(filep); kvp_release_lock(pool); } static int kvp_file_init(void) { int fd; char *fname; int i; int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; if (access(KVP_CONFIG_LOC, F_OK)) { if (mkdir(KVP_CONFIG_LOC, 0755 /* rwxr-xr-x */)) { syslog(LOG_ERR, "Failed to create '%s'; error: %d %s", KVP_CONFIG_LOC, errno, strerror(errno)); exit(EXIT_FAILURE); } } for (i = 0; i < KVP_POOL_COUNT; i++) { fname = kvp_file_info[i].fname; sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i); fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */); if (fd == -1) return 1; kvp_file_info[i].fd = fd; kvp_file_info[i].num_blocks = 1; kvp_file_info[i].records = malloc(alloc_unit); if (kvp_file_info[i].records == NULL) return 1; kvp_file_info[i].num_records = 0; kvp_update_mem_state(i); } return 0; } static int kvp_key_delete(int pool, const __u8 *key, int key_size) { int i; int j, k; int num_records; struct kvp_record *record; /* * First update the in-memory state. */ kvp_update_mem_state(pool); num_records = kvp_file_info[pool].num_records; record = kvp_file_info[pool].records; for (i = 0; i < num_records; i++) { if (memcmp(key, record[i].key, key_size)) continue; /* * Found a match; just move the remaining * entries up. 
*/ if (i == (num_records - 1)) { kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; } j = i; k = j + 1; for (; k < num_records; k++) { strcpy(record[j].key, record[k].key); strcpy(record[j].value, record[k].value); j++; } kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; } return 1; } static int kvp_key_add_or_modify(int pool, const __u8 *key, int key_size, const __u8 *value, int value_size) { int i; int num_records; struct kvp_record *record; int num_blocks; if ((key_size > HV_KVP_EXCHANGE_MAX_KEY_SIZE) || (value_size > HV_KVP_EXCHANGE_MAX_VALUE_SIZE)) return 1; /* * First update the in-memory state. */ kvp_update_mem_state(pool); num_records = kvp_file_info[pool].num_records; record = kvp_file_info[pool].records; num_blocks = kvp_file_info[pool].num_blocks; for (i = 0; i < num_records; i++) { if (memcmp(key, record[i].key, key_size)) continue; /* * Found a match; just update the value - * this is the modify case. */ memcpy(record[i].value, value, value_size); kvp_update_file(pool); return 0; } /* * Need to add a new entry; */ if (num_records == (ENTRIES_PER_BLOCK * num_blocks)) { /* Need to allocate a larger array for reg entries. */ record = realloc(record, sizeof(struct kvp_record) * ENTRIES_PER_BLOCK * (num_blocks + 1)); if (record == NULL) return 1; kvp_file_info[pool].num_blocks++; } memcpy(record[i].value, value, value_size); memcpy(record[i].key, key, key_size); kvp_file_info[pool].records = record; kvp_file_info[pool].num_records++; kvp_update_file(pool); return 0; } static int kvp_get_value(int pool, const __u8 *key, int key_size, __u8 *value, int value_size) { int i; int num_records; struct kvp_record *record; if ((key_size > HV_KVP_EXCHANGE_MAX_KEY_SIZE) || (value_size > HV_KVP_EXCHANGE_MAX_VALUE_SIZE)) return 1; /* * First update the in-memory state. 
*/ kvp_update_mem_state(pool); num_records = kvp_file_info[pool].num_records; record = kvp_file_info[pool].records; for (i = 0; i < num_records; i++) { if (memcmp(key, record[i].key, key_size)) continue; /* * Found a match; just copy the value out. */ memcpy(value, record[i].value, value_size); return 0; } return 1; } static int kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, __u8 *value, int value_size) { struct kvp_record *record; /* * First update our in-memory database. */ kvp_update_mem_state(pool); record = kvp_file_info[pool].records; if (index >= kvp_file_info[pool].num_records) { return 1; } memcpy(key, record[index].key, key_size); memcpy(value, record[index].value, value_size); return 0; } void kvp_get_os_info(void) { FILE *file; char *p, buf[512]; uname(&uts_buf); os_version = uts_buf.release; os_build = strdup(uts_buf.release); os_name = uts_buf.sysname; processor_arch = uts_buf.machine; /* * The current windows host (win7) expects the build * string to be of the form: x.y.z * Strip additional information we may have. 
*/ p = strchr(os_version, '-'); if (p) *p = '\0'; /* * Parse the /etc/os-release file if present: * https://www.freedesktop.org/software/systemd/man/os-release.html */ file = fopen("/etc/os-release", "r"); if (file != NULL) { while (fgets(buf, sizeof(buf), file)) { char *value, *q; /* Ignore comments */ if (buf[0] == '#') continue; /* Split into name=value */ p = strchr(buf, '='); if (!p) continue; *p++ = 0; /* Remove quotes and newline; un-escape */ value = p; q = p; while (*p) { if (*p == '\\') { ++p; if (!*p) break; *q++ = *p++; } else if (*p == '\'' || *p == '"' || *p == '\n') { ++p; } else { *q++ = *p++; } } *q = 0; if (!strcmp(buf, "NAME")) { p = strdup(value); if (!p) break; os_name = p; } else if (!strcmp(buf, "VERSION_ID")) { p = strdup(value); if (!p) break; os_major = p; } } fclose(file); return; } /* Fallback for older RH/SUSE releases */ file = fopen("/etc/SuSE-release", "r"); if (file != NULL) goto kvp_osinfo_found; file = fopen("/etc/redhat-release", "r"); if (file != NULL) goto kvp_osinfo_found; /* * We don't have information about the os. */ return; kvp_osinfo_found: /* up to three lines */ p = fgets(buf, sizeof(buf), file); if (p) { p = strchr(buf, '\n'); if (p) *p = '\0'; p = strdup(buf); if (!p) goto done; os_name = p; /* second line */ p = fgets(buf, sizeof(buf), file); if (p) { p = strchr(buf, '\n'); if (p) *p = '\0'; p = strdup(buf); if (!p) goto done; os_major = p; /* third line */ p = fgets(buf, sizeof(buf), file); if (p) { p = strchr(buf, '\n'); if (p) *p = '\0'; p = strdup(buf); if (p) os_minor = p; } } } done: fclose(file); return; } /* * Retrieve an interface name corresponding to the specified guid. * If there is a match, the function returns a pointer * to the interface name and if not, a NULL is returned. * If a match is found, the caller is responsible for * freeing the memory. 
*/ static char *kvp_get_if_name(char *guid) { DIR *dir; struct dirent *entry; FILE *file; char *p, *x; char *if_name = NULL; char buf[256]; char dev_id[PATH_MAX]; dir = opendir(KVP_NET_DIR); if (dir == NULL) return NULL; while ((entry = readdir(dir)) != NULL) { /* * Set the state for the next pass. */ snprintf(dev_id, sizeof(dev_id), "%s%s/device/device_id", KVP_NET_DIR, entry->d_name); file = fopen(dev_id, "r"); if (file == NULL) continue; p = fgets(buf, sizeof(buf), file); if (p) { x = strchr(p, '\n'); if (x) *x = '\0'; if (!strcmp(p, guid)) { /* * Found the guid match; return the interface * name. The caller will free the memory. */ if_name = strdup(entry->d_name); fclose(file); break; } } fclose(file); } closedir(dir); return if_name; } /* * Retrieve the MAC address given the interface name. */ static char *kvp_if_name_to_mac(char *if_name) { FILE *file; char *p, *x; char buf[256]; char addr_file[PATH_MAX]; unsigned int i; char *mac_addr = NULL; snprintf(addr_file, sizeof(addr_file), "%s%s%s", KVP_NET_DIR, if_name, "/address"); file = fopen(addr_file, "r"); if (file == NULL) return NULL; p = fgets(buf, sizeof(buf), file); if (p) { x = strchr(p, '\n'); if (x) *x = '\0'; for (i = 0; i < strlen(p); i++) p[i] = toupper(p[i]); mac_addr = strdup(p); } fclose(file); return mac_addr; } static void kvp_process_ipconfig_file(char *cmd, char *config_buf, unsigned int len, int element_size, int offset) { char buf[256]; char *p; char *x; FILE *file; /* * First execute the command. 
*/ file = popen(cmd, "r"); if (file == NULL) return; if (offset == 0) memset(config_buf, 0, len); while ((p = fgets(buf, sizeof(buf), file)) != NULL) { if (len < strlen(config_buf) + element_size + 1) break; x = strchr(p, '\n'); if (x) *x = '\0'; strcat(config_buf, p); strcat(config_buf, ";"); } pclose(file); } static void kvp_get_ipconfig_info(char *if_name, struct hv_kvp_ipaddr_value *buffer) { char cmd[512]; char dhcp_info[128]; char *p; FILE *file; /* * Get the address of default gateway (ipv4). */ sprintf(cmd, "%s %s", "ip route show dev", if_name); strcat(cmd, " | awk '/default/ {print $3 }'"); /* * Execute the command to gather gateway info. */ kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, (MAX_GATEWAY_SIZE * 2), INET_ADDRSTRLEN, 0); /* * Get the address of default gateway (ipv6). */ sprintf(cmd, "%s %s", "ip -f inet6 route show dev", if_name); strcat(cmd, " | awk '/default/ {print $3 }'"); /* * Execute the command to gather gateway info (ipv6). */ kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, (MAX_GATEWAY_SIZE * 2), INET6_ADDRSTRLEN, 1); /* * Gather the DNS state. * Since there is no standard way to get this information * across various distributions of interest; we just invoke * an external script that needs to be ported across distros * of interest. * * Following is the expected format of the information from the script: * * ipaddr1 (nameserver1) * ipaddr2 (nameserver2) * . * . */ sprintf(cmd, KVP_SCRIPTS_PATH "%s", "hv_get_dns_info"); /* * Execute the command to gather DNS info. */ kvp_process_ipconfig_file(cmd, (char *)buffer->dns_addr, (MAX_IP_ADDR_SIZE * 2), INET_ADDRSTRLEN, 0); /* * Gather the DHCP state. * We will gather this state by invoking an external script. * The parameter to the script is the interface name. * Here is the expected output: * * Enabled: DHCP enabled. 
*/ sprintf(cmd, KVP_SCRIPTS_PATH "%s %s", "hv_get_dhcp_info", if_name); file = popen(cmd, "r"); if (file == NULL) return; p = fgets(dhcp_info, sizeof(dhcp_info), file); if (p == NULL) { pclose(file); return; } if (!strncmp(p, "Enabled", 7)) buffer->dhcp_enabled = 1; else buffer->dhcp_enabled = 0; pclose(file); } static unsigned int hweight32(unsigned int *w) { unsigned int res = *w - ((*w >> 1) & 0x55555555); res = (res & 0x33333333) + ((res >> 2) & 0x33333333); res = (res + (res >> 4)) & 0x0F0F0F0F; res = res + (res >> 8); return (res + (res >> 16)) & 0x000000FF; } static int kvp_process_ip_address(void *addrp, int family, char *buffer, int length, int *offset) { struct sockaddr_in *addr; struct sockaddr_in6 *addr6; int addr_length; char tmp[50]; const char *str; if (family == AF_INET) { addr = addrp; str = inet_ntop(family, &addr->sin_addr, tmp, 50); addr_length = INET_ADDRSTRLEN; } else { addr6 = addrp; str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50); addr_length = INET6_ADDRSTRLEN; } if ((length - *offset) < addr_length + 2) return HV_E_FAIL; if (str == NULL) { strcpy(buffer, "inet_ntop failed\n"); return HV_E_FAIL; } if (*offset == 0) strcpy(buffer, tmp); else { strcat(buffer, ";"); strcat(buffer, tmp); } *offset += strlen(str) + 1; return 0; } static int kvp_get_ip_info(int family, char *if_name, int op, void *out_buffer, unsigned int length) { struct ifaddrs *ifap; struct ifaddrs *curp; int offset = 0; int sn_offset = 0; int error = 0; char *buffer; struct hv_kvp_ipaddr_value *ip_buffer = NULL; char cidr_mask[5]; /* /xyz */ int weight; int i; unsigned int *w; char *sn_str; struct sockaddr_in6 *addr6; if (op == KVP_OP_ENUMERATE) { buffer = out_buffer; } else { ip_buffer = out_buffer; buffer = (char *)ip_buffer->ip_addr; ip_buffer->addr_family = 0; } /* * On entry into this function, the buffer is capable of holding the * maximum key value. 
*/ if (getifaddrs(&ifap)) { strcpy(buffer, "getifaddrs failed\n"); return HV_E_FAIL; } curp = ifap; while (curp != NULL) { if (curp->ifa_addr == NULL) { curp = curp->ifa_next; continue; } if ((if_name != NULL) && (strncmp(curp->ifa_name, if_name, strlen(if_name)))) { /* * We want info about a specific interface; * just continue. */ curp = curp->ifa_next; continue; } /* * We only support two address families: AF_INET and AF_INET6. * If a family value of 0 is specified, we collect both * supported address families; if not we gather info on * the specified address family. */ if ((((family != 0) && (curp->ifa_addr->sa_family != family))) || (curp->ifa_flags & IFF_LOOPBACK)) { curp = curp->ifa_next; continue; } if ((curp->ifa_addr->sa_family != AF_INET) && (curp->ifa_addr->sa_family != AF_INET6)) { curp = curp->ifa_next; continue; } if (op == KVP_OP_GET_IP_INFO) { /* * Gather info other than the IP address. * IP address info will be gathered later. */ if (curp->ifa_addr->sa_family == AF_INET) { ip_buffer->addr_family |= ADDR_FAMILY_IPV4; /* * Get subnet info. */ error = kvp_process_ip_address( curp->ifa_netmask, AF_INET, (char *) ip_buffer->sub_net, length, &sn_offset); if (error) goto gather_ipaddr; } else { ip_buffer->addr_family |= ADDR_FAMILY_IPV6; /* * Get subnet info in CIDR format. */ weight = 0; sn_str = (char *)ip_buffer->sub_net; addr6 = (struct sockaddr_in6 *) curp->ifa_netmask; w = addr6->sin6_addr.s6_addr32; for (i = 0; i < 4; i++) weight += hweight32(&w[i]); sprintf(cidr_mask, "/%d", weight); if (length < sn_offset + strlen(cidr_mask) + 1) goto gather_ipaddr; if (sn_offset == 0) strcpy(sn_str, cidr_mask); else { strcat((char *)ip_buffer->sub_net, ";"); strcat(sn_str, cidr_mask); } sn_offset += strlen(sn_str) + 1; } /* * Collect other ip related configuration info. 
*/ kvp_get_ipconfig_info(if_name, ip_buffer); } gather_ipaddr: error = kvp_process_ip_address(curp->ifa_addr, curp->ifa_addr->sa_family, buffer, length, &offset); if (error) goto getaddr_done; curp = curp->ifa_next; } getaddr_done: freeifaddrs(ifap); return error; } /* * Retrieve the IP given the MAC address. */ static int kvp_mac_to_ip(struct hv_kvp_ipaddr_value *kvp_ip_val) { char *mac = (char *)kvp_ip_val->adapter_id; DIR *dir; struct dirent *entry; FILE *file; char *p, *x; char *if_name = NULL; char buf[256]; char dev_id[PATH_MAX]; unsigned int i; int error = HV_E_FAIL; dir = opendir(KVP_NET_DIR); if (dir == NULL) return HV_E_FAIL; while ((entry = readdir(dir)) != NULL) { /* * Set the state for the next pass. */ snprintf(dev_id, sizeof(dev_id), "%s%s/address", KVP_NET_DIR, entry->d_name); file = fopen(dev_id, "r"); if (file == NULL) continue; p = fgets(buf, sizeof(buf), file); fclose(file); if (!p) continue; x = strchr(p, '\n'); if (x) *x = '\0'; for (i = 0; i < strlen(p); i++) p[i] = toupper(p[i]); if (strcmp(p, mac)) continue; /* * Found the MAC match. * A NIC (e.g. VF) matching the MAC, but without IP, is skipped. 
*/ if_name = entry->d_name; if (!if_name) continue; error = kvp_get_ip_info(0, if_name, KVP_OP_GET_IP_INFO, kvp_ip_val, MAX_IP_ADDR_SIZE * 2); if (!error && strlen((char *)kvp_ip_val->ip_addr)) break; } closedir(dir); return error; } static int expand_ipv6(char *addr, int type) { int ret; struct in6_addr v6_addr; ret = inet_pton(AF_INET6, addr, &v6_addr); if (ret != 1) { if (type == NETMASK) return 1; return 0; } sprintf(addr, "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:" "%02x%02x:%02x%02x:%02x%02x", (int)v6_addr.s6_addr[0], (int)v6_addr.s6_addr[1], (int)v6_addr.s6_addr[2], (int)v6_addr.s6_addr[3], (int)v6_addr.s6_addr[4], (int)v6_addr.s6_addr[5], (int)v6_addr.s6_addr[6], (int)v6_addr.s6_addr[7], (int)v6_addr.s6_addr[8], (int)v6_addr.s6_addr[9], (int)v6_addr.s6_addr[10], (int)v6_addr.s6_addr[11], (int)v6_addr.s6_addr[12], (int)v6_addr.s6_addr[13], (int)v6_addr.s6_addr[14], (int)v6_addr.s6_addr[15]); return 1; } static int is_ipv4(char *addr) { int ret; struct in_addr ipv4_addr; ret = inet_pton(AF_INET, addr, &ipv4_addr); if (ret == 1) return 1; return 0; } static int parse_ip_val_buffer(char *in_buf, int *offset, char *out_buf, int out_len) { char *x; char *start; /* * in_buf has sequence of characters that are separated by * the character ';'. The last sequence does not have the * terminating ";" character. */ start = in_buf + *offset; x = strchr(start, ';'); if (x) *x = 0; else x = start + strlen(start); if (strlen(start) != 0) { int i = 0; /* * Get rid of leading spaces. 
*/ while (start[i] == ' ') i++; if ((x - start) <= out_len) { strcpy(out_buf, (start + i)); *offset += (x - start) + 1; return 1; } } return 0; } static int kvp_write_file(FILE *f, char *s1, char *s2, char *s3) { int ret; ret = fprintf(f, "%s%s%s%s\n", s1, s2, "=", s3); if (ret < 0) return HV_E_FAIL; return 0; } static int process_ip_string(FILE *f, char *ip_string, int type) { int error = 0; char addr[INET6_ADDRSTRLEN]; int i = 0; int j = 0; char str[256]; char sub_str[13]; int offset = 0; memset(addr, 0, sizeof(addr)); while (parse_ip_val_buffer(ip_string, &offset, addr, (MAX_IP_ADDR_SIZE * 2))) { sub_str[0] = 0; if (is_ipv4(addr)) { switch (type) { case IPADDR: snprintf(str, sizeof(str), "%s", "IPADDR"); break; case NETMASK: snprintf(str, sizeof(str), "%s", "NETMASK"); break; case GATEWAY: snprintf(str, sizeof(str), "%s", "GATEWAY"); break; case DNS: snprintf(str, sizeof(str), "%s", "DNS"); break; } if (type == DNS) { snprintf(sub_str, sizeof(sub_str), "%d", ++i); } else if (type == GATEWAY && i == 0) { ++i; } else { snprintf(sub_str, sizeof(sub_str), "%d", i++); } } else if (expand_ipv6(addr, type)) { switch (type) { case IPADDR: snprintf(str, sizeof(str), "%s", "IPV6ADDR"); break; case NETMASK: snprintf(str, sizeof(str), "%s", "IPV6NETMASK"); break; case GATEWAY: snprintf(str, sizeof(str), "%s", "IPV6_DEFAULTGW"); break; case DNS: snprintf(str, sizeof(str), "%s", "DNS"); break; } if (type == DNS) { snprintf(sub_str, sizeof(sub_str), "%d", ++i); } else if (j == 0) { ++j; } else { snprintf(sub_str, sizeof(sub_str), "_%d", j++); } } else { return HV_INVALIDARG; } error = kvp_write_file(f, str, sub_str, addr); if (error) return error; memset(addr, 0, sizeof(addr)); } return 0; } static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val) { int error = 0; char if_file[PATH_MAX]; FILE *file; char cmd[PATH_MAX]; char *mac_addr; int str_len; /* * Set the configuration for the specified interface with * the information provided. 
Since there is no standard * way to configure an interface, we will have an external * script that does the job of configuring the interface and * flushing the configuration. * * The parameters passed to this external script are: * 1. A configuration file that has the specified configuration. * * We will embed the name of the interface in the configuration * file: ifcfg-ethx (where ethx is the interface name). * * The information provided here may be more than what is needed * in a given distro to configure the interface and so are free * ignore information that may not be relevant. * * Here is the format of the ip configuration file: * * HWADDR=macaddr * DEVICE=interface name * BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured * or "none" if no boot-time protocol should be used) * * IPADDR0=ipaddr1 * IPADDR1=ipaddr2 * IPADDRx=ipaddry (where y = x + 1) * * NETMASK0=netmask1 * NETMASKx=netmasky (where y = x + 1) * * GATEWAY=ipaddr1 * GATEWAYx=ipaddry (where y = x + 1) * * DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc) * * IPV6 addresses will be tagged as IPV6ADDR, IPV6 gateway will be * tagged as IPV6_DEFAULTGW and IPV6 NETMASK will be tagged as * IPV6NETMASK. * * The host can specify multiple ipv4 and ipv6 addresses to be * configured for the interface. Furthermore, the configuration * needs to be persistent. A subsequent GET call on the interface * is expected to return the configuration that is set via the SET * call. */ snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC, "/ifcfg-", if_name); file = fopen(if_file, "w"); if (file == NULL) { syslog(LOG_ERR, "Failed to open config file; error: %d %s", errno, strerror(errno)); return HV_E_FAIL; } /* * First write out the MAC address. 
*/ mac_addr = kvp_if_name_to_mac(if_name); if (mac_addr == NULL) { error = HV_E_FAIL; goto setval_error; } error = kvp_write_file(file, "HWADDR", "", mac_addr); free(mac_addr); if (error) goto setval_error; error = kvp_write_file(file, "DEVICE", "", if_name); if (error) goto setval_error; /* * The dhcp_enabled flag is only for IPv4. In the case the host only * injects an IPv6 address, the flag is true, but we still need to * proceed to parse and pass the IPv6 information to the * disto-specific script hv_set_ifconfig. */ if (new_val->dhcp_enabled) { error = kvp_write_file(file, "BOOTPROTO", "", "dhcp"); if (error) goto setval_error; } else { error = kvp_write_file(file, "BOOTPROTO", "", "none"); if (error) goto setval_error; } /* * Write the configuration for ipaddress, netmask, gateway and * name servers. */ error = process_ip_string(file, (char *)new_val->ip_addr, IPADDR); if (error) goto setval_error; error = process_ip_string(file, (char *)new_val->sub_net, NETMASK); if (error) goto setval_error; error = process_ip_string(file, (char *)new_val->gate_way, GATEWAY); if (error) goto setval_error; error = process_ip_string(file, (char *)new_val->dns_addr, DNS); if (error) goto setval_error; fclose(file); /* * Now that we have populated the configuration file, * invoke the external script to do its magic. */ str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s", "hv_set_ifconfig", if_file); /* * This is a little overcautious, but it's necessary to suppress some * false warnings from gcc 8.0.1. 
*/ if (str_len <= 0 || (unsigned int)str_len >= sizeof(cmd)) { syslog(LOG_ERR, "Cmd '%s' (len=%d) may be too long", cmd, str_len); return HV_E_FAIL; } if (system(cmd)) { syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s", cmd, errno, strerror(errno)); return HV_E_FAIL; } return 0; setval_error: syslog(LOG_ERR, "Failed to write config file"); fclose(file); return error; } static void kvp_get_domain_name(char *buffer, int length) { struct addrinfo hints, *info ; int error = 0; gethostname(buffer, length); memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; /*Get only ipv4 addrinfo. */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_CANONNAME; error = getaddrinfo(buffer, NULL, &hints, &info); if (error != 0) { snprintf(buffer, length, "getaddrinfo failed: 0x%x %s", error, gai_strerror(error)); return; } snprintf(buffer, length, "%s", info->ai_canonname); freeaddrinfo(info); } void print_usage(char *argv[]) { fprintf(stderr, "Usage: %s [options]\n" "Options are:\n" " -n, --no-daemon stay in foreground, don't daemonize\n" " -h, --help print this help\n", argv[0]); } int main(int argc, char *argv[]) { int kvp_fd = -1, len; int error; struct pollfd pfd; char *p; struct hv_kvp_msg hv_msg[1]; char *key_value; char *key_name; int op; int pool; char *if_name; struct hv_kvp_ipaddr_value *kvp_ip_val; int daemonize = 1, long_index = 0, opt; static struct option long_options[] = { {"help", no_argument, 0, 'h' }, {"no-daemon", no_argument, 0, 'n' }, {0, 0, 0, 0 } }; while ((opt = getopt_long(argc, argv, "hn", long_options, &long_index)) != -1) { switch (opt) { case 'n': daemonize = 0; break; case 'h': print_usage(argv); exit(0); default: print_usage(argv); exit(EXIT_FAILURE); } } if (daemonize && daemon(1, 0)) return 1; openlog("KVP", 0, LOG_USER); syslog(LOG_INFO, "KVP starting; pid is:%d", getpid()); /* * Retrieve OS release information. 
*/ kvp_get_os_info(); /* * Cache Fully Qualified Domain Name because getaddrinfo takes an * unpredictable amount of time to finish. */ kvp_get_domain_name(full_domain_name, sizeof(full_domain_name)); if (kvp_file_init()) { syslog(LOG_ERR, "Failed to initialize the pools"); exit(EXIT_FAILURE); } reopen_kvp_fd: if (kvp_fd != -1) close(kvp_fd); in_hand_shake = 1; kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC); if (kvp_fd < 0) { syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s", errno, strerror(errno)); exit(EXIT_FAILURE); } /* * Register ourselves with the kernel. */ hv_msg->kvp_hdr.operation = KVP_OP_REGISTER1; len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg)); if (len != sizeof(struct hv_kvp_msg)) { syslog(LOG_ERR, "registration to kernel failed; error: %d %s", errno, strerror(errno)); close(kvp_fd); exit(EXIT_FAILURE); } pfd.fd = kvp_fd; while (1) { pfd.events = POLLIN; pfd.revents = 0; if (poll(&pfd, 1, -1) < 0) { syslog(LOG_ERR, "poll failed; error: %d %s", errno, strerror(errno)); if (errno == EINVAL) { close(kvp_fd); exit(EXIT_FAILURE); } else continue; } len = read(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg)); if (len != sizeof(struct hv_kvp_msg)) { syslog(LOG_ERR, "read failed; error:%d %s", errno, strerror(errno)); goto reopen_kvp_fd; } /* * We will use the KVP header information to pass back * the error from this daemon. So, first copy the state * and set the error code to success. */ op = hv_msg->kvp_hdr.operation; pool = hv_msg->kvp_hdr.pool; hv_msg->error = HV_S_OK; if ((in_hand_shake) && (op == KVP_OP_REGISTER1)) { /* * Driver is registering with us; stash away the version * information. 
*/ in_hand_shake = 0; p = (char *)hv_msg->body.kvp_register.version; lic_version = malloc(strlen(p) + 1); if (lic_version) { strcpy(lic_version, p); syslog(LOG_INFO, "KVP LIC Version: %s", lic_version); } else { syslog(LOG_ERR, "malloc failed"); } continue; } switch (op) { case KVP_OP_GET_IP_INFO: kvp_ip_val = &hv_msg->body.kvp_ip_val; error = kvp_mac_to_ip(kvp_ip_val); if (error) hv_msg->error = error; break; case KVP_OP_SET_IP_INFO: kvp_ip_val = &hv_msg->body.kvp_ip_val; if_name = kvp_get_if_name( (char *)kvp_ip_val->adapter_id); if (if_name == NULL) { /* * We could not map the guid to an * interface name; return error. */ hv_msg->error = HV_GUID_NOTFOUND; break; } error = kvp_set_ip_info(if_name, kvp_ip_val); if (error) hv_msg->error = error; free(if_name); break; case KVP_OP_SET: if (kvp_key_add_or_modify(pool, hv_msg->body.kvp_set.data.key, hv_msg->body.kvp_set.data.key_size, hv_msg->body.kvp_set.data.value, hv_msg->body.kvp_set.data.value_size)) hv_msg->error = HV_S_CONT; break; case KVP_OP_GET: if (kvp_get_value(pool, hv_msg->body.kvp_set.data.key, hv_msg->body.kvp_set.data.key_size, hv_msg->body.kvp_set.data.value, hv_msg->body.kvp_set.data.value_size)) hv_msg->error = HV_S_CONT; break; case KVP_OP_DELETE: if (kvp_key_delete(pool, hv_msg->body.kvp_delete.key, hv_msg->body.kvp_delete.key_size)) hv_msg->error = HV_S_CONT; break; default: break; } if (op != KVP_OP_ENUMERATE) goto kvp_done; /* * If the pool is KVP_POOL_AUTO, dynamically generate * both the key and the value; if not read from the * appropriate pool. 
*/ if (pool != KVP_POOL_AUTO) { if (kvp_pool_enumerate(pool, hv_msg->body.kvp_enum_data.index, hv_msg->body.kvp_enum_data.data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE, hv_msg->body.kvp_enum_data.data.value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE)) hv_msg->error = HV_S_CONT; goto kvp_done; } key_name = (char *)hv_msg->body.kvp_enum_data.data.key; key_value = (char *)hv_msg->body.kvp_enum_data.data.value; switch (hv_msg->body.kvp_enum_data.index) { case FullyQualifiedDomainName: strcpy(key_value, full_domain_name); strcpy(key_name, "FullyQualifiedDomainName"); break; case IntegrationServicesVersion: strcpy(key_name, "IntegrationServicesVersion"); strcpy(key_value, lic_version); break; case NetworkAddressIPv4: kvp_get_ip_info(AF_INET, NULL, KVP_OP_ENUMERATE, key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); strcpy(key_name, "NetworkAddressIPv4"); break; case NetworkAddressIPv6: kvp_get_ip_info(AF_INET6, NULL, KVP_OP_ENUMERATE, key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); strcpy(key_name, "NetworkAddressIPv6"); break; case OSBuildNumber: strcpy(key_value, os_build); strcpy(key_name, "OSBuildNumber"); break; case OSName: strcpy(key_value, os_name); strcpy(key_name, "OSName"); break; case OSMajorVersion: strcpy(key_value, os_major); strcpy(key_name, "OSMajorVersion"); break; case OSMinorVersion: strcpy(key_value, os_minor); strcpy(key_name, "OSMinorVersion"); break; case OSVersion: strcpy(key_value, os_version); strcpy(key_name, "OSVersion"); break; case ProcessorArchitecture: strcpy(key_value, processor_arch); strcpy(key_name, "ProcessorArchitecture"); break; default: hv_msg->error = HV_S_CONT; break; } /* * Send the value back to the kernel. Note: the write() may * return an error due to hibernation; we can ignore the error * by resetting the dev file, i.e. closing and re-opening it. 
*/ kvp_done: len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg)); if (len != sizeof(struct hv_kvp_msg)) { syslog(LOG_ERR, "write failed; error: %d %s", errno, strerror(errno)); goto reopen_kvp_fd; } } close(kvp_fd); exit(0); }
linux-master
tools/hv/hv_kvp_daemon.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sample application for SMBIOS communication over WMI interface
 * Performs the following:
 * - Simple cmd_class/cmd_select lookup for TPM information
 * - Simple query of known tokens and their values
 * - Simple activation of a token
 *
 * Copyright (C) 2017 Dell, Inc.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* if uapi header isn't installed, this might not yet exist */
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#include <linux/wmi.h>

/* It would be better to discover these using udev, but for a simple
 * application they're hardcoded
 */
static const char *ioctl_devfs = "/dev/wmi/dell-smbios";
static const char *token_sysfs =
			"/sys/bus/platform/devices/dell-smbios.0/tokens";

/* Pretty-print an SMBIOS call buffer: the call inputs and the results. */
static void show_buffer(struct dell_wmi_smbios_buffer *buffer)
{
	printf("Call: %x/%x [%x,%x,%x,%x]\nResults: [%8x,%8x,%8x,%8x]\n",
	buffer->std.cmd_class, buffer->std.cmd_select,
	buffer->std.input[0], buffer->std.input[1],
	buffer->std.input[2], buffer->std.input[3],
	buffer->std.output[0], buffer->std.output[1],
	buffer->std.output[2], buffer->std.output[3]);
}

/*
 * Execute one SMBIOS call through the dell-smbios WMI character device.
 * Returns the ioctl() result, or -errno if the device cannot be opened.
 */
static int run_wmi_smbios_cmd(struct dell_wmi_smbios_buffer *buffer)
{
	int fd;
	int ret;

	fd = open(ioctl_devfs, O_NONBLOCK);
	if (fd < 0)
		return -errno;	/* was: ioctl() was issued on an invalid fd */
	ret = ioctl(fd, DELL_WMI_SMBIOS_CMD, buffer);
	close(fd);
	return ret;
}

/*
 * Look up a token's SMBIOS location and value via sysfs.
 * Returns 0 on success and fills *location and *value; non-zero on
 * any sysfs access failure or when the token location is 0.
 */
static int find_token(__u16 token, __u16 *location, __u16 *value)
{
	char location_sysfs[60];
	char value_sysfs[57];
	char buf[4096];
	size_t len;
	FILE *f;
	int ret;

	ret = sprintf(value_sysfs, "%s/%04x_value", token_sysfs, token);
	if (ret < 0) {
		printf("sprintf value failed\n");
		return 2;
	}
	f = fopen(value_sysfs, "rb");
	if (!f) {
		printf("failed to open %s\n", value_sysfs);
		return 2;
	}
	/* fread() does not NUL-terminate; do it before strtol() */
	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[len] = '\0';
	*value = (__u16) strtol(buf, NULL, 16);

	ret = sprintf(location_sysfs, "%s/%04x_location", token_sysfs, token);
	if (ret < 0) {
		printf("sprintf location failed\n");
		return 1;
	}
	f = fopen(location_sysfs, "rb");
	if (!f) {
		printf("failed to open %s\n", location_sysfs);
		return 2;
	}
	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[len] = '\0';
	*location = (__u16) strtol(buf, NULL, 16);

	if (*location)
		return 0;
	return 2;
}

/*
 * Read a token through the SMBIOS token-read class and report whether
 * its current value equals *cmpvalue (1 = active, 0 = not), or the
 * non-zero error from the SMBIOS call.
 */
static int token_is_active(__u16 *location, __u16 *cmpvalue,
			   struct dell_wmi_smbios_buffer *buffer)
{
	int ret;

	buffer->std.cmd_class = CLASS_TOKEN_READ;
	buffer->std.cmd_select = SELECT_TOKEN_STD;
	buffer->std.input[0] = *location;
	ret = run_wmi_smbios_cmd(buffer);
	if (ret != 0 || buffer->std.output[0] != 0)
		return ret;
	ret = (buffer->std.output[1] == *cmpvalue);
	return ret;
}

/* Convenience wrapper: resolve a token and check whether it is active. */
static int query_token(__u16 token, struct dell_wmi_smbios_buffer *buffer)
{
	__u16 location;
	__u16 value;
	int ret;

	ret = find_token(token, &location, &value);
	if (ret != 0) {
		printf("unable to find token %04x\n", token);
		return 1;
	}
	return token_is_active(&location, &value, buffer);
}

/* Activate a token by writing 1 through the SMBIOS token-write class. */
static int activate_token(struct dell_wmi_smbios_buffer *buffer,
			  __u16 token)
{
	__u16 location;
	__u16 value;
	int ret;

	ret = find_token(token, &location, &value);
	if (ret != 0) {
		printf("unable to find token %04x\n", token);
		return 1;
	}
	buffer->std.cmd_class = CLASS_TOKEN_WRITE;
	buffer->std.cmd_select = SELECT_TOKEN_STD;
	buffer->std.input[0] = location;
	buffer->std.input[1] = 1;
	ret = run_wmi_smbios_cmd(buffer);
	return ret;
}

/*
 * Read the required ioctl buffer size from the devfs node.
 * Returns EXIT_SUCCESS, or -EINVAL on open failure or short read.
 */
static int query_buffer_size(__u64 *buffer_size)
{
	FILE *f;
	int ret = EXIT_SUCCESS;

	f = fopen(ioctl_devfs, "rb");
	if (!f)
		return -EINVAL;
	/* a short read leaves *buffer_size unusable */
	if (fread(buffer_size, sizeof(__u64), 1, f) != 1)
		ret = -EINVAL;
	fclose(f);
	return ret;
}

int main(void)
{
	/* NULL so that 'goto out' before allocation frees nothing (was:
	 * free() of an uninitialized pointer)
	 */
	struct dell_wmi_smbios_buffer *buffer = NULL;
	int ret;
	__u64 value = 0;

	ret = query_buffer_size(&value);
	/* was 'ret == EXIT_FAILURE': query_buffer_size() never returns that */
	if (ret != EXIT_SUCCESS || !value) {
		printf("Unable to read buffer size\n");
		goto out;
	}
	/* %llu: value is unsigned (__u64); %lld mismatched the type */
	printf("Detected required buffer size %llu\n", value);

	buffer = malloc(value);
	if (buffer == NULL) {
		printf("failed to alloc memory for ioctl\n");
		ret = -ENOMEM;
		goto out;
	}
	buffer->length = value;

	/* simple SMBIOS call for looking up TPM info */
	buffer->std.cmd_class = CLASS_FLASH_INTERFACE;
	buffer->std.cmd_select = SELECT_FLASH_INTERFACE;
	buffer->std.input[0] = 2;
	ret = run_wmi_smbios_cmd(buffer);
	if (ret) {
		printf("smbios ioctl failed: %d\n", ret);
		ret = EXIT_FAILURE;
		goto out;
	}
	show_buffer(buffer);

	/* query some tokens */
	ret = query_token(CAPSULE_EN_TOKEN, buffer);
	printf("UEFI Capsule enabled token is: %d\n", ret);
	ret = query_token(CAPSULE_DIS_TOKEN, buffer);
	printf("UEFI Capsule disabled token is: %d\n", ret);

	/* activate UEFI capsule token if disabled */
	if (ret) {
		printf("Enabling UEFI capsule token");
		if (activate_token(buffer, CAPSULE_EN_TOKEN)) {
			printf("activate failed\n");
			ret = -1;
			goto out;
		}
	}
	ret = EXIT_SUCCESS;
out:
	free(buffer);
	return ret;
}
linux-master
tools/wmi/dell-smbios-example.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/firewire-constants.h> #include <stdio.h> #include <stdlib.h> #include "list.h" #include "nosy-dump.h" #define CSR_FCP_COMMAND 0xfffff0000b00ull #define CSR_FCP_RESPONSE 0xfffff0000d00ull static const char * const ctype_names[] = { [0x0] = "control", [0x8] = "not implemented", [0x1] = "status", [0x9] = "accepted", [0x2] = "specific inquiry", [0xa] = "rejected", [0x3] = "notify", [0xb] = "in transition", [0x4] = "general inquiry", [0xc] = "stable", [0x5] = "(reserved 0x05)", [0xd] = "changed", [0x6] = "(reserved 0x06)", [0xe] = "(reserved 0x0e)", [0x7] = "(reserved 0x07)", [0xf] = "interim", }; static const char * const subunit_type_names[] = { [0x00] = "monitor", [0x10] = "(reserved 0x10)", [0x01] = "audio", [0x11] = "(reserved 0x11)", [0x02] = "printer", [0x12] = "(reserved 0x12)", [0x03] = "disc", [0x13] = "(reserved 0x13)", [0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)", [0x05] = "tuner", [0x15] = "(reserved 0x15)", [0x06] = "ca", [0x16] = "(reserved 0x16)", [0x07] = "camera", [0x17] = "(reserved 0x17)", [0x08] = "(reserved 0x08)", [0x18] = "(reserved 0x18)", [0x09] = "panel", [0x19] = "(reserved 0x19)", [0x0a] = "bulletin board", [0x1a] = "(reserved 0x1a)", [0x0b] = "camera storage", [0x1b] = "(reserved 0x1b)", [0x0c] = "(reserved 0x0c)", [0x1c] = "vendor unique", [0x0d] = "(reserved 0x0d)", [0x1d] = "all subunit types", [0x0e] = "(reserved 0x0e)", [0x1e] = "subunit_type extended to next byte", [0x0f] = "(reserved 0x0f)", [0x1f] = "unit", }; struct avc_enum { int value; const char *name; }; struct avc_field { const char *name; /* Short name for field. */ int offset; /* Location of field, specified in bits; */ /* negative means from end of packet. */ int width; /* Width of field, 0 means use data_length. 
*/ struct avc_enum *names; }; struct avc_opcode_info { const char *name; struct avc_field fields[8]; }; struct avc_enum power_field_names[] = { { 0x70, "on" }, { 0x60, "off" }, { } }; static const struct avc_opcode_info opcode_info[256] = { /* TA Document 1999026 */ /* AV/C Digital Interface Command Set General Specification 4.0 */ [0xb2] = { "power", { { "state", 0, 8, power_field_names } } }, [0x30] = { "unit info", { { "foo", 0, 8 }, { "unit_type", 8, 5 }, { "unit", 13, 3 }, { "company id", 16, 24 }, } }, [0x31] = { "subunit info" }, [0x01] = { "reserve" }, [0xb0] = { "version" }, [0x00] = { "vendor dependent" }, [0x02] = { "plug info" }, [0x12] = { "channel usage" }, [0x24] = { "connect" }, [0x20] = { "connect av" }, [0x22] = { "connections" }, [0x11] = { "digital input" }, [0x10] = { "digital output" }, [0x25] = { "disconnect" }, [0x21] = { "disconnect av" }, [0x19] = { "input plug signal format" }, [0x18] = { "output plug signal format" }, [0x1f] = { "general bus setup" }, /* TA Document 1999025 */ /* AV/C Descriptor Mechanism Specification Version 1.0 */ [0x0c] = { "create descriptor" }, [0x08] = { "open descriptor" }, [0x09] = { "read descriptor" }, [0x0a] = { "write descriptor" }, [0x05] = { "open info block" }, [0x06] = { "read info block" }, [0x07] = { "write info block" }, [0x0b] = { "search descriptor" }, [0x0d] = { "object number select" }, /* TA Document 1999015 */ /* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */ [0xb3] = { "rate", { { "subfunction", 0, 8 }, { "result", 8, 8 }, { "plug_type", 16, 8 }, { "plug_id", 16, 8 }, } }, /* TA Document 1999008 */ /* AV/C Audio Subunit Specification 1.0 */ [0xb8] = { "function block" }, /* TA Document 2001001 */ /* AV/C Panel Subunit Specification 1.1 */ [0x7d] = { "gui update" }, [0x7e] = { "push gui data" }, [0x7f] = { "user action" }, [0x7c] = { "pass through" }, /* */ [0x26] = { "asynchronous connection" }, }; struct avc_frame { uint32_t operand0:8; uint32_t opcode:8; uint32_t 
subunit_id:3;	/* low bits first: layout mirrors the FCP frame header */
	uint32_t subunit_type:5;
	uint32_t ctype:4;
	uint32_t cts:4;	/* command/transaction set; 0 selects AV/C below */
};

/*
 * Pretty-print one AV/C frame from the transaction's request packet:
 * ctype, subunit, opcode (by name when known), and the names of the
 * opcode's operand fields.
 */
static void
decode_avc(struct link_transaction *t)
{
	struct avc_frame *frame =
	    (struct avc_frame *) t->request->packet.write_block.data;
	const struct avc_opcode_info *info;
	const char *name;
	char buffer[32];
	int i;

	info = &opcode_info[frame->opcode];
	if (info->name == NULL) {
		/* opcode_info[] is sparse; synthesize a name for gaps */
		snprintf(buffer, sizeof(buffer),
			 "(unknown opcode 0x%02x)", frame->opcode);
		name = buffer;
	} else {
		name = info->name;
	}

	printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s",
	    ctype_names[frame->ctype], subunit_type_names[frame->subunit_type],
	    frame->subunit_id, name);
	for (i = 0; info->fields[i].name != NULL; i++)
		printf(", %s", info->fields[i].name);
	printf("\n");
}

/*
 * Decode a transaction as an FCP frame if it is a block write to the
 * FCP command or response register.  Dispatches on the cts field
 * (0 == AV/C gets a full decode; others just get labeled).
 * Returns 1 if the transaction was consumed as FCP, 0 otherwise.
 */
int
decode_fcp(struct link_transaction *t)
{
	struct avc_frame *frame =
	    (struct avc_frame *) t->request->packet.write_block.data;
	unsigned long long offset =
	    ((unsigned long long) t->request->packet.common.offset_high << 32) |
	    t->request->packet.common.offset_low;

	if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST)
		return 0;

	if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) {
		switch (frame->cts) {
		case 0x00:
			decode_avc(t);
			break;
		case 0x01:
			printf("cal fcp frame (cts=0x01)\n");
			break;
		case 0x02:
			printf("ehs fcp frame (cts=0x02)\n");
			break;
		case 0x03:
			printf("havi fcp frame (cts=0x03)\n");
			break;
		case 0x0e:
			printf("vendor specific fcp frame (cts=0x0e)\n");
			break;
		case 0x0f:
			printf("extended cts\n");
			break;
		default:
			printf("reserved fcp frame (ctx=0x%02x)\n", frame->cts);
			break;
		}
		return 1;
	}

	return 0;
}
linux-master
tools/firewire/decode-fcp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers * Copyright (C) 2002-2006 Kristian Høgsberg */ #include <byteswap.h> #include <endian.h> #include <fcntl.h> #include <linux/firewire-constants.h> #include <poll.h> #include <popt.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/time.h> #include <termios.h> #include <unistd.h> #include "list.h" #include "nosy-dump.h" #include "nosy-user.h" enum { PACKET_FIELD_DETAIL = 0x01, PACKET_FIELD_DATA_LENGTH = 0x02, /* Marks the fields we print in transaction view. */ PACKET_FIELD_TRANSACTION = 0x04, }; static void print_packet(uint32_t *data, size_t length); static void decode_link_packet(struct link_packet *packet, size_t length, int include_flags, int exclude_flags); static int run = 1; sig_t sys_sigint_handler; static char *option_nosy_device = "/dev/nosy"; static char *option_view = "packet"; static char *option_output; static char *option_input; static int option_hex; static int option_iso; static int option_cycle_start; static int option_version; static int option_verbose; enum { VIEW_TRANSACTION, VIEW_PACKET, VIEW_STATS, }; static const struct poptOption options[] = { { .longName = "device", .shortName = 'd', .argInfo = POPT_ARG_STRING, .arg = &option_nosy_device, .descrip = "Path to nosy device.", .argDescrip = "DEVICE" }, { .longName = "view", .argInfo = POPT_ARG_STRING, .arg = &option_view, .descrip = "Specify view of bus traffic: packet, transaction or stats.", .argDescrip = "VIEW" }, { .longName = "hex", .shortName = 'x', .argInfo = POPT_ARG_NONE, .arg = &option_hex, .descrip = "Print each packet in hex.", }, { .longName = "iso", .argInfo = POPT_ARG_NONE, .arg = &option_iso, .descrip = "Print iso packets.", }, { .longName = "cycle-start", .argInfo = POPT_ARG_NONE, .arg = &option_cycle_start, .descrip = "Print cycle start packets.", }, { .longName = "verbose", .shortName = 
'v', .argInfo = POPT_ARG_NONE, .arg = &option_verbose, .descrip = "Verbose packet view.", }, { .longName = "output", .shortName = 'o', .argInfo = POPT_ARG_STRING, .arg = &option_output, .descrip = "Log to output file.", .argDescrip = "FILENAME" }, { .longName = "input", .shortName = 'i', .argInfo = POPT_ARG_STRING, .arg = &option_input, .descrip = "Decode log from file.", .argDescrip = "FILENAME" }, { .longName = "version", .argInfo = POPT_ARG_NONE, .arg = &option_version, .descrip = "Specify print version info.", }, POPT_AUTOHELP POPT_TABLEEND }; /* Allow all ^C except the first to interrupt the program in the usual way. */ static void sigint_handler(int signal_num) { if (run == 1) { run = 0; signal(SIGINT, SIG_DFL); } } static struct subaction * subaction_create(uint32_t *data, size_t length) { struct subaction *sa; /* we put the ack in the subaction struct for easy access. */ sa = malloc(sizeof *sa - sizeof sa->packet + length); if (!sa) exit(EXIT_FAILURE); sa->ack = data[length / 4 - 1]; sa->length = length; memcpy(&sa->packet, data, length); return sa; } static void subaction_destroy(struct subaction *sa) { free(sa); } static struct list pending_transaction_list = { &pending_transaction_list, &pending_transaction_list }; static struct link_transaction * link_transaction_lookup(int request_node, int response_node, int tlabel) { struct link_transaction *t; list_for_each_entry(t, &pending_transaction_list, link) { if (t->request_node == request_node && t->response_node == response_node && t->tlabel == tlabel) return t; } t = malloc(sizeof *t); if (!t) exit(EXIT_FAILURE); t->request_node = request_node; t->response_node = response_node; t->tlabel = tlabel; list_init(&t->request_list); list_init(&t->response_list); list_append(&pending_transaction_list, &t->link); return t; } static void link_transaction_destroy(struct link_transaction *t) { struct subaction *sa; while (!list_empty(&t->request_list)) { sa = list_head(&t->request_list, struct subaction, link); 
list_remove(&sa->link); subaction_destroy(sa); } while (!list_empty(&t->response_list)) { sa = list_head(&t->response_list, struct subaction, link); list_remove(&sa->link); subaction_destroy(sa); } free(t); } struct protocol_decoder { const char *name; int (*decode)(struct link_transaction *t); }; static const struct protocol_decoder protocol_decoders[] = { { "FCP", decode_fcp } }; static void handle_transaction(struct link_transaction *t) { struct subaction *sa; int i; if (!t->request) { printf("BUG in handle_transaction\n"); return; } for (i = 0; i < array_length(protocol_decoders); i++) if (protocol_decoders[i].decode(t)) break; /* HACK: decode only fcp right now. */ return; decode_link_packet(&t->request->packet, t->request->length, PACKET_FIELD_TRANSACTION, 0); if (t->response) decode_link_packet(&t->response->packet, t->request->length, PACKET_FIELD_TRANSACTION, 0); else printf("[no response]"); if (option_verbose) { list_for_each_entry(sa, &t->request_list, link) print_packet((uint32_t *) &sa->packet, sa->length); list_for_each_entry(sa, &t->response_list, link) print_packet((uint32_t *) &sa->packet, sa->length); } printf("\r\n"); link_transaction_destroy(t); } static void clear_pending_transaction_list(void) { struct link_transaction *t; while (!list_empty(&pending_transaction_list)) { t = list_head(&pending_transaction_list, struct link_transaction, link); list_remove(&t->link); link_transaction_destroy(t); /* print unfinished transactions */ } } static const char * const tcode_names[] = { [0x0] = "write_quadlet_request", [0x6] = "read_quadlet_response", [0x1] = "write_block_request", [0x7] = "read_block_response", [0x2] = "write_response", [0x8] = "cycle_start", [0x3] = "reserved", [0x9] = "lock_request", [0x4] = "read_quadlet_request", [0xa] = "iso_data", [0x5] = "read_block_request", [0xb] = "lock_response", }; static const char * const ack_names[] = { [0x0] = "no ack", [0x8] = "reserved (0x08)", [0x1] = "ack_complete", [0x9] = "reserved (0x09)", [0x2] 
= "ack_pending", [0xa] = "reserved (0x0a)", [0x3] = "reserved (0x03)", [0xb] = "reserved (0x0b)", [0x4] = "ack_busy_x", [0xc] = "reserved (0x0c)", [0x5] = "ack_busy_a", [0xd] = "ack_data_error", [0x6] = "ack_busy_b", [0xe] = "ack_type_error", [0x7] = "reserved (0x07)", [0xf] = "reserved (0x0f)", }; static const char * const rcode_names[] = { [0x0] = "complete", [0x4] = "conflict_error", [0x1] = "reserved (0x01)", [0x5] = "data_error", [0x2] = "reserved (0x02)", [0x6] = "type_error", [0x3] = "reserved (0x03)", [0x7] = "address_error", }; static const char * const retry_names[] = { [0x0] = "retry_1", [0x1] = "retry_x", [0x2] = "retry_a", [0x3] = "retry_b", }; enum { PACKET_RESERVED, PACKET_REQUEST, PACKET_RESPONSE, PACKET_OTHER, }; struct packet_info { const char *name; int type; int response_tcode; const struct packet_field *fields; int field_count; }; struct packet_field { const char *name; /* Short name for field. */ int offset; /* Location of field, specified in bits; */ /* negative means from end of packet. */ int width; /* Width of field, 0 means use data_length. */ int flags; /* Show options. 
*/ const char * const *value_names; }; #define COMMON_REQUEST_FIELDS \ { "dest", 0, 16, PACKET_FIELD_TRANSACTION }, \ { "tl", 16, 6 }, \ { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \ { "tcode", 24, 4, PACKET_FIELD_TRANSACTION, tcode_names }, \ { "pri", 28, 4, PACKET_FIELD_DETAIL }, \ { "src", 32, 16, PACKET_FIELD_TRANSACTION }, \ { "offs", 48, 48, PACKET_FIELD_TRANSACTION } #define COMMON_RESPONSE_FIELDS \ { "dest", 0, 16 }, \ { "tl", 16, 6 }, \ { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \ { "tcode", 24, 4, 0, tcode_names }, \ { "pri", 28, 4, PACKET_FIELD_DETAIL }, \ { "src", 32, 16 }, \ { "rcode", 48, 4, PACKET_FIELD_TRANSACTION, rcode_names } static const struct packet_field read_quadlet_request_fields[] = { COMMON_REQUEST_FIELDS, { "crc", 96, 32, PACKET_FIELD_DETAIL }, { "ack", 156, 4, 0, ack_names }, }; static const struct packet_field read_quadlet_response_fields[] = { COMMON_RESPONSE_FIELDS, { "data", 96, 32, PACKET_FIELD_TRANSACTION }, { "crc", 128, 32, PACKET_FIELD_DETAIL }, { "ack", 188, 4, 0, ack_names }, }; static const struct packet_field read_block_request_fields[] = { COMMON_REQUEST_FIELDS, { "data_length", 96, 16, PACKET_FIELD_TRANSACTION }, { "extended_tcode", 112, 16 }, { "crc", 128, 32, PACKET_FIELD_DETAIL }, { "ack", 188, 4, 0, ack_names }, }; static const struct packet_field block_response_fields[] = { COMMON_RESPONSE_FIELDS, { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH }, { "extended_tcode", 112, 16 }, { "crc", 128, 32, PACKET_FIELD_DETAIL }, { "data", 160, 0, PACKET_FIELD_TRANSACTION }, { "crc", -64, 32, PACKET_FIELD_DETAIL }, { "ack", -4, 4, 0, ack_names }, }; static const struct packet_field write_quadlet_request_fields[] = { COMMON_REQUEST_FIELDS, { "data", 96, 32, PACKET_FIELD_TRANSACTION }, { "ack", -4, 4, 0, ack_names }, }; static const struct packet_field block_request_fields[] = { COMMON_REQUEST_FIELDS, { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH | PACKET_FIELD_TRANSACTION }, { "extended_tcode", 112, 16, 
PACKET_FIELD_TRANSACTION }, { "crc", 128, 32, PACKET_FIELD_DETAIL }, { "data", 160, 0, PACKET_FIELD_TRANSACTION }, { "crc", -64, 32, PACKET_FIELD_DETAIL }, { "ack", -4, 4, 0, ack_names }, }; static const struct packet_field write_response_fields[] = { COMMON_RESPONSE_FIELDS, { "reserved", 64, 32, PACKET_FIELD_DETAIL }, { "ack", -4, 4, 0, ack_names }, }; static const struct packet_field iso_data_fields[] = { { "data_length", 0, 16, PACKET_FIELD_DATA_LENGTH }, { "tag", 16, 2 }, { "channel", 18, 6 }, { "tcode", 24, 4, 0, tcode_names }, { "sy", 28, 4 }, { "crc", 32, 32, PACKET_FIELD_DETAIL }, { "data", 64, 0 }, { "crc", -64, 32, PACKET_FIELD_DETAIL }, { "ack", -4, 4, 0, ack_names }, }; static const struct packet_info packet_info[] = { { .name = "write_quadlet_request", .type = PACKET_REQUEST, .response_tcode = TCODE_WRITE_RESPONSE, .fields = write_quadlet_request_fields, .field_count = array_length(write_quadlet_request_fields) }, { .name = "write_block_request", .type = PACKET_REQUEST, .response_tcode = TCODE_WRITE_RESPONSE, .fields = block_request_fields, .field_count = array_length(block_request_fields) }, { .name = "write_response", .type = PACKET_RESPONSE, .fields = write_response_fields, .field_count = array_length(write_response_fields) }, { .name = "reserved", .type = PACKET_RESERVED, }, { .name = "read_quadlet_request", .type = PACKET_REQUEST, .response_tcode = TCODE_READ_QUADLET_RESPONSE, .fields = read_quadlet_request_fields, .field_count = array_length(read_quadlet_request_fields) }, { .name = "read_block_request", .type = PACKET_REQUEST, .response_tcode = TCODE_READ_BLOCK_RESPONSE, .fields = read_block_request_fields, .field_count = array_length(read_block_request_fields) }, { .name = "read_quadlet_response", .type = PACKET_RESPONSE, .fields = read_quadlet_response_fields, .field_count = array_length(read_quadlet_response_fields) }, { .name = "read_block_response", .type = PACKET_RESPONSE, .fields = block_response_fields, .field_count = 
array_length(block_response_fields) }, { .name = "cycle_start", .type = PACKET_OTHER, .fields = write_quadlet_request_fields, .field_count = array_length(write_quadlet_request_fields) }, { .name = "lock_request", .type = PACKET_REQUEST, .fields = block_request_fields, .field_count = array_length(block_request_fields) }, { .name = "iso_data", .type = PACKET_OTHER, .fields = iso_data_fields, .field_count = array_length(iso_data_fields) }, { .name = "lock_response", .type = PACKET_RESPONSE, .fields = block_response_fields, .field_count = array_length(block_response_fields) }, }; static int handle_request_packet(uint32_t *data, size_t length) { struct link_packet *p = (struct link_packet *) data; struct subaction *sa, *prev; struct link_transaction *t; t = link_transaction_lookup(p->common.source, p->common.destination, p->common.tlabel); sa = subaction_create(data, length); t->request = sa; if (!list_empty(&t->request_list)) { prev = list_tail(&t->request_list, struct subaction, link); if (!ACK_BUSY(prev->ack)) { /* * error, we should only see ack_busy_* before the * ack_pending/ack_complete -- this is an ack_pending * instead (ack_complete would have finished the * transaction). */ } if (prev->packet.common.tcode != sa->packet.common.tcode || prev->packet.common.tlabel != sa->packet.common.tlabel) { /* memcmp() ? */ /* error, these should match for retries. */ } } list_append(&t->request_list, &sa->link); switch (sa->ack) { case ACK_COMPLETE: if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST && p->common.tcode != TCODE_WRITE_BLOCK_REQUEST) /* error, unified transactions only allowed for write */; list_remove(&t->link); handle_transaction(t); break; case ACK_NO_ACK: case ACK_DATA_ERROR: case ACK_TYPE_ERROR: list_remove(&t->link); handle_transaction(t); break; case ACK_PENDING: /* request subaction phase over, wait for response. */ break; case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: /* ok, wait for retry. */ /* check that retry protocol is respected. 
*/ break; } return 1; } static int handle_response_packet(uint32_t *data, size_t length) { struct link_packet *p = (struct link_packet *) data; struct subaction *sa, *prev; struct link_transaction *t; t = link_transaction_lookup(p->common.destination, p->common.source, p->common.tlabel); if (list_empty(&t->request_list)) { /* unsolicited response */ } sa = subaction_create(data, length); t->response = sa; if (!list_empty(&t->response_list)) { prev = list_tail(&t->response_list, struct subaction, link); if (!ACK_BUSY(prev->ack)) { /* * error, we should only see ack_busy_* before the * ack_pending/ack_complete */ } if (prev->packet.common.tcode != sa->packet.common.tcode || prev->packet.common.tlabel != sa->packet.common.tlabel) { /* use memcmp() instead? */ /* error, these should match for retries. */ } } else { prev = list_tail(&t->request_list, struct subaction, link); if (prev->ack != ACK_PENDING) { /* * error, should not get response unless last request got * ack_pending. */ } if (packet_info[prev->packet.common.tcode].response_tcode != sa->packet.common.tcode) { /* error, tcode mismatch */ } } list_append(&t->response_list, &sa->link); switch (sa->ack) { case ACK_COMPLETE: case ACK_NO_ACK: case ACK_DATA_ERROR: case ACK_TYPE_ERROR: list_remove(&t->link); handle_transaction(t); /* transaction complete, remove t from pending list. */ break; case ACK_PENDING: /* error for responses. 
*/ break; case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: /* no problem, wait for next retry */ break; } return 1; } static int handle_packet(uint32_t *data, size_t length) { if (length == 0) { printf("bus reset\r\n"); clear_pending_transaction_list(); } else if (length > sizeof(struct phy_packet)) { struct link_packet *p = (struct link_packet *) data; switch (packet_info[p->common.tcode].type) { case PACKET_REQUEST: return handle_request_packet(data, length); case PACKET_RESPONSE: return handle_response_packet(data, length); case PACKET_OTHER: case PACKET_RESERVED: return 0; } } return 1; } static unsigned int get_bits(struct link_packet *packet, int offset, int width) { uint32_t *data = (uint32_t *) packet; uint32_t index, shift, mask; index = offset / 32 + 1; shift = 32 - (offset & 31) - width; mask = width == 32 ? ~0 : (1 << width) - 1; return (data[index] >> shift) & mask; } #if __BYTE_ORDER == __LITTLE_ENDIAN #define byte_index(i) ((i) ^ 3) #elif __BYTE_ORDER == __BIG_ENDIAN #define byte_index(i) (i) #else #error unsupported byte order. #endif static void dump_data(unsigned char *data, int length) { int i, print_length; if (length > 128) print_length = 128; else print_length = length; for (i = 0; i < print_length; i++) printf("%s%02hhx", (i % 4 == 0 && i != 0) ? 
" " : "", data[byte_index(i)]); if (print_length < length) printf(" (%d more bytes)", length - print_length); } static void decode_link_packet(struct link_packet *packet, size_t length, int include_flags, int exclude_flags) { const struct packet_info *pi; int data_length = 0; int i; pi = &packet_info[packet->common.tcode]; for (i = 0; i < pi->field_count; i++) { const struct packet_field *f = &pi->fields[i]; int offset; if (f->flags & exclude_flags) continue; if (include_flags && !(f->flags & include_flags)) continue; if (f->offset < 0) offset = length * 8 + f->offset - 32; else offset = f->offset; if (f->value_names != NULL) { uint32_t bits; bits = get_bits(packet, offset, f->width); printf("%s", f->value_names[bits]); } else if (f->width == 0) { printf("%s=[", f->name); dump_data((unsigned char *) packet + (offset / 8 + 4), data_length); printf("]"); } else { unsigned long long bits; int high_width, low_width; if ((offset & ~31) != ((offset + f->width - 1) & ~31)) { /* Bit field spans quadlet boundary. */ high_width = ((offset + 31) & ~31) - offset; low_width = f->width - high_width; bits = get_bits(packet, offset, high_width); bits = (bits << low_width) | get_bits(packet, offset + high_width, low_width); } else { bits = get_bits(packet, offset, f->width); } printf("%s=0x%0*llx", f->name, (f->width + 3) / 4, bits); if (f->flags & PACKET_FIELD_DATA_LENGTH) data_length = bits; } if (i < pi->field_count - 1) printf(", "); } } static void print_packet(uint32_t *data, size_t length) { int i; printf("%6u ", data[0]); if (length == 4) { printf("bus reset"); } else if (length < sizeof(struct phy_packet)) { printf("short packet: "); for (i = 1; i < length / 4; i++) printf("%s%08x", i == 0 ? 
"[" : " ", data[i]); printf("]"); } else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) { struct phy_packet *pp = (struct phy_packet *) data; /* phy packet are 3 quadlets: the 1 quadlet payload, * the bitwise inverse of the payload and the snoop * mode ack */ switch (pp->common.identifier) { case PHY_PACKET_CONFIGURATION: if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) { printf("ext phy config: phy_id=%02x", pp->phy_config.root_id); } else { printf("phy config:"); if (pp->phy_config.set_root) printf(" set_root_id=%02x", pp->phy_config.root_id); if (pp->phy_config.set_gap_count) printf(" set_gap_count=%d", pp->phy_config.gap_count); } break; case PHY_PACKET_LINK_ON: printf("link-on packet, phy_id=%02x", pp->link_on.phy_id); break; case PHY_PACKET_SELF_ID: if (pp->self_id.extended) { printf("extended self id: phy_id=%02x, seq=%d", pp->ext_self_id.phy_id, pp->ext_self_id.sequence); } else { static const char * const speed_names[] = { "S100", "S200", "S400", "BETA" }; printf("self id: phy_id=%02x, link %s, gap_count=%d, speed=%s%s%s", pp->self_id.phy_id, (pp->self_id.link_active ? "active" : "not active"), pp->self_id.gap_count, speed_names[pp->self_id.phy_speed], (pp->self_id.contender ? ", irm contender" : ""), (pp->self_id.initiated_reset ? ", initiator" : "")); } break; default: printf("unknown phy packet: "); for (i = 1; i < length / 4; i++) printf("%s%08x", i == 0 ? "[" : " ", data[i]); printf("]"); break; } } else { struct link_packet *packet = (struct link_packet *) data; decode_link_packet(packet, length, 0, option_verbose ? 
0 : PACKET_FIELD_DETAIL); } if (option_hex) { printf(" ["); dump_data((unsigned char *) data + 4, length - 4); printf("]"); } printf("\r\n"); } #define HIDE_CURSOR "\033[?25l" #define SHOW_CURSOR "\033[?25h" #define CLEAR "\033[H\033[2J" static void print_stats(uint32_t *data, size_t length) { static int bus_reset_count, short_packet_count, phy_packet_count; static int tcode_count[16]; static struct timeval last_update; struct timeval now; int i; if (length == 0) bus_reset_count++; else if (length < sizeof(struct phy_packet)) short_packet_count++; else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) phy_packet_count++; else { struct link_packet *packet = (struct link_packet *) data; tcode_count[packet->common.tcode]++; } gettimeofday(&now, NULL); if (now.tv_sec <= last_update.tv_sec && now.tv_usec < last_update.tv_usec + 500000) return; last_update = now; printf(CLEAR HIDE_CURSOR " bus resets : %8d\n" " short packets : %8d\n" " phy packets : %8d\n", bus_reset_count, short_packet_count, phy_packet_count); for (i = 0; i < array_length(packet_info); i++) if (packet_info[i].type != PACKET_RESERVED) printf(" %-24s: %8d\n", packet_info[i].name, tcode_count[i]); printf(SHOW_CURSOR "\n"); } static struct termios saved_attributes; static void reset_input_mode(void) { tcsetattr(STDIN_FILENO, TCSANOW, &saved_attributes); } static void set_input_mode(void) { struct termios tattr; /* Make sure stdin is a terminal. */ if (!isatty(STDIN_FILENO)) { fprintf(stderr, "Not a terminal.\n"); exit(EXIT_FAILURE); } /* Save the terminal attributes so we can restore them later. */ tcgetattr(STDIN_FILENO, &saved_attributes); atexit(reset_input_mode); /* Set the funny terminal modes. */ tcgetattr(STDIN_FILENO, &tattr); tattr.c_lflag &= ~(ICANON|ECHO); /* Clear ICANON and ECHO. 
*/ tattr.c_cc[VMIN] = 1; tattr.c_cc[VTIME] = 0; tcsetattr(STDIN_FILENO, TCSAFLUSH, &tattr); } int main(int argc, const char *argv[]) { uint32_t buf[128 * 1024]; uint32_t filter; int length, retval, view; int fd = -1; FILE *output = NULL, *input = NULL; poptContext con; char c; struct pollfd pollfds[2]; sys_sigint_handler = signal(SIGINT, sigint_handler); con = poptGetContext(NULL, argc, argv, options, 0); retval = poptGetNextOpt(con); if (retval < -1) { poptPrintUsage(con, stdout, 0); return -1; } if (option_version) { printf("dump tool for nosy sniffer, version %s\n", VERSION); return 0; } if (__BYTE_ORDER != __LITTLE_ENDIAN) fprintf(stderr, "warning: nosy has only been tested on little " "endian machines\n"); if (option_input != NULL) { input = fopen(option_input, "r"); if (input == NULL) { fprintf(stderr, "Could not open %s, %m\n", option_input); return -1; } } else { fd = open(option_nosy_device, O_RDWR); if (fd < 0) { fprintf(stderr, "Could not open %s, %m\n", option_nosy_device); return -1; } set_input_mode(); } if (strcmp(option_view, "transaction") == 0) view = VIEW_TRANSACTION; else if (strcmp(option_view, "stats") == 0) view = VIEW_STATS; else view = VIEW_PACKET; if (option_output) { output = fopen(option_output, "w"); if (output == NULL) { fprintf(stderr, "Could not open %s, %m\n", option_output); return -1; } } setvbuf(stdout, NULL, _IOLBF, BUFSIZ); filter = ~0; if (!option_iso) filter &= ~(1 << TCODE_STREAM_DATA); if (!option_cycle_start) filter &= ~(1 << TCODE_CYCLE_START); if (view == VIEW_STATS) filter = ~(1 << TCODE_CYCLE_START); ioctl(fd, NOSY_IOC_FILTER, filter); ioctl(fd, NOSY_IOC_START); pollfds[0].fd = fd; pollfds[0].events = POLLIN; pollfds[1].fd = STDIN_FILENO; pollfds[1].events = POLLIN; while (run) { if (input != NULL) { if (fread(&length, sizeof length, 1, input) != 1) return 0; fread(buf, 1, length, input); } else { poll(pollfds, 2, -1); if (pollfds[1].revents) { read(STDIN_FILENO, &c, sizeof c); switch (c) { case 'q': if (output != 
NULL) fclose(output); return 0; } } if (pollfds[0].revents) length = read(fd, buf, sizeof buf); else continue; } if (output != NULL) { fwrite(&length, sizeof length, 1, output); fwrite(buf, 1, length, output); } switch (view) { case VIEW_TRANSACTION: handle_packet(buf, length); break; case VIEW_PACKET: print_packet(buf, length); break; case VIEW_STATS: print_stats(buf, length); break; } } if (output != NULL) fclose(output); close(fd); poptFreeContext(con); return 0; }
linux-master
tools/firewire/nosy-dump.c
// SPDX-License-Identifier: GPL-2.0-only /* * Thermal monitoring tool based on the thermal netlink events. * * Copyright (C) 2022 Linaro Ltd. * * Author: Daniel Lezcano <[email protected]> */ #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <libgen.h> #include <limits.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <signal.h> #include <unistd.h> #include <syslog.h> #include <sys/epoll.h> #include <sys/stat.h> #include <sys/types.h> #include <thermal.h> #include "thermal-tools.h" struct options { int loglevel; int logopt; int interactive; int daemonize; }; struct thermal_data { struct thermal_zone *tz; struct thermal_handler *th; }; static int show_trip(struct thermal_trip *tt, __maybe_unused void *arg) { INFO("trip id=%d, type=%d, temp=%d, hyst=%d\n", tt->id, tt->type, tt->temp, tt->hyst); return 0; } static int show_temp(struct thermal_zone *tz, __maybe_unused void *arg) { thermal_cmd_get_temp(arg, tz); INFO("temperature: %d\n", tz->temp); return 0; } static int show_governor(struct thermal_zone *tz, __maybe_unused void *arg) { thermal_cmd_get_governor(arg, tz); INFO("governor: '%s'\n", tz->governor); return 0; } static int show_tz(struct thermal_zone *tz, __maybe_unused void *arg) { INFO("thermal zone '%s', id=%d\n", tz->name, tz->id); for_each_thermal_trip(tz->trip, show_trip, NULL); show_temp(tz, arg); show_governor(tz, arg); return 0; } static int tz_create(const char *name, int tz_id, __maybe_unused void *arg) { INFO("Thermal zone '%s'/%d created\n", name, tz_id); return 0; } static int tz_delete(int tz_id, __maybe_unused void *arg) { INFO("Thermal zone %d deleted\n", tz_id); return 0; } static int tz_disable(int tz_id, void *arg) { struct thermal_data *td = arg; struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id); INFO("Thermal zone %d ('%s') disabled\n", tz_id, tz->name); return 0; } static int tz_enable(int tz_id, void *arg) { struct thermal_data *td = arg; struct thermal_zone *tz = 
thermal_zone_find_by_id(td->tz, tz_id); INFO("Thermal zone %d ('%s') enabled\n", tz_id, tz->name); return 0; } static int trip_high(int tz_id, int trip_id, int temp, void *arg) { struct thermal_data *td = arg; struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id); INFO("Thermal zone %d ('%s'): trip point %d crossed way up with %d °C\n", tz_id, tz->name, trip_id, temp); return 0; } static int trip_low(int tz_id, int trip_id, int temp, void *arg) { struct thermal_data *td = arg; struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id); INFO("Thermal zone %d ('%s'): trip point %d crossed way down with %d °C\n", tz_id, tz->name, trip_id, temp); return 0; } static int trip_add(int tz_id, int trip_id, int type, int temp, int hyst, __maybe_unused void *arg) { INFO("Trip point added %d: id=%d, type=%d, temp=%d, hyst=%d\n", tz_id, trip_id, type, temp, hyst); return 0; } static int trip_delete(int tz_id, int trip_id, __maybe_unused void *arg) { INFO("Trip point deleted %d: id=%d\n", tz_id, trip_id); return 0; } static int trip_change(int tz_id, int trip_id, int type, int temp, int hyst, __maybe_unused void *arg) { struct thermal_data *td = arg; struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id); INFO("Trip point changed %d: id=%d, type=%d, temp=%d, hyst=%d\n", tz_id, trip_id, type, temp, hyst); tz->trip[trip_id].type = type; tz->trip[trip_id].temp = temp; tz->trip[trip_id].hyst = hyst; return 0; } static int cdev_add(const char *name, int cdev_id, int max_state, __maybe_unused void *arg) { INFO("Cooling device '%s'/%d (max state=%d) added\n", name, cdev_id, max_state); return 0; } static int cdev_delete(int cdev_id, __maybe_unused void *arg) { INFO("Cooling device %d deleted", cdev_id); return 0; } static int cdev_update(int cdev_id, int cur_state, __maybe_unused void *arg) { INFO("cdev:%d state:%d\n", cdev_id, cur_state); return 0; } static int gov_change(int tz_id, const char *name, __maybe_unused void *arg) { struct thermal_data *td = arg; 
struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id); INFO("%s: governor changed %s -> %s\n", tz->name, tz->governor, name); strcpy(tz->governor, name); return 0; } static struct thermal_ops ops = { .events.tz_create = tz_create, .events.tz_delete = tz_delete, .events.tz_disable = tz_disable, .events.tz_enable = tz_enable, .events.trip_high = trip_high, .events.trip_low = trip_low, .events.trip_add = trip_add, .events.trip_delete = trip_delete, .events.trip_change = trip_change, .events.cdev_add = cdev_add, .events.cdev_delete = cdev_delete, .events.cdev_update = cdev_update, .events.gov_change = gov_change }; static int thermal_event(__maybe_unused int fd, __maybe_unused void *arg) { struct thermal_data *td = arg; return thermal_events_handle(td->th, td); } static void usage(const char *cmd) { printf("%s : A thermal monitoring engine based on notifications\n", cmd); printf("Usage: %s [options]\n", cmd); printf("\t-h, --help\t\tthis help\n"); printf("\t-d, --daemonize\n"); printf("\t-l <level>, --loglevel <level>\tlog level: "); printf("DEBUG, INFO, NOTICE, WARN, ERROR\n"); printf("\t-s, --syslog\t\toutput to syslog\n"); printf("\n"); exit(0); } static int options_init(int argc, char *argv[], struct options *options) { int opt; struct option long_options[] = { { "help", no_argument, NULL, 'h' }, { "daemonize", no_argument, NULL, 'd' }, { "syslog", no_argument, NULL, 's' }, { "loglevel", required_argument, NULL, 'l' }, { 0, 0, 0, 0 } }; while (1) { int optindex = 0; opt = getopt_long(argc, argv, "l:dhs", long_options, &optindex); if (opt == -1) break; switch (opt) { case 'l': options->loglevel = log_str2level(optarg); break; case 'd': options->daemonize = 1; break; case 's': options->logopt = TO_SYSLOG; break; case 'h': usage(basename(argv[0])); break; default: /* '?' 
*/ return -1; } } return 0; } enum { THERMAL_ENGINE_SUCCESS = 0, THERMAL_ENGINE_OPTION_ERROR, THERMAL_ENGINE_DAEMON_ERROR, THERMAL_ENGINE_LOG_ERROR, THERMAL_ENGINE_THERMAL_ERROR, THERMAL_ENGINE_MAINLOOP_ERROR, }; int main(int argc, char *argv[]) { struct thermal_data td; struct options options = { .loglevel = LOG_INFO, .logopt = TO_STDOUT, }; if (options_init(argc, argv, &options)) { ERROR("Usage: %s --help\n", argv[0]); return THERMAL_ENGINE_OPTION_ERROR; } if (options.daemonize && daemon(0, 0)) { ERROR("Failed to daemonize: %p\n"); return THERMAL_ENGINE_DAEMON_ERROR; } if (log_init(options.loglevel, basename(argv[0]), options.logopt)) { ERROR("Failed to initialize logging facility\n"); return THERMAL_ENGINE_LOG_ERROR; } td.th = thermal_init(&ops); if (!td.th) { ERROR("Failed to initialize the thermal library\n"); return THERMAL_ENGINE_THERMAL_ERROR; } td.tz = thermal_zone_discover(td.th); if (!td.tz) { ERROR("No thermal zone available\n"); return THERMAL_ENGINE_THERMAL_ERROR; } for_each_thermal_zone(td.tz, show_tz, td.th); if (mainloop_init()) { ERROR("Failed to initialize the mainloop\n"); return THERMAL_ENGINE_MAINLOOP_ERROR; } if (mainloop_add(thermal_events_fd(td.th), thermal_event, &td)) { ERROR("Failed to setup the mainloop\n"); return THERMAL_ENGINE_MAINLOOP_ERROR; } INFO("Waiting for thermal events ...\n"); if (mainloop(-1)) { ERROR("Mainloop failed\n"); return THERMAL_ENGINE_MAINLOOP_ERROR; } return THERMAL_ENGINE_SUCCESS; }
linux-master
tools/thermal/thermal-engine/thermal-engine.c
// SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <[email protected]> #define _GNU_SOURCE #include <dirent.h> #include <fcntl.h> #include <getopt.h> #include <regex.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/signalfd.h> #include <sys/timerfd.h> #include <sys/types.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <linux/thermal.h> #include <libconfig.h> #include "thermal-tools.h" #define CLASS_THERMAL "/sys/class/thermal" enum { THERMOMETER_SUCCESS = 0, THERMOMETER_OPTION_ERROR, THERMOMETER_LOG_ERROR, THERMOMETER_CONFIG_ERROR, THERMOMETER_TIME_ERROR, THERMOMETER_INIT_ERROR, THERMOMETER_RUNTIME_ERROR }; struct options { int loglvl; int logopt; int overwrite; int duration; const char *config; char postfix[PATH_MAX]; char output[PATH_MAX]; }; struct tz_regex { regex_t regex; int polling; }; struct configuration { struct tz_regex *tz_regex; int nr_tz_regex; }; struct tz { FILE *file_out; int fd_temp; int fd_timer; int polling; const char *name; }; struct thermometer { struct tz *tz; int nr_tz; }; static struct tz_regex *configuration_tz_match(const char *expr, struct configuration *config) { int i; for (i = 0; i < config->nr_tz_regex; i++) { if (!regexec(&config->tz_regex[i].regex, expr, 0, NULL, 0)) return &config->tz_regex[i]; } return NULL; } static int configuration_default_init(struct configuration *config) { config->tz_regex = realloc(config->tz_regex, sizeof(*config->tz_regex) * (config->nr_tz_regex + 1)); if (regcomp(&config->tz_regex[config->nr_tz_regex].regex, ".*", REG_NOSUB | REG_EXTENDED)) { ERROR("Invalid regular expression\n"); return -1; } config->tz_regex[config->nr_tz_regex].polling = 250; config->nr_tz_regex = 1; return 0; } static int configuration_init(const char *path, struct configuration *config) { config_t cfg; config_setting_t *tz; int i, length; if (path && access(path, F_OK)) { ERROR("'%s' is not 
accessible\n", path); return -1; } if (!path && !config->nr_tz_regex) { INFO("No thermal zones configured, using wildcard for all of them\n"); return configuration_default_init(config); } config_init(&cfg); if (!config_read_file(&cfg, path)) { ERROR("Failed to parse %s:%d - %s\n", config_error_file(&cfg), config_error_line(&cfg), config_error_text(&cfg)); return -1; } tz = config_lookup(&cfg, "thermal-zones"); if (!tz) { ERROR("No thermal zone configured to be monitored\n"); return -1; } length = config_setting_length(tz); INFO("Found %d thermal zone(s) regular expression\n", length); for (i = 0; i < length; i++) { config_setting_t *node; const char *name; int polling; node = config_setting_get_elem(tz, i); if (!node) { ERROR("Missing node name '%d'\n", i); return -1; } if (!config_setting_lookup_string(node, "name", &name)) { ERROR("Thermal zone name not found\n"); return -1; } if (!config_setting_lookup_int(node, "polling", &polling)) { ERROR("Polling value not found"); return -1; } config->tz_regex = realloc(config->tz_regex, sizeof(*config->tz_regex) * (config->nr_tz_regex + 1)); if (regcomp(&config->tz_regex[config->nr_tz_regex].regex, name, REG_NOSUB | REG_EXTENDED)) { ERROR("Invalid regular expression '%s'\n", name); continue; } config->tz_regex[config->nr_tz_regex].polling = polling; config->nr_tz_regex++; INFO("Thermal zone regular expression '%s' with polling %d\n", name, polling); } return 0; } static void usage(const char *cmd) { printf("%s Version: %s\n", cmd, VERSION); printf("Usage: %s [options]\n", cmd); printf("\t-h, --help\t\tthis help\n"); printf("\t-o, --output <dir>\toutput directory for temperature capture\n"); printf("\t-c, --config <file>\tconfiguration file\n"); printf("\t-d, --duration <seconds>\tcapture duration\n"); printf("\t-l, --loglevel <level>\tlog level: "); printf("DEBUG, INFO, NOTICE, WARN, ERROR\n"); printf("\t-p, --postfix <string>\tpostfix to be happened at the end of the files\n"); printf("\t-s, --syslog\t\toutput to 
syslog\n"); printf("\t-w, --overwrite\t\toverwrite the temperature capture files if they exist\n"); printf("\n"); exit(0); } static int options_init(int argc, char *argv[], struct options *options) { int opt; time_t now = time(NULL); struct option long_options[] = { { "help", no_argument, NULL, 'h' }, { "config", required_argument, NULL, 'c' }, { "duration", required_argument, NULL, 'd' }, { "loglevel", required_argument, NULL, 'l' }, { "postfix", required_argument, NULL, 'p' }, { "output", required_argument, NULL, 'o' }, { "syslog", required_argument, NULL, 's' }, { "overwrite", no_argument, NULL, 'w' }, { 0, 0, 0, 0 } }; strftime(options->postfix, sizeof(options->postfix), "-%Y-%m-%d_%H:%M:%S", gmtime(&now)); while (1) { int optindex = 0; opt = getopt_long(argc, argv, "ho:c:d:l:p:sw", long_options, &optindex); if (opt == -1) break; switch (opt) { case 'c': options->config = optarg; break; case 'd': options->duration = atoi(optarg) * 1000; break; case 'l': options->loglvl = log_str2level(optarg); break; case 'h': usage(basename(argv[0])); break; case 'p': strcpy(options->postfix, optarg); break; case 'o': strcpy(options->output, optarg); break; case 's': options->logopt = TO_SYSLOG; break; case 'w': options->overwrite = 1; break; default: /* '?' 
*/ ERROR("Usage: %s --help\n", argv[0]); return -1; } } return 0; } static int thermometer_add_tz(const char *path, const char *name, int polling, struct thermometer *thermometer) { int fd; char tz_path[PATH_MAX]; sprintf(tz_path, CLASS_THERMAL"/%s/temp", path); fd = open(tz_path, O_RDONLY); if (fd < 0) { ERROR("Failed to open '%s': %m\n", tz_path); return -1; } thermometer->tz = realloc(thermometer->tz, sizeof(*thermometer->tz) * (thermometer->nr_tz + 1)); if (!thermometer->tz) { ERROR("Failed to allocate thermometer->tz\n"); return -1; } thermometer->tz[thermometer->nr_tz].fd_temp = fd; thermometer->tz[thermometer->nr_tz].name = strdup(name); thermometer->tz[thermometer->nr_tz].polling = polling; thermometer->nr_tz++; INFO("Added thermal zone '%s->%s (polling:%d)'\n", path, name, polling); return 0; } static int thermometer_init(struct configuration *config, struct thermometer *thermometer) { DIR *dir; struct dirent *dirent; struct tz_regex *tz_regex; const char *tz_dirname = "thermal_zone"; if (mainloop_init()) { ERROR("Failed to start mainloop\n"); return -1; } dir = opendir(CLASS_THERMAL); if (!dir) { ERROR("failed to open '%s'\n", CLASS_THERMAL); return -1; } while ((dirent = readdir(dir))) { char tz_type[THERMAL_NAME_LENGTH]; char tz_path[PATH_MAX]; FILE *tz_file; if (strncmp(dirent->d_name, tz_dirname, strlen(tz_dirname))) continue; sprintf(tz_path, CLASS_THERMAL"/%s/type", dirent->d_name); tz_file = fopen(tz_path, "r"); if (!tz_file) { ERROR("Failed to open '%s': %m", tz_path); continue; } fscanf(tz_file, "%s", tz_type); fclose(tz_file); tz_regex = configuration_tz_match(tz_type, config); if (!tz_regex) continue; if (thermometer_add_tz(dirent->d_name, tz_type, tz_regex->polling, thermometer)) continue; } closedir(dir); return 0; } static int timer_temperature_callback(int fd, void *arg) { struct tz *tz = arg; char buf[16] = { 0 }; pread(tz->fd_temp, buf, sizeof(buf), 0); fprintf(tz->file_out, "%ld %s", getuptimeofday_ms(), buf); read(fd, buf, sizeof(buf)); 
return 0; } static int thermometer_start(struct thermometer *thermometer, struct options *options) { struct itimerspec timer_it = { 0 }; char *path; FILE *f; int i; INFO("Capturing %d thermal zone(s) temperature...\n", thermometer->nr_tz); if (access(options->output, F_OK) && mkdir(options->output, 0700)) { ERROR("Failed to create directory '%s'\n", options->output); return -1; } for (i = 0; i < thermometer->nr_tz; i++) { asprintf(&path, "%s/%s%s", options->output, thermometer->tz[i].name, options->postfix); if (!options->overwrite && !access(path, F_OK)) { ERROR("'%s' already exists\n", path); return -1; } f = fopen(path, "w"); if (!f) { ERROR("Failed to create '%s':%m\n", path); return -1; } fprintf(f, "timestamp(ms) %s(°mC)\n", thermometer->tz[i].name); thermometer->tz[i].file_out = f; DEBUG("Created '%s' file for thermal zone '%s'\n", path, thermometer->tz[i].name); /* * Create polling timer */ thermometer->tz[i].fd_timer = timerfd_create(CLOCK_MONOTONIC, 0); if (thermometer->tz[i].fd_timer < 0) { ERROR("Failed to create timer for '%s': %m\n", thermometer->tz[i].name); return -1; } DEBUG("Watching '%s' every %d ms\n", thermometer->tz[i].name, thermometer->tz[i].polling); timer_it.it_interval = timer_it.it_value = msec_to_timespec(thermometer->tz[i].polling); if (timerfd_settime(thermometer->tz[i].fd_timer, 0, &timer_it, NULL) < 0) return -1; if (mainloop_add(thermometer->tz[i].fd_timer, timer_temperature_callback, &thermometer->tz[i])) return -1; } return 0; } static int thermometer_execute(int argc, char *argv[], char *const envp[], pid_t *pid) { if (!argc) return 0; *pid = fork(); if (*pid < 0) { ERROR("Failed to fork process: %m"); return -1; } if (!(*pid)) { execvpe(argv[0], argv, envp); exit(1); } return 0; } static int kill_process(__maybe_unused int fd, void *arg) { pid_t pid = *(pid_t *)arg; if (kill(pid, SIGTERM)) ERROR("Failed to send SIGTERM signal to '%d': %p\n", pid); else if (waitpid(pid, NULL, 0)) ERROR("Failed to wait pid '%d': %p\n", pid); 
mainloop_exit(); return 0; } static int exit_mainloop(__maybe_unused int fd, __maybe_unused void *arg) { mainloop_exit(); return 0; } static int thermometer_wait(struct options *options, pid_t pid) { int fd; sigset_t mask; /* * If there is a duration specified, we will exit the mainloop * and gracefully close all the files which will flush the * file system cache */ if (options->duration) { struct itimerspec timer_it = { 0 }; timer_it.it_value = msec_to_timespec(options->duration); fd = timerfd_create(CLOCK_MONOTONIC, 0); if (fd < 0) { ERROR("Failed to create duration timer: %m\n"); return -1; } if (timerfd_settime(fd, 0, &timer_it, NULL)) { ERROR("Failed to set timer time: %m\n"); return -1; } if (mainloop_add(fd, pid < 0 ? exit_mainloop : kill_process, &pid)) { ERROR("Failed to set timer exit mainloop callback\n"); return -1; } } /* * We want to catch any keyboard interrupt, as well as child * signals if any in order to exit properly */ sigemptyset(&mask); sigaddset(&mask, SIGINT); sigaddset(&mask, SIGQUIT); sigaddset(&mask, SIGCHLD); if (sigprocmask(SIG_BLOCK, &mask, NULL)) { ERROR("Failed to set sigprocmask: %m\n"); return -1; } fd = signalfd(-1, &mask, 0); if (fd < 0) { ERROR("Failed to set the signalfd: %m\n"); return -1; } if (mainloop_add(fd, exit_mainloop, NULL)) { ERROR("Failed to set timer exit mainloop callback\n"); return -1; } return mainloop(-1); } static int thermometer_stop(struct thermometer *thermometer) { int i; INFO("Closing/flushing output files\n"); for (i = 0; i < thermometer->nr_tz; i++) fclose(thermometer->tz[i].file_out); return 0; } int main(int argc, char *argv[], char *const envp[]) { struct options options = { .loglvl = LOG_DEBUG, .logopt = TO_STDOUT, .output = ".", }; struct configuration config = { 0 }; struct thermometer thermometer = { 0 }; pid_t pid = -1; if (options_init(argc, argv, &options)) return THERMOMETER_OPTION_ERROR; if (log_init(options.loglvl, argv[0], options.logopt)) return THERMOMETER_LOG_ERROR; if 
(configuration_init(options.config, &config)) return THERMOMETER_CONFIG_ERROR; if (uptimeofday_init()) return THERMOMETER_TIME_ERROR; if (thermometer_init(&config, &thermometer)) return THERMOMETER_INIT_ERROR; if (thermometer_start(&thermometer, &options)) return THERMOMETER_RUNTIME_ERROR; if (thermometer_execute(argc - optind, &argv[optind], envp, &pid)) return THERMOMETER_RUNTIME_ERROR; if (thermometer_wait(&options, pid)) return THERMOMETER_RUNTIME_ERROR; if (thermometer_stop(&thermometer)) return THERMOMETER_RUNTIME_ERROR; return THERMOMETER_SUCCESS; }
linux-master
tools/thermal/thermometer/thermometer.c
// SPDX-License-Identifier: LGPL-2.1+ // Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <[email protected]> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <syslog.h> #include "log.h" static const char *__ident = "unknown"; static int __options; static const char * const loglvl[] = { [LOG_DEBUG] = "DEBUG", [LOG_INFO] = "INFO", [LOG_NOTICE] = "NOTICE", [LOG_WARNING] = "WARN", [LOG_ERR] = "ERROR", [LOG_CRIT] = "CRITICAL", [LOG_ALERT] = "ALERT", [LOG_EMERG] = "EMERG", }; int log_str2level(const char *lvl) { int i; for (i = 0; i < sizeof(loglvl) / sizeof(loglvl[LOG_DEBUG]); i++) if (!strcmp(lvl, loglvl[i])) return i; return LOG_DEBUG; } extern void logit(int level, const char *format, ...) { va_list args; va_start(args, format); if (__options & TO_SYSLOG) vsyslog(level, format, args); if (__options & TO_STDERR) vfprintf(stderr, format, args); if (__options & TO_STDOUT) vfprintf(stdout, format, args); va_end(args); } int log_init(int level, const char *ident, int options) { if (!options) return -1; if (level > LOG_DEBUG) return -1; if (!ident) return -1; __ident = ident; __options = options; if (options & TO_SYSLOG) { openlog(__ident, options | LOG_NDELAY, LOG_USER); setlogmask(LOG_UPTO(level)); } return 0; } void log_exit(void) { closelog(); }
linux-master
tools/thermal/lib/log.c
// SPDX-License-Identifier: LGPL-2.1+ // Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <[email protected]> #include <stdio.h> #include <sys/time.h> #include <linux/sysinfo.h> #include "thermal-tools.h" static unsigned long __offset; static struct timeval __tv; int uptimeofday_init(void) { struct sysinfo info; if (sysinfo(&info)) return -1; gettimeofday(&__tv, NULL); __offset = __tv.tv_sec - info.uptime; return 0; } unsigned long getuptimeofday_ms(void) { gettimeofday(&__tv, NULL); return ((__tv.tv_sec - __offset) * 1000) + (__tv.tv_usec / 1000); } struct timespec msec_to_timespec(int msec) { struct timespec tv = { .tv_sec = (msec / 1000), .tv_nsec = (msec % 1000) * 1000000, }; return tv; }
linux-master
tools/thermal/lib/uptimeofday.c
// SPDX-License-Identifier: LGPL-2.1+ // Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <[email protected]> #include <stdlib.h> #include <errno.h> #include <unistd.h> #include <signal.h> #include <sys/epoll.h> #include "mainloop.h" #include "log.h" static int epfd = -1; static unsigned short nrhandler; static sig_atomic_t exit_mainloop; struct mainloop_data { mainloop_callback_t cb; void *data; int fd; }; static struct mainloop_data **mds; #define MAX_EVENTS 10 int mainloop(unsigned int timeout) { int i, nfds; struct epoll_event events[MAX_EVENTS]; struct mainloop_data *md; if (epfd < 0) return -1; for (;;) { nfds = epoll_wait(epfd, events, MAX_EVENTS, timeout); if (exit_mainloop || !nfds) return 0; if (nfds < 0) { if (errno == EINTR) continue; return -1; } for (i = 0; i < nfds; i++) { md = events[i].data.ptr; if (md->cb(md->fd, md->data) > 0) return 0; } } } int mainloop_add(int fd, mainloop_callback_t cb, void *data) { struct epoll_event ev = { .events = EPOLLIN, }; struct mainloop_data *md; if (fd >= nrhandler) { mds = realloc(mds, sizeof(*mds) * (fd + 1)); if (!mds) return -1; nrhandler = fd + 1; } md = malloc(sizeof(*md)); if (!md) return -1; md->data = data; md->cb = cb; md->fd = fd; mds[fd] = md; ev.data.ptr = md; if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) { free(md); return -1; } return 0; } int mainloop_del(int fd) { if (fd >= nrhandler) return -1; if (epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL) < 0) return -1; free(mds[fd]); return 0; } int mainloop_init(void) { epfd = epoll_create(2); if (epfd < 0) return -1; return 0; } void mainloop_exit(void) { exit_mainloop = 1; } void mainloop_fini(void) { close(epfd); }
linux-master
tools/thermal/lib/mainloop.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * tui.c ncurses text user interface for TMON program
 *
 * Copyright (C) 2013 Intel Corporation. All rights reserved.
 *
 * Author: Jacob Pan <[email protected]>
 */

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <ncurses.h>
#include <time.h>
#include <syslog.h>
#include <panel.h>
#include <pthread.h>
#include <signal.h>

#include "tmon.h"

/* type-safe min/max via GCC statement expressions; args evaluated once */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

/* panels used to stack the dialogue pop-up over the cooling-device window */
static PANEL *data_panel;
static PANEL *dialogue_panel;
static PANEL *top;		/* currently visible panel of the two */

static WINDOW *title_bar_window;
static WINDOW *tz_sensor_window;
static WINDOW *cooling_device_window;
static WINDOW *control_window;
static WINDOW *status_bar_window;
static WINDOW *thermal_data_window;
static WINDOW *dialogue_window;

/* fixed-width text slots rendered left-to-right on the status bar */
char status_bar_slots[10][40];
static void draw_hbar(WINDOW *win, int y, int start, int len,
		unsigned long pattern, bool end);

static int maxx, maxy;		/* current terminal dimensions */
static int maxwidth = 200;

#define TITLE_BAR_HIGHT 1
#define SENSOR_WIN_HIGHT 4	/* one row for tz name, one for trip points */

/* daemon mode flag (set by startup parameter -d) */
static int tui_disabled;

/*
 * Delete a panel if it exists.
 * NOTE(review): 'p = NULL' only clears the local copy of the pointer; the
 * caller's panel variable is NOT reset by this helper.
 */
static void close_panel(PANEL *p)
{
	if (p) {
		del_panel(p);
		p = NULL;
	}
}

/*
 * Delete a window if it exists.
 * NOTE(review): as above, 'win = NULL' has no effect on the caller's pointer.
 */
static void close_window(WINDOW *win)
{
	if (win) {
		delwin(win);
		win = NULL;
	}
}

/* Tear down all panels and windows (no-op in daemon mode). */
void close_windows(void)
{
	if (tui_disabled)
		return;
	/* must delete panels before their attached windows */
	if (dialogue_window)
		close_panel(dialogue_panel);
	if (cooling_device_window)
		close_panel(data_panel);

	close_window(title_bar_window);
	close_window(tz_sensor_window);
	close_window(status_bar_window);
	close_window(cooling_device_window);
	close_window(control_window);
	close_window(thermal_data_window);
	close_window(dialogue_window);
}

/* Print a message on the bottom status bar starting at column @x. */
void write_status_bar(int x, char *line)
{
	mvwprintw(status_bar_window, 0, x, "%s", line);
	wrefresh(status_bar_window);
}

/* wrap at 5 */
#define DIAG_DEV_ROWS  5
/*
 * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit
 */
static int diag_dev_rows(void)
{
	int entries = ptdata.nr_cooling_dev + 1;
	int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2);

	return min(rows, entries);
}

/*
 * Create all sub-windows and the panel pair, laid out top to bottom:
 * title bar, tz sensors, cooling devices, thermal data, controls, status
 * bar.  Called at startup and again from resize_handler().
 */
void setup_windows(void)
{
	int y_begin = 1;

	if (tui_disabled)
		return;

	getmaxyx(stdscr, maxy, maxx);
	resizeterm(maxy, maxx);

	title_bar_window = subwin(stdscr, TITLE_BAR_HIGHT, maxx, 0, 0);
	y_begin += TITLE_BAR_HIGHT;

	tz_sensor_window = subwin(stdscr, SENSOR_WIN_HIGHT, maxx, y_begin, 0);
	y_begin += SENSOR_WIN_HIGHT;

	cooling_device_window = subwin(stdscr, ptdata.nr_cooling_dev + 3,
				maxx, y_begin, 0);
	y_begin += ptdata.nr_cooling_dev + 3; /* 2 lines for border */
	/* two lines to show borders, one line per tz show trip point position
	 * and value.
	 * dialogue window is a pop-up, when needed it lays on top of cdev win
	 */

	dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50,
				DIAG_Y, DIAG_X);

	thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor *
				NR_LINES_TZDATA + 3, maxx, y_begin, 0);
	y_begin += ptdata.nr_tz_sensor * NR_LINES_TZDATA + 3;
	control_window = subwin(stdscr, 4, maxx, y_begin, 0);

	scrollok(cooling_device_window, TRUE);
	maxwidth = maxx - 18;
	status_bar_window = subwin(stdscr, 1, maxx, maxy-1, 0);

	strcpy(status_bar_slots[0], " Ctrl-c - Quit ");
	strcpy(status_bar_slots[1], " TAB - Tuning ");
	wmove(status_bar_window, 1, 30);

	/* prepare panels for dialogue, if panel already created then we must
	 * be doing resizing, so just replace windows with new ones, old ones
	 * should have been deleted by close_window
	 */
	data_panel = new_panel(cooling_device_window);
	if (!data_panel)
		syslog(LOG_DEBUG, "No data panel\n");
	else {
		if (dialogue_window) {
			dialogue_panel = new_panel(dialogue_window);
			if (!dialogue_panel)
				syslog(LOG_DEBUG, "No dialogue panel\n");
			else {
				/* Set up the user pointer to the next panel*/
				set_panel_userptr(data_panel, dialogue_panel);
				set_panel_userptr(dialogue_panel, data_panel);
				top = data_panel;
			}
		} else
			syslog(LOG_INFO, "no dialogue win, term too small\n");
	}

	doupdate();
	werase(stdscr);
	refresh();
}

/*
 * SIGWINCH handler: rebuild the whole window layout for the new terminal
 * size, then re-arm itself (signal() semantics may be one-shot).
 */
void resize_handler(int sig)
{
	/* start over when term gets resized, but first we clean up */
	close_windows();
	endwin();
	refresh();
	clear();

	getmaxyx(stdscr, maxy, maxx);  /* get the new screen size */
	setup_windows();
	/* rate limit */
	sleep(1);
	syslog(LOG_DEBUG, "SIG %d, term resized to %d x %d\n",
		sig, maxy, maxx);
	signal(SIGWINCH, resize_handler);
}

const char cdev_title[] = " COOLING DEVICES ";

/*
 * Render the cooling-device window: a table of cooling devices on the left
 * and, per thermal zone, a '*' grid showing which trip points each cooling
 * device is bound to.
 */
void show_cooling_device(void)
{
	int i, j, x, y = 0;

	if (tui_disabled || !cooling_device_window)
		return;

	werase(cooling_device_window);
	wattron(cooling_device_window, A_BOLD);
	mvwprintw(cooling_device_window, 1, 1,
		"ID Cooling Dev Cur Max Thermal Zone Binding");
	wattroff(cooling_device_window, A_BOLD);
	for (j = 0; j < ptdata.nr_cooling_dev; j++) {
		/* draw cooling device list on the left in the order of
		 * cooling device instances. skip unused idr.
		 */
		mvwprintw(cooling_device_window, j + 2, 1,
			"%02d %12.12s%6d %6d",
			ptdata.cdi[j].instance,
			ptdata.cdi[j].type,
			ptdata.cdi[j].cur_state,
			ptdata.cdi[j].max_state);
	}

	/* show cdev binding, y is the global cooling device instance */
	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
		int tz_inst = ptdata.tzi[i].instance;

		for (j = 0; j < ptdata.nr_cooling_dev; j++) {
			int cdev_inst;

			y = j;
			x = tz_inst * TZONE_RECORD_SIZE + TZ_LEFT_ALIGN;

			draw_hbar(cooling_device_window, y+2, x,
				TZONE_RECORD_SIZE-1, ACS_VLINE, false);

			/* draw a column of spaces to separate thermal zones */
			mvwprintw(cooling_device_window, y+2, x-1, " ");
			if (ptdata.tzi[i].cdev_binding) {
				cdev_inst = ptdata.cdi[j].instance;
				unsigned long trip_binding =
					ptdata.tzi[i].trip_binding[cdev_inst];
				int k = 0; /* per zone trip point id that
					    * binded to this cdev, one to
					    * many possible based on the
					    * binding bitmask.
					    */
				syslog(LOG_DEBUG,
					"bind tz%d cdev%d tp%lx %d cdev%lx\n",
					i, j, trip_binding, y,
					ptdata.tzi[i].cdev_binding);
				/* draw each trip binding for the cdev */
				while (trip_binding >>= 1) {
					k++;
					if (!(trip_binding & 1))
						continue;
					/* draw '*' to show binding */
					mvwprintw(cooling_device_window,
						y + 2,
						x + ptdata.tzi[i].nr_trip_pts -
						k - 1,
						"*");
				}
			}
		}
	}
	/* draw border after data so that border will not be messed up
	 * even there is not enough space for all the data to be shown
	 */
	wborder(cooling_device_window, 0, 0, 0, 0, 0, 0, 0, 0);
	wattron(cooling_device_window, A_BOLD);
	/* NOTE(review): title passed in the format-string position; safe
	 * only because it contains no '%' conversions.
	 */
	mvwprintw(cooling_device_window, 0, maxx/2 - sizeof(cdev_title),
		cdev_title);
	wattroff(cooling_device_window, A_BOLD);
	wrefresh(cooling_device_window);
}

const char DIAG_TITLE[] = "[ TUNABLES ]";

/*
 * Render the pop-up tunables dialogue: one lettered choice per cooling
 * device plus a final "Set Temp" entry, wrapped into columns by
 * diag_dev_rows().
 */
void show_dialogue(void)
{
	int j, x = 0, y = 0;
	int rows, cols;
	WINDOW *w = dialogue_window;

	if (tui_disabled || !w)
		return;

	getmaxyx(w, rows, cols);

	/* Silence compiler 'unused' warnings */
	(void)cols;

	werase(w);
	box(w, 0, 0);
	mvwprintw(w, 0, maxx/4, DIAG_TITLE);
	/* list all the available tunables */
	for (j = 0; j <= ptdata.nr_cooling_dev; j++) {
		y = j % diag_dev_rows();
		if (y == 0 && j != 0)
			x += 20;
		if (j == ptdata.nr_cooling_dev)
			/* save last choice for target temp */
			mvwprintw(w, y+1, x+1, "%C-%.12s", 'A'+j, "Set Temp");
		else
			mvwprintw(w, y+1, x+1, "%C-%.10s-%2d", 'A'+j,
				ptdata.cdi[j].type, ptdata.cdi[j].instance);
	}
	wattron(w, A_BOLD);
	mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?");
	wattroff(w, A_BOLD);
	/* print legend at the bottom line */
	mvwprintw(w, rows - 2, 1,
		"Legend: A=Active, P=Passive, C=Critical");

	wrefresh(dialogue_window);
}

/* Print @buf at (@y, @x) inside the dialogue window (no refresh here). */
void write_dialogue_win(char *buf, int y, int x)
{
	WINDOW *w = dialogue_window;

	mvwprintw(w, y, x, "%s", buf);
}

const char control_title[] = " CONTROLS ";

/* Render the controls window: PID gains/output and the current target. */
void show_control_w(void)
{
	unsigned long state;

	get_ctrl_state(&state);

	if (tui_disabled || !control_window)
		return;

	werase(control_window);
	mvwprintw(control_window, 1, 1,
		"PID gain: kp=%2.2f ki=%2.2f kd=%2.2f Output %2.2f",
		p_param.kp, p_param.ki, p_param.kd, p_param.y_k);

	mvwprintw(control_window, 2, 1,
		"Target Temp: %2.1fC, Zone: %d, Control Device: %.12s",
		p_param.t_target, target_thermal_zone, ctrl_cdev);

	/* draw border last such that everything is within boundary */
	wborder(control_window, 0, 0, 0, 0, 0, 0, 0, 0);
	wattron(control_window, A_BOLD);
	mvwprintw(control_window, 0, maxx/2 - sizeof(control_title),
		control_title);
	wattroff(control_window, A_BOLD);
	wrefresh(control_window);
}

/* One-time ncurses setup: input modes, cursor, and the color pairs. */
void initialize_curses(void)
{
	if (tui_disabled)
		return;

	initscr();
	start_color();
	keypad(stdscr, TRUE);	/* enable keyboard mapping */
	nonl();			/* tell curses not to do NL->CR/NL on output */
	cbreak();		/* take input chars one at a time */
	noecho();		/* dont echo input */
	curs_set(0);		/* turn off cursor */
	use_default_colors();

	init_pair(PT_COLOR_DEFAULT, COLOR_WHITE, COLOR_BLACK);
	init_pair(PT_COLOR_HEADER_BAR, COLOR_BLACK, COLOR_WHITE);
	init_pair(PT_COLOR_ERROR, COLOR_BLACK, COLOR_RED);
	init_pair(PT_COLOR_RED, COLOR_WHITE, COLOR_RED);
	init_pair(PT_COLOR_YELLOW, COLOR_WHITE, COLOR_YELLOW);
	init_pair(PT_COLOR_GREEN, COLOR_WHITE, COLOR_GREEN);
	init_pair(PT_COLOR_BLUE, COLOR_WHITE, COLOR_BLUE);
	init_pair(PT_COLOR_BRIGHT, COLOR_WHITE, COLOR_BLACK);
}

/* Render the title bar and (re)paint all non-empty status-bar slots. */
void show_title_bar(void)
{
	int i;
	int x = 0;

	if (tui_disabled || !title_bar_window)
		return;

	wattrset(title_bar_window, COLOR_PAIR(PT_COLOR_HEADER_BAR));
	wbkgd(title_bar_window, COLOR_PAIR(PT_COLOR_HEADER_BAR));
	werase(title_bar_window);

	mvwprintw(title_bar_window, 0, 0, " TMON v%s", VERSION);

	wrefresh(title_bar_window);

	werase(status_bar_window);

	for (i = 0; i < 10; i++) {
		if (strlen(status_bar_slots[i]) == 0)
			continue;
		wattron(status_bar_window, A_REVERSE);
		mvwprintw(status_bar_window, 0, x, "%s", status_bar_slots[i]);
		wattroff(status_bar_window, A_REVERSE);
		x += strlen(status_bar_slots[i]) + 1;
	}
	wrefresh(status_bar_window);
}

/*
 * Read the value typed for dialogue choice @ch and apply it: either the new
 * target temperature (last choice) or a cooling device cur_state written via
 * sysfs.  Closes the dialogue and flips back to the data panel when done.
 */
static void handle_input_val(int ch)
{
	char buf[32];
	int val;
	char path[256];
	WINDOW *w = dialogue_window;

	echo();
	keypad(w, TRUE);
	wgetnstr(w, buf, 31);
	val = atoi(buf);

	if (ch == ptdata.nr_cooling_dev) {
		/* message prepared up front; shown only on range failure */
		snprintf(buf, 31, "Invalid Temp %d! %d-%d", val,
			MIN_CTRL_TEMP, MAX_CTRL_TEMP);
		if (val < MIN_CTRL_TEMP || val > MAX_CTRL_TEMP)
			write_status_bar(40, buf);
		else {
			p_param.t_target = val;
			snprintf(buf, 31, "Set New Target Temp %d", val);
			write_status_bar(40, buf);
		}
	} else {
		snprintf(path, 256, "%s/%s%d", THERMAL_SYSFS,
			CDEV, ptdata.cdi[ch].instance);
		sysfs_set_ulong(path, "cur_state", val);
	}
	noecho();
	dialogue_on = 0;
	show_data_w();
	show_control_w();

	top = (PANEL *)panel_userptr(top);
	top_panel(top);
}

/*
 * Map a dialogue key ('A'..'Z' or 'a'..'z') to a cooling device index (or
 * the target-temp entry) and prompt for its new value; anything out of
 * range prints an error inside the dialogue.
 */
static void handle_input_choice(int ch)
{
	char buf[48];
	int base = 0;
	int cdev_id = 0;

	if ((ch >= 'A' && ch <= 'A' + ptdata.nr_cooling_dev) ||
	    (ch >= 'a' && ch <= 'a' + ptdata.nr_cooling_dev)) {
		base = (ch < 'a') ? 'A' : 'a';
		cdev_id = ch - base;
		if (ptdata.nr_cooling_dev == cdev_id)
			snprintf(buf, sizeof(buf), "New Target Temp:");
		else
			snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ",
				ptdata.cdi[cdev_id].type,
				ptdata.cdi[cdev_id].instance);
		write_dialogue_win(buf, diag_dev_rows() + 2, 2);
		handle_input_val(cdev_id);
	} else {
		snprintf(buf, sizeof(buf), "Invalid selection %d", ch);
		write_dialogue_win(buf, 8, 2);
	}
}

/*
 * Keyboard thread entry point: blocks on wgetch() and handles TAB (toggle
 * dialogue panel), dialogue choices, and q/Q (quit).  Shared UI state is
 * protected by input_lock.
 */
void *handle_tui_events(void *arg)
{
	int ch;

	keypad(cooling_device_window, TRUE);
	while ((ch = wgetch(cooling_device_window)) != EOF) {
		if (tmon_exit)
			break;
		/* when term size is too small, no dialogue panels are set.
		 * we need to filter out such cases.
		 */
		if (!data_panel || !dialogue_panel ||
			!cooling_device_window ||
			!dialogue_window) {
			continue;
		}
		pthread_mutex_lock(&input_lock);
		if (dialogue_on) {
			handle_input_choice(ch);
			/* top panel filter */
			if (ch == 'q' || ch == 'Q')
				ch = 0;
		}
		switch (ch) {
		case KEY_LEFT:
			box(cooling_device_window, 10, 0);
			break;
		case 9: /* TAB */
			top = (PANEL *)panel_userptr(top);
			top_panel(top);
			if (top == dialogue_panel) {
				dialogue_on = 1;
				show_dialogue();
			} else {
				dialogue_on = 0;
				/* force refresh */
				show_data_w();
				show_control_w();
			}
			break;
		case 'q':
		case 'Q':
			tmon_exit = 1;
			break;
		}
		update_panels();
		doupdate();
		pthread_mutex_unlock(&input_lock);
	}

	if (arg)
		*(int *)arg = 0; /* make gcc happy */

	return NULL;
}

/* draw a horizontal bar in given pattern */
static void draw_hbar(WINDOW *win, int y, int start, int len,
		unsigned long ptn, bool end)
{
	mvwaddch(win, y, start, ptn);
	whline(win, ptn, len);
	if (end)
		mvwaddch(win, y, MAX_DISP_TEMP+TDATA_LEFT, ']');
}

/* Single-letter tag for a trip point type; '?' for unknown types. */
static char trip_type_to_char(int type)
{
	switch (type) {
	case THERMAL_TRIP_CRITICAL: return 'C';
	case THERMAL_TRIP_HOT: return 'H';
	case THERMAL_TRIP_PASSIVE: return 'P';
	case THERMAL_TRIP_ACTIVE: return 'A';
	default:
		return '?';
	}
}

/* fill a string with trip point type and value in one line
 * e.g. P(56) C(106)
 * maintain the distance one degree per char
 */
static void draw_tp_line(int tz, int y)
{
	int j;
	int x;

	for (j = 0; j < ptdata.tzi[tz].nr_trip_pts; j++) {
		/* column position doubles as the temperature in degrees C */
		x = ptdata.tzi[tz].tp[j].temp / 1000;
		mvwprintw(thermal_data_window, y + 0, x + TDATA_LEFT,
			"%c%d",
			trip_type_to_char(ptdata.tzi[tz].tp[j].type),
			x);
		syslog(LOG_INFO, "%s:tz %d tp %d temp = %lu\n", __func__,
			tz, j, ptdata.tzi[tz].tp[j].temp);
	}
}

const char data_win_title[] = " THERMAL DATA ";

/*
 * Render the thermal data window: a degree ruler on top, then one bar per
 * thermal zone showing the current temperature with its trip points marked.
 */
void show_data_w(void)
{
	int i;

	if (tui_disabled || !thermal_data_window)
		return;

	werase(thermal_data_window);
	wattron(thermal_data_window, A_BOLD);
	mvwprintw(thermal_data_window, 0, maxx/2 - sizeof(data_win_title),
		data_win_title);
	wattroff(thermal_data_window, A_BOLD);

	/* draw a line as ruler */
	for (i = 10; i < MAX_DISP_TEMP; i += 10)
		mvwprintw(thermal_data_window, 1, i+TDATA_LEFT, "%2d", i);

	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
		int temp = trec[cur_thermal_record].temp[i] / 1000;
		int y = 0;

		y = i * NR_LINES_TZDATA + 2; /* y at tz temp data line */
		mvwprintw(thermal_data_window, y, 1, "%6.6s%2d:[%3d][",
			ptdata.tzi[i].type,
			ptdata.tzi[i].instance, temp);
		draw_hbar(thermal_data_window, y, TDATA_LEFT, temp,
			ACS_RARROW, true);
		draw_tp_line(i, y);
	}

	wborder(thermal_data_window, 0, 0, 0, 0, 0, 0, 0, 0);
	wrefresh(thermal_data_window);
}

const char tz_title[] = "THERMAL ZONES(SENSORS)";

/*
 * Render the sensor window: one labelled column per thermal zone instance
 * with its trip point letters drawn low-to-high underneath.
 */
void show_sensors_w(void)
{
	int i, j;
	char buffer[512];

	if (tui_disabled || !tz_sensor_window)
		return;

	werase(tz_sensor_window);
	memset(buffer, 0, sizeof(buffer));
	wattron(tz_sensor_window, A_BOLD);
	mvwprintw(tz_sensor_window, 1, 1, "Thermal Zones:");
	wattroff(tz_sensor_window, A_BOLD);

	mvwprintw(tz_sensor_window, 1, TZ_LEFT_ALIGN, "%s", buffer);
	/* fill trip points for each tzone */
	wattron(tz_sensor_window, A_BOLD);
	mvwprintw(tz_sensor_window, 2, 1, "Trip Points:");
	wattroff(tz_sensor_window, A_BOLD);

	/* draw trip point from low to high for each tz */
	for (i = 0; i < ptdata.nr_tz_sensor; i++) {
		int inst = ptdata.tzi[i].instance;

		mvwprintw(tz_sensor_window, 1,
			TZ_LEFT_ALIGN+TZONE_RECORD_SIZE * inst, "%.9s%02d",
			ptdata.tzi[i].type, ptdata.tzi[i].instance);
		for (j = ptdata.tzi[i].nr_trip_pts - 1; j >= 0; j--) {
			/* loop through all trip points */
			char type;
			int tp_pos;
			/* reverse the order here since trips are sorted
			 * in ascending order in terms of temperature.
			 */
			tp_pos = ptdata.tzi[i].nr_trip_pts - j - 1;

			type = trip_type_to_char(ptdata.tzi[i].tp[j].type);
			mvwaddch(tz_sensor_window, 2,
				inst * TZONE_RECORD_SIZE + TZ_LEFT_ALIGN +
				tp_pos, type);
			syslog(LOG_DEBUG, "draw tz %d tp %d ch:%c\n",
				inst, j, type);
		}
	}
	wborder(tz_sensor_window, 0, 0, 0, 0, 0, 0, 0, 0);
	wattron(tz_sensor_window, A_BOLD);
	mvwprintw(tz_sensor_window, 0, maxx/2 - sizeof(tz_title), tz_title);
	wattroff(tz_sensor_window, A_BOLD);
	wrefresh(tz_sensor_window);
}

/* Switch to daemon mode: all TUI entry points above become no-ops. */
void disable_tui(void)
{
	tui_disabled = 1;
}
linux-master
tools/thermal/tmon/tui.c